From 750ca4ff3ccfb588e7aa970b9c1c2ab7fa1bcb98 Mon Sep 17 00:00:00 2001
From: qube
Date: Thu, 1 Dec 2016 14:46:05 +0100
Subject: [PATCH] Updating remaining vendor

---
api/rpc/logs/logs.go | 2 +- cmd/amp-log-worker/main.go | 5 +- data/elasticsearch/elasticsearch.go | 13 +- data/kafka/kafka.go | 28 - glide.lock | 64 +- glide.yaml | 48 +- .../Shopify/sarama/.github/CONTRIBUTING.md | 31 - .../Shopify/sarama/.github/ISSUE_TEMPLATE.md | 19 - vendor/github.com/Shopify/sarama/.gitignore | 24 - vendor/github.com/Shopify/sarama/.travis.yml | 33 - vendor/github.com/Shopify/sarama/CHANGELOG.md | 323 -- vendor/github.com/Shopify/sarama/MIT-LICENSE | 20 - vendor/github.com/Shopify/sarama/Makefile | 21 - vendor/github.com/Shopify/sarama/README.md | 36 - vendor/github.com/Shopify/sarama/Vagrantfile | 19 - .../Shopify/sarama/api_versions_request.go | 24 - .../sarama/api_versions_request_test.go | 14 - .../Shopify/sarama/api_versions_response.go | 86 - .../sarama/api_versions_response_test.go | 32 - .../Shopify/sarama/async_producer.go | 903 ----- .../Shopify/sarama/async_producer_test.go | 801 ----- vendor/github.com/Shopify/sarama/broker.go | 526 --- .../github.com/Shopify/sarama/broker_test.go | 253 -- vendor/github.com/Shopify/sarama/client.go | 733 ---- .../github.com/Shopify/sarama/client_test.go | 608 ---- vendor/github.com/Shopify/sarama/config.go | 399 --- .../github.com/Shopify/sarama/config_test.go | 26 - vendor/github.com/Shopify/sarama/consumer.go | 715 ---- .../Shopify/sarama/consumer_group_members.go | 94 - .../sarama/consumer_group_members_test.go | 73 - .../sarama/consumer_metadata_request.go | 26 - .../sarama/consumer_metadata_request_test.go | 19 - .../sarama/consumer_metadata_response.go | 85 - .../sarama/consumer_metadata_response_test.go | 35 - .../Shopify/sarama/consumer_test.go | 854 ----- .../github.com/Shopify/sarama/crc32_field.go | 36 - .../Shopify/sarama/describe_groups_request.go | 30 - .../sarama/describe_groups_request_test.go | 34 - .../sarama/describe_groups_response.go | 174 - .../sarama/describe_groups_response_test.go | 91 - vendor/github.com/Shopify/sarama/dev.yml | 13 - .../Shopify/sarama/encoder_decoder.go | 84 - vendor/github.com/Shopify/sarama/errors.go | 194 -- .../Shopify/sarama/examples/README.md | 9 - .../sarama/examples/http_server/.gitignore | 2 - .../sarama/examples/http_server/README.md | 7 - .../examples/http_server/http_server.go | 246 -- .../examples/http_server/http_server_test.go | 109 - .../Shopify/sarama/fetch_request.go | 136 - .../Shopify/sarama/fetch_request_test.go | 34 - .../Shopify/sarama/fetch_response.go | 210 -- .../Shopify/sarama/fetch_response_test.go | 84 - .../Shopify/sarama/functional_client_test.go | 90 - .../sarama/functional_consumer_test.go | 61 - .../sarama/functional_offset_manager_test.go | 47 - .../sarama/functional_producer_test.go | 203 -- .../Shopify/sarama/functional_test.go | 148 - .../Shopify/sarama/heartbeat_request.go | 47 - .../Shopify/sarama/heartbeat_request_test.go | 21 - .../Shopify/sarama/heartbeat_response.go | 32 - .../Shopify/sarama/heartbeat_response_test.go | 18 - .../Shopify/sarama/join_group_request.go | 108 - .../Shopify/sarama/join_group_request_test.go | 41 - .../Shopify/sarama/join_group_response.go | 114 - .../sarama/join_group_response_test.go | 98 - .../Shopify/sarama/leave_group_request.go | 40 - .../sarama/leave_group_request_test.go | 19 - .../Shopify/sarama/leave_group_response.go | 32 - .../sarama/leave_group_response_test.go | 24 - .../github.com/Shopify/sarama/length_field.go | 29 -
.../Shopify/sarama/list_groups_request.go | 24 - .../sarama/list_groups_request_test.go | 7 - .../Shopify/sarama/list_groups_response.go | 68 - .../sarama/list_groups_response_test.go | 58 - vendor/github.com/Shopify/sarama/message.go | 163 - .../github.com/Shopify/sarama/message_set.go | 89 - .../github.com/Shopify/sarama/message_test.go | 113 - .../Shopify/sarama/metadata_request.go | 52 - .../Shopify/sarama/metadata_request_test.go | 29 - .../Shopify/sarama/metadata_response.go | 239 -- .../Shopify/sarama/metadata_response_test.go | 139 - .../github.com/Shopify/sarama/mockbroker.go | 300 -- .../Shopify/sarama/mockresponses.go | 455 --- .../github.com/Shopify/sarama/mocks/README.md | 13 - .../Shopify/sarama/mocks/async_producer.go | 174 - .../sarama/mocks/async_producer_test.go | 132 - .../Shopify/sarama/mocks/consumer.go | 299 -- .../Shopify/sarama/mocks/consumer_test.go | 249 -- .../github.com/Shopify/sarama/mocks/mocks.go | 48 - .../Shopify/sarama/mocks/sync_producer.go | 148 - .../sarama/mocks/sync_producer_test.go | 124 - .../Shopify/sarama/offset_commit_request.go | 190 - .../sarama/offset_commit_request_test.go | 90 - .../Shopify/sarama/offset_commit_response.go | 85 - .../sarama/offset_commit_response_test.go | 24 - .../Shopify/sarama/offset_fetch_request.go | 81 - .../sarama/offset_fetch_request_test.go | 31 - .../Shopify/sarama/offset_fetch_response.go | 143 - .../sarama/offset_fetch_response_test.go | 22 - .../Shopify/sarama/offset_manager.go | 542 --- .../Shopify/sarama/offset_manager_test.go | 369 -- .../Shopify/sarama/offset_request.go | 117 - .../Shopify/sarama/offset_request_test.go | 26 - .../Shopify/sarama/offset_response.go | 142 - .../Shopify/sarama/offset_response_test.go | 62 - .../Shopify/sarama/packet_decoder.go | 45 - .../Shopify/sarama/packet_encoder.go | 42 - .../github.com/Shopify/sarama/partitioner.go | 123 - .../Shopify/sarama/partitioner_test.go | 215 -- .../github.com/Shopify/sarama/prep_encoder.go | 110 - .../Shopify/sarama/produce_request.go | 157 - .../Shopify/sarama/produce_request_test.go | 47 - .../Shopify/sarama/produce_response.go | 158 - .../Shopify/sarama/produce_response_test.go | 67 - .../github.com/Shopify/sarama/produce_set.go | 166 - .../Shopify/sarama/produce_set_test.go | 143 - .../github.com/Shopify/sarama/real_decoder.go | 259 -- .../github.com/Shopify/sarama/real_encoder.go | 115 - vendor/github.com/Shopify/sarama/request.go | 117 - .../github.com/Shopify/sarama/request_test.go | 87 - .../Shopify/sarama/response_header.go | 21 - .../Shopify/sarama/response_header_test.go | 21 - vendor/github.com/Shopify/sarama/sarama.go | 58 - .../Shopify/sarama/sasl_handshake_request.go | 33 - .../sarama/sasl_handshake_request_test.go | 17 - .../Shopify/sarama/sasl_handshake_response.go | 38 - .../sarama/sasl_handshake_response_test.go | 24 - .../Shopify/sarama/sync_group_request.go | 100 - .../Shopify/sarama/sync_group_request_test.go | 38 - .../Shopify/sarama/sync_group_response.go | 40 - .../sarama/sync_group_response_test.go | 40 - .../Shopify/sarama/sync_producer.go | 140 - .../Shopify/sarama/sync_producer_test.go | 196 -- .../github.com/Shopify/sarama/tools/README.md | 10 - .../tools/kafka-console-consumer/.gitignore | 2 - .../tools/kafka-console-consumer/README.md | 29 - .../kafka-console-consumer.go | 145 - .../.gitignore | 2 - .../kafka-console-partitionconsumer/README.md | 28 - .../kafka-console-partitionconsumer.go | 102 - .../tools/kafka-console-producer/.gitignore | 2 - .../tools/kafka-console-producer/README.md | 34 - 
.../kafka-console-producer.go | 118 - vendor/github.com/Shopify/sarama/utils.go | 150 - .../github.com/Shopify/sarama/utils_test.go | 21 - .../Shopify/sarama/vagrant/boot_cluster.sh | 22 - .../Shopify/sarama/vagrant/create_topics.sh | 8 - .../Shopify/sarama/vagrant/install_cluster.sh | 49 - .../Shopify/sarama/vagrant/kafka.conf | 5 - .../Shopify/sarama/vagrant/provision.sh | 15 - .../Shopify/sarama/vagrant/run_toxiproxy.sh | 22 - .../Shopify/sarama/vagrant/server.properties | 127 - .../Shopify/sarama/vagrant/setup_services.sh | 29 - .../Shopify/sarama/vagrant/toxiproxy.conf | 6 - .../Shopify/sarama/vagrant/zookeeper.conf | 4 - .../sarama/vagrant/zookeeper.properties | 36 - .../distribution/docs/spec/manifest-v2-1.md | 2 +- .../distribution/docs/spec/manifest-v2-2.md | 2 +- .../manifest/schema1/config_builder.go | 9 +- vendor/github.com/docker/go-units/duration.go | 4 +- .../docker/go-units/duration_test.go | 20 +- .../eapache/go-resiliency/.gitignore | 24 - .../eapache/go-resiliency/.travis.yml | 7 - .../eapache/go-resiliency/README.md | 21 - .../eapache/go-resiliency/batcher/README.md | 31 - .../eapache/go-resiliency/batcher/batcher.go | 108 - .../go-resiliency/batcher/batcher_test.go | 123 - .../eapache/go-resiliency/breaker/README.md | 34 - .../eapache/go-resiliency/breaker/breaker.go | 161 - .../go-resiliency/breaker/breaker_test.go | 196 -- .../eapache/go-resiliency/deadline/README.md | 27 - .../go-resiliency/deadline/deadline.go | 45 - .../go-resiliency/deadline/deadline_test.go | 65 - .../eapache/go-resiliency/retrier/README.md | 26 - .../eapache/go-resiliency/retrier/backoffs.go | 24 - .../go-resiliency/retrier/backoffs_test.go | 55 - .../go-resiliency/retrier/classifier.go | 66 - .../go-resiliency/retrier/classifier_test.go | 66 - .../eapache/go-resiliency/retrier/retrier.go | 69 - .../go-resiliency/retrier/retrier_test.go | 129 - .../eapache/go-resiliency/semaphore/README.md | 22 - .../go-resiliency/semaphore/semaphore.go | 45 - .../go-resiliency/semaphore/semaphore_test.go | 61 - .../eapache/go-xerial-snappy/.gitignore | 24 - .../eapache/go-xerial-snappy/.travis.yml | 7 - .../eapache/go-xerial-snappy/LICENSE | 21 - .../eapache/go-xerial-snappy/README.md | 13 - .../eapache/go-xerial-snappy/snappy.go | 43 - .../eapache/go-xerial-snappy/snappy_test.go | 49 - vendor/github.com/eapache/queue/.gitignore | 23 - vendor/github.com/eapache/queue/.travis.yml | 7 - vendor/github.com/eapache/queue/LICENSE | 21 - vendor/github.com/eapache/queue/README.md | 16 - vendor/github.com/eapache/queue/queue.go | 102 - vendor/github.com/eapache/queue/queue_test.go | 178 - vendor/github.com/golang/protobuf/.gitignore | 1 - vendor/github.com/golang/protobuf/Makefile | 1 - vendor/github.com/golang/protobuf/README.md | 44 +- .../golang/protobuf/_conformance/Makefile | 33 - .../protobuf/_conformance/conformance.go | 161 - .../conformance_proto/conformance.pb.go | 1472 -------- .../conformance_proto/conformance.proto | 285 -- .../golang/protobuf/jsonpb/jsonpb.go | 68 +- .../golang/protobuf/jsonpb/jsonpb_test.go | 109 +- .../jsonpb_test_proto/more_test_objects.pb.go | 97 +- .../jsonpb_test_proto/more_test_objects.proto | 4 - .../jsonpb_test_proto/test_objects.pb.go | 164 +- .../jsonpb_test_proto/test_objects.proto | 1 - .../golang/protobuf/proto/all_test.go | 58 +- .../golang/protobuf/proto/any_test.go | 36 +- .../golang/protobuf/proto/clone_test.go | 33 - .../golang/protobuf/proto/decode.go | 117 +- .../golang/protobuf/proto/decode_test.go | 256 -- .../golang/protobuf/proto/encode.go | 14 +- 
.../golang/protobuf/proto/encode_test.go | 83 - .../github.com/golang/protobuf/proto/equal.go | 8 +- .../golang/protobuf/proto/equal_test.go | 12 - .../golang/protobuf/proto/extensions.go | 31 - .../golang/protobuf/proto/extensions_test.go | 55 +- .../github.com/golang/protobuf/proto/lib.go | 2 +- .../golang/protobuf/proto/properties.go | 10 +- .../protobuf/proto/proto3_proto/proto3.pb.go | 101 +- .../protobuf/proto/proto3_proto/proto3.proto | 5 - .../golang/protobuf/proto/testdata/test.pb.go | 679 ++-- .../golang/protobuf/proto/testdata/test.proto | 8 - .../github.com/golang/protobuf/proto/text.go | 2 +- .../golang/protobuf/proto/text_parser.go | 23 +- .../golang/protobuf/proto/text_parser_test.go | 100 - .../protoc-gen-go/descriptor/descriptor.pb.go | 347 +- .../protoc-gen-go/generator/generator.go | 34 +- .../protobuf/protoc-gen-go/grpc/grpc.go | 3 +- .../protoc-gen-go/plugin/plugin.pb.go | 42 +- .../protobuf/protoc-gen-go/testdata/Makefile | 3 +- .../protoc-gen-go/testdata/my_test/test.pb.go | 48 +- .../testdata/my_test/test.pb.go.golden | 48 +- .../protoc-gen-go/testdata/proto3.proto | 1 - .../golang/protobuf/ptypes/any/any.pb.go | 30 +- .../golang/protobuf/ptypes/any/any.proto | 16 +- .../protobuf/ptypes/duration/duration.pb.go | 14 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 16 +- .../protobuf/ptypes/struct/struct.pb.go | 54 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 15 +- .../protobuf/ptypes/wrappers/wrappers.pb.go | 22 +- vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 15 - vendor/github.com/golang/snappy/CONTRIBUTORS | 37 - vendor/github.com/golang/snappy/LICENSE | 27 - vendor/github.com/golang/snappy/README | 107 - .../golang/snappy/cmd/snappytool/main.cpp | 77 - vendor/github.com/golang/snappy/decode.go | 237 -- .../github.com/golang/snappy/decode_amd64.go | 14 - .../github.com/golang/snappy/decode_amd64.s | 490 --- .../github.com/golang/snappy/decode_other.go | 101 - vendor/github.com/golang/snappy/encode.go | 285 -- .../github.com/golang/snappy/encode_amd64.go | 29 - .../github.com/golang/snappy/encode_amd64.s | 730 ---- .../github.com/golang/snappy/encode_other.go | 238 -- .../github.com/golang/snappy/golden_test.go | 1965 ----------- vendor/github.com/golang/snappy/snappy.go | 87 - .../github.com/golang/snappy/snappy_test.go | 1353 -------- .../snappy/testdata/Mark.Twain-Tom.Sawyer.txt | 396 --- .../Mark.Twain-Tom.Sawyer.txt.rawsnappy | Bin 9871 -> 0 bytes vendor/github.com/google/go-github/AUTHORS | 3 + .../go-github/github/activity_events.go | 6 +- .../go-github/github/activity_events_test.go | 4 +- .../google/go-github/github/admin.go | 100 + .../google/go-github/github/admin_test.go | 80 + .../google/go-github/github/event_types.go | 20 + .../google/go-github/github/github.go | 2 + .../google/go-github/github/messages.go | 5 +- .../google/go-github/github/messages_test.go | 4 + .../google/go-github/github/pulls_reviews.go | 19 + .../google/go-github/github/repos.go | 110 +- .../google/go-github/github/repos_test.go | 115 +- .../github.com/hashicorp/hcl/decoder_test.go | 9 +- .../hashicorp/hcl/hcl/parser/parser.go | 5 +- .../printer/testdata/multiline_string.golden | 4 +- .../printer/testdata/multiline_string.input | 4 +- .../hashicorp/hcl/hcl/scanner/scanner.go | 2 +- .../hashicorp/hcl/hcl/scanner/scanner_test.go | 33 +- .../hashicorp/hcl/hcl/strconv/quote.go | 7 + .../hashicorp/hcl/hcl/strconv/quote_test.go | 4 + .../multiline_literal_with_hil.hcl | 2 + .../hashicorp/hcl/testhelper/unix2dos.go | 2 +- 
vendor/github.com/klauspost/crc32/.gitignore | 24 - vendor/github.com/klauspost/crc32/.travis.yml | 13 - vendor/github.com/klauspost/crc32/LICENSE | 28 - vendor/github.com/klauspost/crc32/README.md | 87 - vendor/github.com/klauspost/crc32/crc32.go | 207 -- .../github.com/klauspost/crc32/crc32_amd64.go | 230 -- .../github.com/klauspost/crc32/crc32_amd64.s | 319 -- .../klauspost/crc32/crc32_amd64p32.go | 43 - .../klauspost/crc32/crc32_amd64p32.s | 67 - .../klauspost/crc32/crc32_generic.go | 89 - .../klauspost/crc32/crc32_otherarch.go | 15 - .../github.com/klauspost/crc32/crc32_s390x.go | 91 - .../github.com/klauspost/crc32/crc32_s390x.s | 249 -- .../github.com/klauspost/crc32/crc32_test.go | 284 -- .../klauspost/crc32/example_test.go | 28 - .../magiconair/properties/.travis.yml | 2 +- .../magiconair/properties/CHANGELOG.md | 4 + .../magiconair/properties/properties.go | 6 +- vendor/github.com/nats-io/go-nats/.travis.yml | 19 - vendor/github.com/nats-io/go-nats/README.md | 322 -- vendor/github.com/nats-io/go-nats/TODO.md | 26 - .../github.com/nats-io/go-nats/bench/bench.go | 354 -- .../nats-io/go-nats/bench/benchlib_test.go | 226 -- vendor/github.com/nats-io/go-nats/enc.go | 249 -- vendor/github.com/nats-io/go-nats/enc_test.go | 257 -- .../go-nats/encoders/builtin/default_enc.go | 106 - .../go-nats/encoders/builtin/enc_test.go | 449 --- .../go-nats/encoders/builtin/gob_enc.go | 34 - .../go-nats/encoders/builtin/gob_test.go | 129 - .../go-nats/encoders/builtin/json_enc.go | 45 - .../go-nats/encoders/builtin/json_test.go | 210 -- .../go-nats/encoders/protobuf/protobuf_enc.go | 66 - .../encoders/protobuf/protobuf_test.go | 129 - .../encoders/protobuf/testdata/pbtest.pb.go | 40 - .../encoders/protobuf/testdata/pbtest.proto | 11 - .../nats-io/go-nats/example_test.go | 266 -- .../nats-io/go-nats/examples/nats-bench.go | 146 - .../nats-io/go-nats/examples/nats-pub.go | 46 - .../nats-io/go-nats/examples/nats-qsub.go | 60 - .../nats-io/go-nats/examples/nats-req.go | 48 - .../nats-io/go-nats/examples/nats-rply.go | 60 - .../nats-io/go-nats/examples/nats-sub.go | 59 - vendor/github.com/nats-io/go-nats/nats.go | 2630 -------------- .../github.com/nats-io/go-nats/nats_test.go | 1100 ------ vendor/github.com/nats-io/go-nats/netchan.go | 100 - vendor/github.com/nats-io/go-nats/parser.go | 470 --- .../github.com/nats-io/go-nats/scripts/cov.sh | 19 - .../nats-io/go-nats/test/auth_test.go | 180 - .../nats-io/go-nats/test/basic_test.go | 797 ----- .../nats-io/go-nats/test/bench_test.go | 129 - .../nats-io/go-nats/test/cluster_test.go | 605 ---- .../nats-io/go-nats/test/configs/certs/ca.pem | 38 - .../test/configs/certs/client-cert.pem | 30 - .../go-nats/test/configs/certs/client-key.pem | 51 - .../go-nats/test/configs/certs/key.pem | 51 - .../go-nats/test/configs/certs/server.pem | 31 - .../nats-io/go-nats/test/configs/tls.conf | 17 - .../go-nats/test/configs/tlsverify.conf | 17 - .../nats-io/go-nats/test/conn_test.go | 1297 ------- .../nats-io/go-nats/test/netchan_test.go | 355 -- .../nats-io/go-nats/test/reconnect_test.go | 623 ---- .../nats-io/go-nats/test/sub_test.go | 1473 -------- .../github.com/nats-io/go-nats/test/test.go | 93 - vendor/github.com/nats-io/go-nats/util/tls.go | 37 - .../nats-io/go-nats/util/tls_pre17.go | 35 - vendor/github.com/nats-io/nats/.travis.yml | 5 +- vendor/github.com/nats-io/nats/README.md | 4 +- vendor/github.com/nats-io/nats/bench/bench.go | 2 +- .../nats-io/nats/bench/benchlib_test.go | 3 +- vendor/github.com/nats-io/nats/enc.go | 2 +- 
vendor/github.com/nats-io/nats/enc_test.go | 6 +- .../nats-io/nats/encoders/builtin/enc_test.go | 6 +- .../nats-io/nats/encoders/builtin/gob_test.go | 4 +- .../nats/encoders/builtin/json_test.go | 6 +- .../nats/encoders/protobuf/protobuf_enc.go | 2 +- .../nats/encoders/protobuf/protobuf_test.go | 8 +- .../github.com/nats-io/nats/example_test.go | 2 +- .../nats-io/nats/examples/nats-bench.go | 4 +- .../nats-io/nats/examples/nats-pub.go | 2 +- .../nats-io/nats/examples/nats-qsub.go | 2 +- .../nats-io/nats/examples/nats-req.go | 2 +- .../nats-io/nats/examples/nats-rply.go | 2 +- .../nats-io/nats/examples/nats-sub.go | 2 +- vendor/github.com/nats-io/nats/nats.go | 4 +- vendor/github.com/nats-io/nats/nats_test.go | 8 +- vendor/github.com/nats-io/nats/scripts/cov.sh | 2 +- .../github.com/nats-io/nats/test/auth_test.go | 2 +- .../nats-io/nats/test/basic_test.go | 2 +- .../nats-io/nats/test/bench_test.go | 2 +- .../nats-io/nats/test/cluster_test.go | 8 +- .../github.com/nats-io/nats/test/conn_test.go | 2 +- .../nats-io/nats/test/netchan_test.go | 2 +- .../nats-io/nats/test/reconnect_test.go | 2 +- .../github.com/nats-io/nats/test/sub_test.go | 2 +- vendor/github.com/nats-io/nats/test/test.go | 2 +- .../runc/libcontainer/cgroups/fs/cpuset.go | 26 +- .../runc/libcontainer/label/label_selinux.go | 2 +- .../libcontainer/stacktrace/capture_test.go | 2 +- .../prometheus/common/route/route.go | 7 +- .../prometheus/common/route/route_test.go | 2 +- .../x/crypto/acme/autocert/autocert.go | 1 + .../x/crypto/acme/autocert/autocert_test.go | 20 +- .../golang.org/x/oauth2/google/appengine.go | 3 + .../x/oauth2/google/appengine_hook.go | 1 + .../x/oauth2/google/appenginevm_hook.go | 1 + vendor/golang.org/x/oauth2/google/default.go | 74 +- vendor/golang.org/x/oauth2/google/google.go | 1 + vendor/golang.org/x/sys/plan9/const_plan9.go | 11 - .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 2 + vendor/golang.org/x/sys/unix/.gitignore | 1 - ...sm_linux_mips64x.s => asm_dragonfly_386.s} | 17 +- .../golang.org/x/sys/unix/asm_linux_s390x.s | 28 - .../golang.org/x/sys/unix/bluetooth_linux.go | 35 - .../x/sys/unix/gccgo_linux_sparc64.go | 20 - vendor/golang.org/x/sys/unix/mkall.sh | 30 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 42 +- vendor/golang.org/x/sys/unix/mkpost.go | 62 - .../x/sys/unix/mksyscall_solaris.pl | 4 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 8 +- vendor/golang.org/x/sys/unix/syscall.go | 2 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 79 +- .../golang.org/x/sys/unix/syscall_bsd_test.go | 41 +- .../golang.org/x/sys/unix/syscall_darwin.go | 2 - .../x/sys/unix/syscall_darwin_386.go | 2 + .../x/sys/unix/syscall_darwin_amd64.go | 4 +- .../x/sys/unix/syscall_darwin_arm.go | 2 + .../x/sys/unix/syscall_darwin_arm64.go | 2 + .../x/sys/unix/syscall_dragonfly.go | 1 - .../x/sys/unix/syscall_dragonfly_386.go | 63 + .../x/sys/unix/syscall_dragonfly_amd64.go | 2 + .../golang.org/x/sys/unix/syscall_freebsd.go | 1 - .../x/sys/unix/syscall_freebsd_386.go | 2 + .../x/sys/unix/syscall_freebsd_amd64.go | 2 + .../x/sys/unix/syscall_freebsd_arm.go | 2 + .../x/sys/unix/syscall_freebsd_test.go | 24 - vendor/golang.org/x/sys/unix/syscall_linux.go | 112 +- .../x/sys/unix/syscall_linux_386.go | 15 +- .../x/sys/unix/syscall_linux_amd64.go | 15 +- .../x/sys/unix/syscall_linux_arm.go | 34 +- .../x/sys/unix/syscall_linux_arm64.go | 50 +- .../x/sys/unix/syscall_linux_mips64x.go | 208 -- .../x/sys/unix/syscall_linux_ppc64x.go | 43 +- .../x/sys/unix/syscall_linux_s390x.go | 329 -- .../x/sys/unix/syscall_linux_sparc64.go | 169 -
.../x/sys/unix/syscall_linux_test.go | 186 - .../x/sys/unix/syscall_netbsd_386.go | 2 + .../x/sys/unix/syscall_netbsd_amd64.go | 2 + .../x/sys/unix/syscall_netbsd_arm.go | 2 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 - .../x/sys/unix/syscall_openbsd_386.go | 2 + .../x/sys/unix/syscall_openbsd_amd64.go | 2 + .../golang.org/x/sys/unix/syscall_solaris.go | 263 +- .../x/sys/unix/syscall_solaris_amd64.go | 4 + .../x/sys/unix/syscall_unix_test.go | 35 - vendor/golang.org/x/sys/unix/types_darwin.go | 7 - vendor/golang.org/x/sys/unix/types_linux.go | 68 +- vendor/golang.org/x/sys/unix/types_solaris.go | 40 - .../x/sys/unix/zerrors_dragonfly_386.go | 1530 +++++++++ .../x/sys/unix/zerrors_freebsd_386.go | 14 - .../x/sys/unix/zerrors_freebsd_amd64.go | 14 - .../x/sys/unix/zerrors_linux_386.go | 77 - .../x/sys/unix/zerrors_linux_amd64.go | 77 - .../x/sys/unix/zerrors_linux_arm.go | 185 - .../x/sys/unix/zerrors_linux_arm64.go | 73 - .../x/sys/unix/zerrors_linux_mips64.go | 1917 ----------- .../x/sys/unix/zerrors_linux_mips64le.go | 1917 ----------- .../x/sys/unix/zerrors_linux_ppc64.go | 67 - .../x/sys/unix/zerrors_linux_ppc64le.go | 67 - .../x/sys/unix/zerrors_linux_s390x.go | 2046 ----------- .../x/sys/unix/zerrors_linux_sparc64.go | 2096 ----------- .../x/sys/unix/zerrors_solaris_amd64.go | 26 +- .../x/sys/unix/zsyscall_darwin_386.go | 1 - .../x/sys/unix/zsyscall_darwin_amd64.go | 17 - .../x/sys/unix/zsyscall_darwin_arm.go | 1 - .../x/sys/unix/zsyscall_darwin_arm64.go | 1 - ...nux_s390x.go => zsyscall_dragonfly_386.go} | 1146 +++--- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_386.go | 1 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm.go | 1 - .../x/sys/unix/zsyscall_linux_386.go | 151 +- .../x/sys/unix/zsyscall_linux_amd64.go | 151 +- .../x/sys/unix/zsyscall_linux_arm.go | 132 +- .../x/sys/unix/zsyscall_linux_arm64.go | 125 +- .../x/sys/unix/zsyscall_linux_mips64.go | 1814 ---------- .../x/sys/unix/zsyscall_linux_mips64le.go | 1814 ---------- .../x/sys/unix/zsyscall_linux_ppc64.go | 192 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 192 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 1845 ---------- .../x/sys/unix/zsyscall_netbsd_386.go | 1 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1 - .../x/sys/unix/zsyscall_openbsd_386.go | 1 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 1 - .../x/sys/unix/zsyscall_solaris_amd64.go | 616 +--- .../x/sys/unix/zsysnum_darwin_arm64.go | 2 +- .../x/sys/unix/zsysnum_dragonfly_386.go | 304 ++ .../x/sys/unix/zsysnum_linux_mips64.go | 327 -- .../x/sys/unix/zsysnum_linux_mips64le.go | 327 -- .../x/sys/unix/zsysnum_linux_s390x.go | 328 -- .../x/sys/unix/zsysnum_linux_sparc64.go | 348 -- .../x/sys/unix/ztypes_darwin_amd64.go | 5 - .../x/sys/unix/ztypes_dragonfly_386.go | 437 +++ .../x/sys/unix/ztypes_freebsd_386.go | 12 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 12 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 67 +- .../x/sys/unix/ztypes_linux_amd64.go | 67 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 167 +- .../x/sys/unix/ztypes_linux_arm64.go | 57 +- .../x/sys/unix/ztypes_linux_mips64.go | 635 ---- .../x/sys/unix/ztypes_linux_mips64le.go | 635 ---- .../x/sys/unix/ztypes_linux_ppc64.go | 63 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 63 +- .../x/sys/unix/ztypes_linux_s390x.go | 657 ---- .../x/sys/unix/ztypes_linux_sparc64.go | 648 ---- .../x/sys/unix/ztypes_solaris_amd64.go | 62 +- vendor/golang.org/x/sys/windows/asm.s | 8 +
.../golang.org/x/sys/windows/dll_windows.go | 135 +- vendor/golang.org/x/sys/windows/env_unset.go | 1 - vendor/golang.org/x/sys/windows/mksyscall.go | 7 - .../x/sys/windows/registry/mksyscall.go | 7 - .../x/sys/windows/registry/registry_test.go | 14 +- .../x/sys/windows/registry/syscall.go | 2 + .../x/sys/windows/registry/value.go | 10 +- .../sys/windows/registry/zsyscall_windows.go | 11 +- .../golang.org/x/sys/windows/svc/mgr/mgr.go | 5 +- vendor/golang.org/x/sys/windows/syscall.go | 6 + .../x/sys/windows/syscall_windows.go | 9 +- .../x/sys/windows/zsyscall_windows.go | 94 +- .../x/sys/windows/ztypes_windows.go | 105 - vendor/golang.org/x/text/internal/gen/code.go | 22 +- .../x/text/secure/precis/options.go | 3 + .../x/text/secure/precis/profile.go | 42 +- .../x/text/unicode/runenames/tables.go | 346 +- vendor/gopkg.in/olivere/elastic.v3/.gitignore | 1 - .../gopkg.in/olivere/elastic.v3/.travis.yml | 32 +- .../olivere/elastic.v3/CHANGELOG-5.0.md | 195 ++ vendor/gopkg.in/olivere/elastic.v3/README.md | 92 +- .../elastic.v3/acknowledged_response.go | 11 + .../olivere/elastic.v3/backoff/backoff.go | 2 +- .../elastic.v3/backoff/backoff_test.go | 2 +- .../olivere/elastic.v3/backoff/retry.go | 2 +- .../olivere/elastic.v3/backoff/retry_test.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/bulk.go | 77 +- .../olivere/elastic.v3/bulk_delete_request.go | 17 +- .../olivere/elastic.v3/bulk_index_request.go | 93 +- .../elastic.v3/bulk_index_request_test.go | 29 +- .../olivere/elastic.v3/bulk_processor.go | 8 +- .../olivere/elastic.v3/bulk_processor_test.go | 20 +- .../olivere/elastic.v3/bulk_request.go | 4 +- .../gopkg.in/olivere/elastic.v3/bulk_test.go | 31 +- .../olivere/elastic.v3/bulk_update_request.go | 41 +- .../elastic.v3/bulk_update_request_test.go | 2 +- .../olivere/elastic.v3/clear_scroll.go | 9 +- .../olivere/elastic.v3/clear_scroll_test.go | 28 +- vendor/gopkg.in/olivere/elastic.v3/client.go | 197 +- .../olivere/elastic.v3/client_test.go | 60 +- .../elastic.v3/cluster-test/cluster-test.go | 26 +- .../olivere/elastic.v3/cluster_health.go | 11 +- .../olivere/elastic.v3/cluster_health_test.go | 12 +- .../olivere/elastic.v3/cluster_state.go | 13 +- .../olivere/elastic.v3/cluster_state_test.go | 6 +- .../olivere/elastic.v3/cluster_stats.go | 11 +- .../olivere/elastic.v3/cluster_stats_test.go | 6 +- .../elastic.v3/config/elasticsearch.yml | 4 +- .../olivere/elastic.v3/config/jvm.options | 100 + .../elastic.v3/config/log4j2.properties | 74 + .../olivere/elastic.v3/config/logging.yml | 15 - .../elastic.v3/config/scripts/.gitkeep | 0 .../gopkg.in/olivere/elastic.v3/connection.go | 4 +- vendor/gopkg.in/olivere/elastic.v3/count.go | 10 +- .../gopkg.in/olivere/elastic.v3/count_test.go | 26 +- vendor/gopkg.in/olivere/elastic.v3/decoder.go | 2 +- .../olivere/elastic.v3/decoder_test.go | 6 +- vendor/gopkg.in/olivere/elastic.v3/delete.go | 71 +- .../olivere/elastic.v3/delete_by_query.go | 621 +++- .../elastic.v3/delete_by_query_test.go | 143 +- .../olivere/elastic.v3/delete_template.go | 24 +- .../elastic.v3/delete_template_test.go | 6 +- .../olivere/elastic.v3/delete_test.go | 30 +- vendor/gopkg.in/olivere/elastic.v3/doc.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/errors.go | 5 +- .../olivere/elastic.v3/example_test.go | 160 +- vendor/gopkg.in/olivere/elastic.v3/exists.go | 23 +- .../olivere/elastic.v3/exists_test.go | 12 +- vendor/gopkg.in/olivere/elastic.v3/explain.go | 13 +- .../olivere/elastic.v3/explain_test.go | 14 +- .../elastic.v3/fetch_source_context.go | 2 +- 
.../elastic.v3/fetch_source_context_test.go | 2 +- .../olivere/elastic.v3/field_stats.go | 11 +- .../gopkg.in/olivere/elastic.v3/geo_point.go | 2 +- .../olivere/elastic.v3/geo_point_test.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/get.go | 71 +- .../olivere/elastic.v3/get_template.go | 13 +- .../olivere/elastic.v3/get_template_test.go | 14 +- .../gopkg.in/olivere/elastic.v3/get_test.go | 34 +- .../gopkg.in/olivere/elastic.v3/highlight.go | 2 +- .../olivere/elastic.v3/highlight_test.go | 15 +- vendor/gopkg.in/olivere/elastic.v3/index.go | 89 +- .../gopkg.in/olivere/elastic.v3/index_test.go | 42 +- .../olivere/elastic.v3/indices_close.go | 13 +- .../olivere/elastic.v3/indices_close_test.go | 20 +- .../olivere/elastic.v3/indices_create.go | 16 +- .../olivere/elastic.v3/indices_create_test.go | 18 +- .../olivere/elastic.v3/indices_delete.go | 13 +- .../elastic.v3/indices_delete_template.go | 13 +- .../olivere/elastic.v3/indices_delete_test.go | 10 +- .../elastic.v3/indices_delete_warmer.go | 137 - .../elastic.v3/indices_delete_warmer_test.go | 48 - .../olivere/elastic.v3/indices_exists.go | 13 +- .../elastic.v3/indices_exists_template.go | 13 +- .../indices_exists_template_test.go | 17 +- .../olivere/elastic.v3/indices_exists_test.go | 10 +- .../olivere/elastic.v3/indices_exists_type.go | 17 +- .../elastic.v3/indices_exists_type_test.go | 28 +- .../olivere/elastic.v3/indices_flush.go | 13 +- .../olivere/elastic.v3/indices_flush_test.go | 6 +- .../olivere/elastic.v3/indices_forcemerge.go | 26 +- .../elastic.v3/indices_forcemerge_test.go | 6 +- .../olivere/elastic.v3/indices_get.go | 13 +- .../olivere/elastic.v3/indices_get_aliases.go | 36 +- .../elastic.v3/indices_get_aliases_test.go | 22 +- .../olivere/elastic.v3/indices_get_mapping.go | 14 +- .../elastic.v3/indices_get_mapping_test.go | 2 +- .../elastic.v3/indices_get_settings.go | 13 +- .../elastic.v3/indices_get_settings_test.go | 6 +- .../elastic.v3/indices_get_template.go | 13 +- .../elastic.v3/indices_get_template_test.go | 2 +- .../olivere/elastic.v3/indices_get_test.go | 8 +- .../olivere/elastic.v3/indices_get_warmer.go | 200 -- .../elastic.v3/indices_get_warmer_test.go | 83 - .../olivere/elastic.v3/indices_open.go | 13 +- .../olivere/elastic.v3/indices_open_test.go | 10 +- .../olivere/elastic.v3/indices_put_alias.go | 9 +- .../elastic.v3/indices_put_alias_test.go | 20 +- .../olivere/elastic.v3/indices_put_mapping.go | 13 +- .../elastic.v3/indices_put_mapping_test.go | 14 +- .../elastic.v3/indices_put_settings.go | 13 +- .../elastic.v3/indices_put_settings_test.go | 12 +- .../elastic.v3/indices_put_template.go | 13 +- .../elastic.v3/indices_put_warmer_test.go | 100 - .../olivere/elastic.v3/indices_refresh.go | 72 +- .../elastic.v3/indices_refresh_test.go | 47 +- .../olivere/elastic.v3/indices_rollover.go | 268 ++ .../elastic.v3/indices_rollover_test.go | 116 + .../olivere/elastic.v3/indices_shrink.go | 174 + .../olivere/elastic.v3/indices_shrink_test.go | 34 + .../olivere/elastic.v3/indices_stats.go | 13 +- .../olivere/elastic.v3/indices_stats_test.go | 6 +- .../elastic.v3/ingest_delete_pipeline.go | 124 + .../elastic.v3/ingest_delete_pipeline_test.go | 31 + .../olivere/elastic.v3/ingest_get_pipeline.go | 118 + .../elastic.v3/ingest_get_pipeline_test.go | 118 + .../olivere/elastic.v3/ingest_put_pipeline.go | 152 + .../elastic.v3/ingest_put_pipeline_test.go | 31 + .../elastic.v3/ingest_simulate_pipeline.go | 157 + .../ingest_simulate_pipeline_test.go | 35 + .../gopkg.in/olivere/elastic.v3/inner_hit.go | 22 +- 
.../olivere/elastic.v3/inner_hit_test.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/logger.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/mget.go | 137 +- .../gopkg.in/olivere/elastic.v3/mget_test.go | 16 +- vendor/gopkg.in/olivere/elastic.v3/msearch.go | 10 +- .../olivere/elastic.v3/msearch_test.go | 24 +- .../olivere/elastic.v3/mtermvectors.go | 11 +- .../olivere/elastic.v3/mtermvectors_test.go | 14 +- .../gopkg.in/olivere/elastic.v3/nodes_info.go | 13 +- .../olivere/elastic.v3/nodes_info_test.go | 10 +- .../olivere/elastic.v3/nodes_stats.go | 75 +- .../olivere/elastic.v3/nodes_stats_test.go | 8 +- .../gopkg.in/olivere/elastic.v3/optimize.go | 135 - .../olivere/elastic.v3/optimize_test.go | 47 - .../gopkg.in/olivere/elastic.v3/percolate.go | 315 -- .../olivere/elastic.v3/percolate_test.go | 98 +- vendor/gopkg.in/olivere/elastic.v3/ping.go | 17 +- .../gopkg.in/olivere/elastic.v3/ping_test.go | 10 +- vendor/gopkg.in/olivere/elastic.v3/plugins.go | 6 +- .../olivere/elastic.v3/plugins_test.go | 2 +- .../{search_template.go => put_template.go} | 19 +- .../olivere/elastic.v3/put_template_test.go | 54 + vendor/gopkg.in/olivere/elastic.v3/query.go | 2 +- .../elastic.v3/recipes/sliced_scroll.go | 161 + vendor/gopkg.in/olivere/elastic.v3/reindex.go | 132 +- .../olivere/elastic.v3/reindex_test.go | 66 +- .../gopkg.in/olivere/elastic.v3/reindexer.go | 270 -- .../olivere/elastic.v3/reindexer_test.go | 285 -- vendor/gopkg.in/olivere/elastic.v3/request.go | 8 +- vendor/gopkg.in/olivere/elastic.v3/rescore.go | 2 +- .../gopkg.in/olivere/elastic.v3/rescorer.go | 2 +- .../gopkg.in/olivere/elastic.v3/response.go | 2 +- .../{run-es.sh => run-es-5.0.0-beta1.sh} | 3 +- .../olivere/elastic.v3/run-es-5.0.0-rc1.sh | 1 + .../olivere/elastic.v3/run-es-5.0.0.sh | 1 + .../olivere/elastic.v3/run-es-5.0.1.sh | 1 + vendor/gopkg.in/olivere/elastic.v3/scan.go | 375 -- .../gopkg.in/olivere/elastic.v3/scan_test.go | 658 ---- vendor/gopkg.in/olivere/elastic.v3/script.go | 2 +- .../olivere/elastic.v3/script_test.go | 2 +- vendor/gopkg.in/olivere/elastic.v3/scroll.go | 43 +- .../olivere/elastic.v3/scroll_test.go | 106 +- vendor/gopkg.in/olivere/elastic.v3/search.go | 63 +- .../olivere/elastic.v3/search_aggs.go | 4 +- .../elastic.v3/search_aggs_bucket_children.go | 2 +- .../search_aggs_bucket_children_test.go | 2 +- .../search_aggs_bucket_date_histogram.go | 2 +- .../search_aggs_bucket_date_histogram_test.go | 2 +- .../search_aggs_bucket_date_range.go | 2 +- .../search_aggs_bucket_date_range_test.go | 2 +- .../elastic.v3/search_aggs_bucket_filter.go | 2 +- .../search_aggs_bucket_filter_test.go | 2 +- .../elastic.v3/search_aggs_bucket_filters.go | 2 +- .../search_aggs_bucket_filters_test.go | 2 +- .../search_aggs_bucket_geo_distance.go | 2 +- .../search_aggs_bucket_geo_distance_test.go | 2 +- .../elastic.v3/search_aggs_bucket_global.go | 2 +- .../search_aggs_bucket_global_test.go | 2 +- .../search_aggs_bucket_histogram.go | 2 +- .../search_aggs_bucket_histogram_test.go | 2 +- .../elastic.v3/search_aggs_bucket_missing.go | 2 +- .../search_aggs_bucket_missing_test.go | 2 +- .../elastic.v3/search_aggs_bucket_nested.go | 2 +- .../search_aggs_bucket_nested_test.go | 2 +- .../elastic.v3/search_aggs_bucket_range.go | 2 +- .../search_aggs_bucket_range_test.go | 2 +- .../elastic.v3/search_aggs_bucket_sampler.go | 37 +- .../search_aggs_bucket_sampler_test.go | 26 +- .../search_aggs_bucket_significant_terms.go | 2 +- ...arch_aggs_bucket_significant_terms_test.go | 2 +- .../elastic.v3/search_aggs_bucket_terms.go | 2 +- 
.../search_aggs_bucket_terms_test.go | 2 +- .../elastic.v3/search_aggs_metrics_avg.go | 2 +- .../search_aggs_metrics_avg_test.go | 2 +- .../search_aggs_metrics_cardinality.go | 2 +- .../search_aggs_metrics_cardinality_test.go | 2 +- .../search_aggs_metrics_extended_stats.go | 2 +- ...search_aggs_metrics_extended_stats_test.go | 2 +- .../search_aggs_metrics_geo_bounds.go | 2 +- .../search_aggs_metrics_geo_bounds_test.go | 2 +- .../elastic.v3/search_aggs_metrics_max.go | 2 +- .../search_aggs_metrics_max_test.go | 2 +- .../elastic.v3/search_aggs_metrics_min.go | 2 +- .../search_aggs_metrics_min_test.go | 2 +- .../search_aggs_metrics_percentile_ranks.go | 2 +- ...arch_aggs_metrics_percentile_ranks_test.go | 2 +- .../search_aggs_metrics_percentiles.go | 2 +- .../search_aggs_metrics_percentiles_test.go | 2 +- .../elastic.v3/search_aggs_metrics_stats.go | 2 +- .../search_aggs_metrics_stats_test.go | 2 +- .../elastic.v3/search_aggs_metrics_sum.go | 2 +- .../search_aggs_metrics_sum_test.go | 2 +- .../search_aggs_metrics_top_hits.go | 14 +- .../search_aggs_metrics_top_hits_test.go | 2 +- .../search_aggs_metrics_value_count.go | 2 +- .../search_aggs_metrics_value_count_test.go | 2 +- .../search_aggs_pipeline_avg_bucket.go | 2 +- .../search_aggs_pipeline_avg_bucket_test.go | 2 +- .../search_aggs_pipeline_bucket_script.go | 2 +- ...search_aggs_pipeline_bucket_script_test.go | 2 +- .../search_aggs_pipeline_bucket_selector.go | 2 +- ...arch_aggs_pipeline_bucket_selector_test.go | 2 +- .../search_aggs_pipeline_cumulative_sum.go | 2 +- ...earch_aggs_pipeline_cumulative_sum_test.go | 2 +- .../search_aggs_pipeline_derivative.go | 2 +- .../search_aggs_pipeline_derivative_test.go | 2 +- .../search_aggs_pipeline_max_bucket.go | 2 +- .../search_aggs_pipeline_max_bucket_test.go | 2 +- .../search_aggs_pipeline_min_bucket.go | 2 +- .../search_aggs_pipeline_min_bucket_test.go | 2 +- .../search_aggs_pipeline_mov_avg.go | 2 +- .../search_aggs_pipeline_mov_avg_test.go | 2 +- .../search_aggs_pipeline_serial_diff.go | 2 +- .../search_aggs_pipeline_serial_diff_test.go | 2 +- .../search_aggs_pipeline_sum_bucket.go | 2 +- .../search_aggs_pipeline_sum_bucket_test.go | 2 +- .../elastic.v3/search_aggs_pipeline_test.go | 35 +- .../olivere/elastic.v3/search_aggs_test.go | 26 +- .../olivere/elastic.v3/search_queries_bool.go | 2 +- .../elastic.v3/search_queries_bool_test.go | 2 +- .../elastic.v3/search_queries_boosting.go | 2 +- .../search_queries_boosting_test.go | 2 +- .../elastic.v3/search_queries_common_terms.go | 2 +- .../search_queries_common_terms_test.go | 14 +- .../search_queries_constant_score.go | 2 +- .../search_queries_constant_score_test.go | 2 +- .../elastic.v3/search_queries_dis_max.go | 2 +- .../elastic.v3/search_queries_dis_max_test.go | 2 +- .../elastic.v3/search_queries_exists.go | 2 +- .../elastic.v3/search_queries_exists_test.go | 2 +- .../olivere/elastic.v3/search_queries_fsq.go | 17 +- .../search_queries_fsq_score_funcs.go | 2 +- .../elastic.v3/search_queries_fsq_test.go | 2 +- .../elastic.v3/search_queries_fuzzy.go | 2 +- .../elastic.v3/search_queries_fuzzy_test.go | 2 +- .../search_queries_geo_bounding_box.go | 2 +- .../search_queries_geo_bounding_box_test.go | 2 +- .../elastic.v3/search_queries_geo_distance.go | 2 +- .../search_queries_geo_distance_test.go | 2 +- .../elastic.v3/search_queries_geo_polygon.go | 2 +- .../search_queries_geo_polygon_test.go | 2 +- .../elastic.v3/search_queries_has_child.go | 18 +- .../search_queries_has_child_test.go | 6 +- .../elastic.v3/search_queries_has_parent.go | 14 +- 
.../search_queries_has_parent_test.go | 6 +- .../olivere/elastic.v3/search_queries_ids.go | 2 +- .../elastic.v3/search_queries_ids_test.go | 2 +- .../elastic.v3/search_queries_indices.go | 2 +- .../elastic.v3/search_queries_indices_test.go | 2 +- .../elastic.v3/search_queries_match.go | 2 +- .../elastic.v3/search_queries_match_all.go | 2 +- .../search_queries_match_all_test.go | 2 +- .../elastic.v3/search_queries_match_test.go | 2 +- .../elastic.v3/search_queries_missing.go | 67 - .../search_queries_more_like_this.go | 2 +- .../search_queries_more_like_this_test.go | 14 +- .../elastic.v3/search_queries_multi_match.go | 2 +- .../search_queries_multi_match_test.go | 2 +- .../elastic.v3/search_queries_nested.go | 2 +- .../elastic.v3/search_queries_nested_test.go | 2 +- .../olivere/elastic.v3/search_queries_not.go | 45 - .../elastic.v3/search_queries_percolator.go | 115 + .../search_queries_percolator_test.go | 68 + .../elastic.v3/search_queries_prefix.go | 2 +- .../elastic.v3/search_queries_prefix_test.go | 2 +- .../elastic.v3/search_queries_query_string.go | 2 +- .../search_queries_query_string_test.go | 2 +- .../elastic.v3/search_queries_range.go | 2 +- .../elastic.v3/search_queries_range_test.go | 2 +- .../elastic.v3/search_queries_regexp.go | 2 +- .../elastic.v3/search_queries_regexp_test.go | 2 +- .../elastic.v3/search_queries_script.go | 2 +- .../elastic.v3/search_queries_script_test.go | 2 +- .../search_queries_simple_query_string.go | 2 +- ...search_queries_simple_query_string_test.go | 18 +- .../elastic.v3/search_queries_slice.go | 53 + .../elastic.v3/search_queries_slice_test.go | 27 + .../search_queries_template_query.go | 84 - .../olivere/elastic.v3/search_queries_term.go | 2 +- .../elastic.v3/search_queries_term_test.go | 2 +- .../elastic.v3/search_queries_terms.go | 2 +- .../elastic.v3/search_queries_terms_test.go | 2 +- .../olivere/elastic.v3/search_queries_type.go | 2 +- .../elastic.v3/search_queries_type_test.go | 2 +- .../elastic.v3/search_queries_wildcard.go | 2 +- .../search_queries_wildcard_test.go | 11 +- .../olivere/elastic.v3/search_request.go | 2 +- .../olivere/elastic.v3/search_request_test.go | 2 +- .../olivere/elastic.v3/search_source.go | 89 +- .../olivere/elastic.v3/search_source_test.go | 20 +- .../elastic.v3/search_suggester_test.go | 106 +- .../elastic.v3/search_templates_test.go | 98 - .../olivere/elastic.v3/search_test.go | 145 +- .../gopkg.in/olivere/elastic.v3/setup_test.go | 73 +- vendor/gopkg.in/olivere/elastic.v3/sort.go | 88 +- .../gopkg.in/olivere/elastic.v3/sort_test.go | 17 - vendor/gopkg.in/olivere/elastic.v3/suggest.go | 71 +- .../olivere/elastic.v3/suggest_field.go | 29 +- .../olivere/elastic.v3/suggest_field_test.go | 5 +- .../olivere/elastic.v3/suggest_test.go | 62 +- .../gopkg.in/olivere/elastic.v3/suggester.go | 2 +- .../elastic.v3/suggester_completion.go | 4 +- .../elastic.v3/suggester_completion_fuzzy.go | 2 +- .../suggester_completion_fuzzy_test.go | 2 +- .../elastic.v3/suggester_completion_test.go | 2 +- .../olivere/elastic.v3/suggester_context.go | 2 +- .../elastic.v3/suggester_context_category.go | 2 +- .../suggester_context_category_test.go | 2 +- .../elastic.v3/suggester_context_geo.go | 6 +- .../elastic.v3/suggester_context_geo_test.go | 2 +- .../olivere/elastic.v3/suggester_phrase.go | 17 +- .../elastic.v3/suggester_phrase_test.go | 2 +- .../olivere/elastic.v3/suggester_term.go | 18 +- .../olivere/elastic.v3/suggester_term_test.go | 2 +- .../olivere/elastic.v3/tasks_cancel.go | 11 +- .../olivere/elastic.v3/tasks_cancel_test.go | 6 +-
.../gopkg.in/olivere/elastic.v3/tasks_list.go | 11 +- .../olivere/elastic.v3/tasks_list_test.go | 8 +- .../olivere/elastic.v3/termvectors.go | 13 +- .../olivere/elastic.v3/termvectors_test.go | 22 +- vendor/gopkg.in/olivere/elastic.v3/update.go | 87 +- .../olivere/elastic.v3/update_by_query.go | 245 +- .../elastic.v3/update_by_query_test.go | 49 +- .../olivere/elastic.v3/update_test.go | 83 +- .../olivere/elastic.v5}/.gitignore | 22 +- .../gopkg.in/olivere/elastic.v5/.travis.yml | 19 + .../olivere/elastic.v5/CHANGELOG-3.0.md | 363 ++ .../olivere/elastic.v5/CHANGELOG-5.0.md | 195 ++ .../olivere/elastic.v5/CONTRIBUTING.md | 40 + .../gopkg.in/olivere/elastic.v5/CONTRIBUTORS | 70 + .../olivere/elastic.v5/ISSUE_TEMPLATE.md | 17 + .../olivere/elastic.v5}/LICENSE | 18 +- vendor/gopkg.in/olivere/elastic.v5/README.md | 457 +++ .../elastic.v5/acknowledged_response.go | 11 + .../olivere/elastic.v5/backoff/LICENSE | 22 + .../olivere/elastic.v5/backoff/backoff.go | 159 + .../elastic.v5/backoff/backoff_test.go | 146 + .../olivere/elastic.v5/backoff/retry.go | 53 + .../olivere/elastic.v5/backoff/retry_test.go | 44 + vendor/gopkg.in/olivere/elastic.v5/bulk.go | 397 +++ .../olivere/elastic.v5/bulk_delete_request.go | 145 + .../elastic.v5/bulk_delete_request_test.go | 68 + .../olivere/elastic.v5/bulk_index_request.go | 225 ++ .../elastic.v5/bulk_index_request_test.go | 103 + .../olivere/elastic.v5/bulk_processor.go | 543 +++ .../olivere/elastic.v5/bulk_processor_test.go | 423 +++ .../olivere/elastic.v5/bulk_request.go | 17 + .../gopkg.in/olivere/elastic.v5/bulk_test.go | 508 +++ .../olivere/elastic.v5/bulk_update_request.go | 243 ++ .../elastic.v5/bulk_update_request_test.go | 93 + .../olivere/elastic.v5/canonicalize.go | 38 + .../olivere/elastic.v5/canonicalize_test.go | 61 + .../olivere/elastic.v5/clear_scroll.go | 103 + .../olivere/elastic.v5/clear_scroll_test.go | 88 + vendor/gopkg.in/olivere/elastic.v5/client.go | 1608 +++++++++ .../olivere/elastic.v5/client_test.go | 1025 ++++++ .../olivere/elastic.v5/cluster-test/Makefile | 16 + .../olivere/elastic.v5/cluster-test/README.md | 63 + .../elastic.v5/cluster-test/cluster-test.go | 362 ++ .../olivere/elastic.v5/cluster_health.go | 245 ++ .../olivere/elastic.v5/cluster_health_test.go | 120 + .../olivere/elastic.v5/cluster_state.go | 285 ++ .../olivere/elastic.v5/cluster_state_test.go | 94 + .../olivere/elastic.v5/cluster_stats.go | 350 ++ .../olivere/elastic.v5/cluster_stats_test.go | 93 + .../elastic.v5/config/elasticsearch.yml | 15 + .../olivere/elastic.v5/config/jvm.options | 100 + .../elastic.v5/config/log4j2.properties | 74 + .../elastic.v5/config/scripts/.gitkeep | 0 .../gopkg.in/olivere/elastic.v5/connection.go | 90 + vendor/gopkg.in/olivere/elastic.v5/count.go | 311 ++ .../gopkg.in/olivere/elastic.v5/count_test.go | 128 + vendor/gopkg.in/olivere/elastic.v5/decoder.go | 26 + .../olivere/elastic.v5/decoder_test.go | 51 + vendor/gopkg.in/olivere/elastic.v5/delete.go | 209 ++ .../olivere/elastic.v5/delete_by_query.go | 649 ++++ .../elastic.v5/delete_by_query_test.go | 147 + .../olivere/elastic.v5/delete_template.go | 110 + .../elastic.v5/delete_template_test.go | 24 + .../olivere/elastic.v5/delete_test.go | 120 + vendor/gopkg.in/olivere/elastic.v5/doc.go | 51 + vendor/gopkg.in/olivere/elastic.v5/errors.go | 141 + .../olivere/elastic.v5/errors_test.go | 202 ++ .../olivere/elastic.v5/example_test.go | 549 +++ vendor/gopkg.in/olivere/elastic.v5/exists.go | 177 + .../olivere/elastic.v5/exists_test.go | 54 + vendor/gopkg.in/olivere/elastic.v5/explain.go | 322 ++
.../olivere/elastic.v5/explain_test.go | 45 + .../elastic.v5/fetch_source_context.go | 74 + .../elastic.v5/fetch_source_context_test.go | 125 + .../olivere/elastic.v5/field_stats.go | 257 ++ .../olivere/elastic.v5/field_stats_test.go | 267 ++ .../gopkg.in/olivere/elastic.v5/geo_point.go | 48 + .../olivere/elastic.v5/geo_point_test.go | 24 + vendor/gopkg.in/olivere/elastic.v5/get.go | 257 + .../olivere/elastic.v5/get_template.go | 114 + .../olivere/elastic.v5/get_template_test.go | 53 + .../gopkg.in/olivere/elastic.v5/get_test.go | 167 + .../gopkg.in/olivere/elastic.v5/highlight.go | 455 +++ .../olivere/elastic.v5/highlight_test.go | 193 ++ vendor/gopkg.in/olivere/elastic.v5/index.go | 289 ++ .../gopkg.in/olivere/elastic.v5/index_test.go | 281 ++ .../olivere/elastic.v5/indices_close.go | 154 + .../olivere/elastic.v5/indices_close_test.go | 85 + .../olivere/elastic.v5/indices_create.go | 131 + .../olivere/elastic.v5/indices_create_test.go | 64 + .../olivere/elastic.v5/indices_delete.go | 130 + .../elastic.v5/indices_delete_template.go | 123 + .../olivere/elastic.v5/indices_delete_test.go | 24 + .../olivere/elastic.v5/indices_exists.go | 151 + .../elastic.v5/indices_exists_template.go | 114 + .../indices_exists_template_test.go | 69 + .../olivere/elastic.v5/indices_exists_test.go | 24 + .../olivere/elastic.v5/indices_exists_type.go | 161 + .../elastic.v5/indices_exists_type_test.go | 136 + .../olivere/elastic.v5/indices_flush.go | 170 + .../olivere/elastic.v5/indices_flush_test.go | 71 + .../olivere/elastic.v5/indices_forcemerge.go | 190 + .../elastic.v5/indices_forcemerge_test.go | 58 + .../olivere/elastic.v5/indices_get.go | 203 ++ .../olivere/elastic.v5/indices_get_aliases.go | 158 + .../elastic.v5/indices_get_aliases_test.go | 181 + .../olivere/elastic.v5/indices_get_mapping.go | 171 + .../elastic.v5/indices_get_mapping_test.go | 50 + .../elastic.v5/indices_get_settings.go | 184 + .../elastic.v5/indices_get_settings_test.go | 83 + .../elastic.v5/indices_get_template.go | 129 + .../elastic.v5/indices_get_template_test.go | 41 + .../olivere/elastic.v5/indices_get_test.go | 99 + .../olivere/elastic.v5/indices_open.go | 158 + .../olivere/elastic.v5/indices_open_test.go | 24 + .../olivere/elastic.v5/indices_put_alias.go | 296 ++ .../elastic.v5/indices_put_alias_test.go | 223 ++ .../indices_put_mapping.go} | 130 +- .../elastic.v5/indices_put_mapping_test.go | 86 + .../elastic.v5/indices_put_settings.go | 185 + .../elastic.v5/indices_put_settings_test.go | 96 + .../elastic.v5/indices_put_template.go | 180 + .../olivere/elastic.v5/indices_refresh.go | 105 + .../elastic.v5/indices_refresh_test.go | 82 + .../olivere/elastic.v5/indices_rollover.go | 268 ++ .../elastic.v5/indices_rollover_test.go | 116 + .../olivere/elastic.v5/indices_shrink.go | 174 + .../olivere/elastic.v5/indices_shrink_test.go | 34 + .../olivere/elastic.v5/indices_stats.go | 386 +++ .../olivere/elastic.v5/indices_stats_test.go | 87 + .../elastic.v5/ingest_delete_pipeline.go | 124 + .../elastic.v5/ingest_delete_pipeline_test.go | 31 + .../olivere/elastic.v5/ingest_get_pipeline.go | 118 + .../elastic.v5/ingest_get_pipeline_test.go | 118 + .../olivere/elastic.v5/ingest_put_pipeline.go | 152 + .../elastic.v5/ingest_put_pipeline_test.go | 31 + .../elastic.v5/ingest_simulate_pipeline.go | 157 + .../ingest_simulate_pipeline_test.go | 35 + .../gopkg.in/olivere/elastic.v5/inner_hit.go | 160 + .../inner_hit_test.go} | 20 +- vendor/gopkg.in/olivere/elastic.v5/logger.go | 10 + vendor/gopkg.in/olivere/elastic.v5/mget.go | 253 ++
.../gopkg.in/olivere/elastic.v5/mget_test.go | 97 + vendor/gopkg.in/olivere/elastic.v5/msearch.go | 98 + .../olivere/elastic.v5/msearch_test.go | 199 ++ .../olivere/elastic.v5/mtermvectors.go | 471 +++ .../olivere/elastic.v5/mtermvectors_test.go | 135 + .../gopkg.in/olivere/elastic.v5/nodes_info.go | 310 ++ .../olivere/elastic.v5/nodes_info_test.go | 44 + .../olivere/elastic.v5/nodes_stats.go | 707 ++++ .../olivere/elastic.v5/nodes_stats_test.go | 139 + .../olivere/elastic.v5/percolate_test.go | 58 + vendor/gopkg.in/olivere/elastic.v5/ping.go | 129 + .../gopkg.in/olivere/elastic.v5/ping_test.go | 66 + vendor/gopkg.in/olivere/elastic.v5/plugins.go | 40 + .../olivere/elastic.v5/plugins_test.go | 32 + .../olivere/elastic.v5/put_template.go | 146 + .../olivere/elastic.v5/put_template_test.go | 54 + vendor/gopkg.in/olivere/elastic.v5/query.go | 13 + vendor/gopkg.in/olivere/elastic.v5/reindex.go | 553 +++ .../olivere/elastic.v5/reindex_test.go | 292 ++ vendor/gopkg.in/olivere/elastic.v5/request.go | 123 + .../olivere/elastic.v5/request_test.go | 107 + vendor/gopkg.in/olivere/elastic.v5/rescore.go | 44 + .../gopkg.in/olivere/elastic.v5/rescorer.go | 64 + .../gopkg.in/olivere/elastic.v5/response.go | 43 + .../olivere/elastic.v5/run-es-5.0.0-beta1.sh | 1 + .../olivere/elastic.v5/run-es-5.0.0-rc1.sh | 1 + .../olivere/elastic.v5/run-es-5.0.0.sh | 1 + .../olivere/elastic.v5/run-es-5.0.1.sh | 1 + vendor/gopkg.in/olivere/elastic.v5/script.go | 131 + .../olivere/elastic.v5/script_test.go | 78 + vendor/gopkg.in/olivere/elastic.v5/scroll.go | 447 +++ .../olivere/elastic.v5/scroll_test.go | 328 ++ vendor/gopkg.in/olivere/elastic.v5/search.go | 488 +++ .../olivere/elastic.v5/search_aggs.go | 1274 +++++++ .../elastic.v5/search_aggs_bucket_children.go | 76 + .../search_aggs_bucket_children_test.go | 46 + .../search_aggs_bucket_date_histogram.go | 285 ++ .../search_aggs_bucket_date_histogram_test.go | 49 + .../search_aggs_bucket_date_range.go | 234 ++ .../search_aggs_bucket_date_range_test.go | 130 + .../elastic.v5/search_aggs_bucket_filter.go | 77 + .../search_aggs_bucket_filter_test.go | 66 + .../elastic.v5/search_aggs_bucket_filters.go | 138 + .../search_aggs_bucket_filters_test.go | 99 + .../search_aggs_bucket_geo_distance.go | 194 ++ .../search_aggs_bucket_geo_distance_test.go | 71 + .../search_aggs_bucket_geohash_grid.go | 102 + .../search_aggs_bucket_geohash_grid_test.go | 84 + .../elastic.v5/search_aggs_bucket_global.go | 71 + .../search_aggs_bucket_global_test.go | 44 + .../search_aggs_bucket_histogram.go | 253 ++ .../search_aggs_bucket_histogram_test.go | 61 + .../elastic.v5/search_aggs_bucket_missing.go | 81 + .../search_aggs_bucket_missing_test.go | 44 + .../elastic.v5/search_aggs_bucket_nested.go | 82 + .../search_aggs_bucket_nested_test.go | 62 + .../elastic.v5/search_aggs_bucket_range.go | 232 ++ .../search_aggs_bucket_range_test.go | 156 + .../search_aggs_bucket_reverse_nested.go | 86 + .../search_aggs_bucket_reverse_nested_test.go | 83 + .../elastic.v5/search_aggs_bucket_sampler.go | 110 + .../search_aggs_bucket_sampler_test.go | 30 + .../search_aggs_bucket_significant_terms.go | 389 +++ ...arch_aggs_bucket_significant_terms_test.go | 211 ++ .../elastic.v5/search_aggs_bucket_terms.go | 341 ++ .../search_aggs_bucket_terms_test.go | 104 + .../elastic.v5/search_aggs_metrics_avg.go | 101 + .../search_aggs_metrics_avg_test.go | 61 + .../search_aggs_metrics_cardinality.go | 120 + .../search_aggs_metrics_cardinality_test.go | 78 + .../search_aggs_metrics_extended_stats.go | 99 + 
...search_aggs_metrics_extended_stats_test.go | 44 + .../search_aggs_metrics_geo_bounds.go | 105 + .../search_aggs_metrics_geo_bounds_test.go | 61 + .../elastic.v5/search_aggs_metrics_max.go | 99 + .../search_aggs_metrics_max_test.go | 61 + .../elastic.v5/search_aggs_metrics_min.go | 100 + .../search_aggs_metrics_min_test.go | 61 + .../search_aggs_metrics_percentile_ranks.go | 131 + ...arch_aggs_metrics_percentile_ranks_test.go | 78 + .../search_aggs_metrics_percentiles.go | 130 + .../search_aggs_metrics_percentiles_test.go | 78 + .../elastic.v5/search_aggs_metrics_stats.go | 99 + .../search_aggs_metrics_stats_test.go | 61 + .../elastic.v5/search_aggs_metrics_sum.go | 99 + .../search_aggs_metrics_sum_test.go | 61 + .../search_aggs_metrics_top_hits.go | 143 + .../search_aggs_metrics_top_hits_test.go | 31 + .../search_aggs_metrics_value_count.go | 102 + .../search_aggs_metrics_value_count_test.go | 63 + .../search_aggs_pipeline_avg_bucket.go | 113 + .../search_aggs_pipeline_avg_bucket_test.go | 27 + .../search_aggs_pipeline_bucket_script.go | 132 + ...search_aggs_pipeline_bucket_script_test.go | 30 + .../search_aggs_pipeline_bucket_selector.go | 134 + ...arch_aggs_pipeline_bucket_selector_test.go | 29 + .../search_aggs_pipeline_cumulative_sum.go | 90 + ...earch_aggs_pipeline_cumulative_sum_test.go | 27 + .../search_aggs_pipeline_derivative.go | 124 + .../search_aggs_pipeline_derivative_test.go | 27 + .../search_aggs_pipeline_max_bucket.go | 114 + .../search_aggs_pipeline_max_bucket_test.go | 27 + .../search_aggs_pipeline_min_bucket.go | 114 + .../search_aggs_pipeline_min_bucket_test.go | 27 + .../search_aggs_pipeline_mov_avg.go | 393 +++ .../search_aggs_pipeline_mov_avg_test.go | 132 + .../search_aggs_pipeline_serial_diff.go | 124 + .../search_aggs_pipeline_serial_diff_test.go | 27 + .../search_aggs_pipeline_sum_bucket.go | 113 + .../search_aggs_pipeline_sum_bucket_test.go | 27 + .../elastic.v5/search_aggs_pipeline_test.go | 1003 ++++++ .../olivere/elastic.v5/search_aggs_test.go | 3054 +++++++++++++++++ .../olivere/elastic.v5/search_queries_bool.go | 212 ++ .../elastic.v5/search_queries_bool_test.go | 34 + .../elastic.v5/search_queries_boosting.go | 97 + .../search_queries_boosting_test.go | 30 + .../elastic.v5/search_queries_common_terms.go | 146 + .../search_queries_common_terms_test.go | 86 + .../search_queries_constant_score.go | 59 + .../search_queries_constant_score_test.go | 27 + .../elastic.v5/search_queries_dis_max.go | 104 + .../elastic.v5/search_queries_dis_max_test.go | 28 + .../elastic.v5/search_queries_exists.go | 49 + .../elastic.v5/search_queries_exists_test.go | 27 + .../olivere/elastic.v5/search_queries_fsq.go | 172 + .../search_queries_fsq_score_funcs.go | 567 +++ .../elastic.v5/search_queries_fsq_test.go | 166 + .../elastic.v5/search_queries_fuzzy.go | 120 + .../elastic.v5/search_queries_fuzzy_test.go | 27 + .../search_queries_geo_bounding_box.go | 121 + .../search_queries_geo_bounding_box_test.go | 63 + .../elastic.v5/search_queries_geo_distance.go | 116 + .../search_queries_geo_distance_test.go | 70 + .../elastic.v5/search_queries_geo_polygon.go | 72 + .../search_queries_geo_polygon_test.go | 58 + .../elastic.v5/search_queries_has_child.go | 131 + .../search_queries_has_child_test.go | 45 + .../elastic.v5/search_queries_has_parent.go | 97 + .../search_queries_has_parent_test.go | 27 + .../olivere/elastic.v5/search_queries_ids.go | 76 + .../elastic.v5/search_queries_ids_test.go | 27 + .../elastic.v5/search_queries_indices.go | 89 + 
.../elastic.v5/search_queries_indices_test.go | 46 + .../elastic.v5/search_queries_match.go | 214 ++ .../elastic.v5/search_queries_match_all.go | 41 + .../search_queries_match_all_test.go} | 14 +- .../elastic.v5/search_queries_match_test.go | 78 + .../search_queries_more_like_this.go | 412 +++ .../search_queries_more_like_this_test.go | 93 + .../elastic.v5/search_queries_multi_match.go | 275 ++ .../search_queries_multi_match_test.go | 131 + .../elastic.v5/search_queries_nested.go | 85 + .../elastic.v5/search_queries_nested_test.go | 52 + .../elastic.v5/search_queries_percolator.go | 115 + .../search_queries_percolator_test.go | 68 + .../elastic.v5/search_queries_prefix.go | 67 + .../elastic.v5/search_queries_prefix_test.go | 45 + .../elastic.v5/search_queries_query_string.go | 359 ++ .../search_queries_query_string_test.go | 46 + .../elastic.v5/search_queries_range.go | 144 + .../search_queries_range_test.go} | 31 +- .../elastic.v5/search_queries_raw_string.go | 26 + .../search_queries_raw_string_test.go | 44 + .../elastic.v5/search_queries_regexp.go | 82 + .../elastic.v5/search_queries_regexp_test.go | 47 + .../elastic.v5/search_queries_script.go | 51 + .../elastic.v5/search_queries_script_test.go | 45 + .../search_queries_simple_query_string.go | 185 + ...search_queries_simple_query_string_test.go | 88 + .../olivere/elastic.v5/search_queries_term.go | 58 + .../elastic.v5/search_queries_term_test.go | 46 + .../elastic.v5/search_queries_terms.go | 58 + .../elastic.v5/search_queries_terms_test.go | 46 + .../olivere/elastic.v5/search_queries_type.go | 26 + .../elastic.v5/search_queries_type_test.go | 27 + .../elastic.v5/search_queries_wildcard.go | 81 + .../search_queries_wildcard_test.go | 68 + .../olivere/elastic.v5/search_request.go | 178 + .../olivere/elastic.v5/search_request_test.go | 48 + .../olivere/elastic.v5/search_source.go | 488 +++ .../olivere/elastic.v5/search_source_test.go | 259 ++ .../elastic.v5/search_suggester_test.go | 241 ++ .../olivere/elastic.v5/search_test.go | 1025 ++++++ .../gopkg.in/olivere/elastic.v5/setup_test.go | 263 ++ vendor/gopkg.in/olivere/elastic.v5/sort.go | 501 +++ .../gopkg.in/olivere/elastic.v5/sort_test.go | 238 ++ vendor/gopkg.in/olivere/elastic.v5/suggest.go | 159 + .../olivere/elastic.v5/suggest_field.go | 83 + .../olivere/elastic.v5/suggest_field_test.go | 29 + .../olivere/elastic.v5/suggest_test.go | 163 + .../gopkg.in/olivere/elastic.v5/suggester.go | 15 + .../elastic.v5/suggester_completion.go | 138 + .../elastic.v5/suggester_completion_fuzzy.go | 179 + .../suggester_completion_fuzzy_test.go | 50 + .../elastic.v5/suggester_completion_test.go | 52 + .../olivere/elastic.v5/suggester_context.go | 11 + .../elastic.v5/suggester_context_category.go | 99 + .../suggester_context_category_test.go | 97 + .../elastic.v5/suggester_context_geo.go | 130 + .../elastic.v5/suggester_context_geo_test.go | 48 + .../olivere/elastic.v5/suggester_phrase.go | 546 +++ .../elastic.v5/suggester_phrase_test.go | 169 + .../olivere/elastic.v5/suggester_term.go | 233 ++ .../olivere/elastic.v5/suggester_term_test.go | 29 + .../olivere/elastic.v5/tasks_cancel.go | 146 + .../olivere/elastic.v5/tasks_cancel_test.go | 51 + .../gopkg.in/olivere/elastic.v5/tasks_list.go | 215 ++ .../olivere/elastic.v5/tasks_list_test.go | 66 + .../olivere/elastic.v5/termvectors.go | 460 +++ .../olivere/elastic.v5/termvectors_test.go | 157 + vendor/gopkg.in/olivere/elastic.v5/update.go | 293 ++ .../olivere/elastic.v5/update_by_query.go | 651 ++++ .../elastic.v5/update_by_query_test.go | 148 + 
.../olivere/elastic.v5/update_test.go | 233 ++ .../olivere/elastic.v5/uritemplates}/LICENSE | 4 +- .../elastic.v5/uritemplates/uritemplates.go | 359 ++ .../olivere/elastic.v5/uritemplates/utils.go | 13 + .../elastic.v5/uritemplates/utils_test.go | 105 + 1218 files changed, 65932 insertions(+), 73995 deletions(-) delete mode 100644 data/kafka/kafka.go delete mode 100644 vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md delete mode 100644 vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md delete mode 100644 vendor/github.com/Shopify/sarama/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/.travis.yml delete mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md delete mode 100644 vendor/github.com/Shopify/sarama/MIT-LICENSE delete mode 100644 vendor/github.com/Shopify/sarama/Makefile delete mode 100644 vendor/github.com/Shopify/sarama/README.md delete mode 100644 vendor/github.com/Shopify/sarama/Vagrantfile delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/async_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/async_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/broker.go delete mode 100644 vendor/github.com/Shopify/sarama/broker_test.go delete mode 100644 vendor/github.com/Shopify/sarama/client.go delete mode 100644 vendor/github.com/Shopify/sarama/client_test.go delete mode 100644 vendor/github.com/Shopify/sarama/config.go delete mode 100644 vendor/github.com/Shopify/sarama/config_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/dev.yml delete mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go delete mode 100644 vendor/github.com/Shopify/sarama/errors.go delete mode 100644 vendor/github.com/Shopify/sarama/examples/README.md delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/README.md delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server.go delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go delete mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go delete mode 100644 
vendor/github.com/Shopify/sarama/fetch_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go delete mode 100644 vendor/github.com/Shopify/sarama/fetch_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_client_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_consumer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_offset_manager_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_test.go delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/length_field.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/message.go delete mode 100644 vendor/github.com/Shopify/sarama/message_set.go delete mode 100644 vendor/github.com/Shopify/sarama/message_test.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go delete mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/README.md delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/mocks.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go delete mode 100644 
vendor/github.com/Shopify/sarama/offset_fetch_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_manager_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_request.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_response.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go delete mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go delete mode 100644 vendor/github.com/Shopify/sarama/partitioner.go delete mode 100644 vendor/github.com/Shopify/sarama/partitioner_test.go delete mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_request.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_response.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_set.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_set_test.go delete mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go delete mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go delete mode 100644 vendor/github.com/Shopify/sarama/request.go delete mode 100644 vendor/github.com/Shopify/sarama/request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/response_header.go delete mode 100644 vendor/github.com/Shopify/sarama/response_header_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sarama.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore delete mode 100644 
vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go delete mode 100644 vendor/github.com/Shopify/sarama/utils.go delete mode 100644 vendor/github.com/Shopify/sarama/utils_test.go delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/create_topics.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/kafka.conf delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/provision.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/server.properties delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/setup_services.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties delete mode 100644 vendor/github.com/eapache/go-resiliency/.gitignore delete mode 100644 vendor/github.com/eapache/go-resiliency/.travis.yml delete mode 100644 vendor/github.com/eapache/go-resiliency/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/batcher.go delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/batcher_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/deadline.go delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/backoffs.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/classifier.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/retrier.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy_test.go delete mode 100644 vendor/github.com/eapache/queue/.gitignore delete mode 100644 vendor/github.com/eapache/queue/.travis.yml delete mode 100644 vendor/github.com/eapache/queue/LICENSE delete mode 100644 
vendor/github.com/eapache/queue/README.md delete mode 100644 vendor/github.com/eapache/queue/queue.go delete mode 100644 vendor/github.com/eapache/queue/queue_test.go delete mode 100644 vendor/github.com/golang/protobuf/_conformance/Makefile delete mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance.go delete mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go delete mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto delete mode 100644 vendor/github.com/golang/protobuf/proto/decode_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode_test.go delete mode 100644 vendor/github.com/golang/snappy/.gitignore delete mode 100644 vendor/github.com/golang/snappy/AUTHORS delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/snappy/LICENSE delete mode 100644 vendor/github.com/golang/snappy/README delete mode 100644 vendor/github.com/golang/snappy/cmd/snappytool/main.cpp delete mode 100644 vendor/github.com/golang/snappy/decode.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/decode_other.go delete mode 100644 vendor/github.com/golang/snappy/encode.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/encode_other.go delete mode 100644 vendor/github.com/golang/snappy/golden_test.go delete mode 100644 vendor/github.com/golang/snappy/snappy.go delete mode 100644 vendor/github.com/golang/snappy/snappy_test.go delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy create mode 100644 vendor/github.com/google/go-github/github/admin.go create mode 100644 vendor/github.com/google/go-github/github/admin_test.go create mode 100644 vendor/github.com/google/go-github/github/pulls_reviews.go create mode 100644 vendor/github.com/hashicorp/hcl/test-fixtures/multiline_literal_with_hil.hcl delete mode 100644 vendor/github.com/klauspost/crc32/.gitignore delete mode 100644 vendor/github.com/klauspost/crc32/.travis.yml delete mode 100644 vendor/github.com/klauspost/crc32/LICENSE delete mode 100644 vendor/github.com/klauspost/crc32/README.md delete mode 100644 vendor/github.com/klauspost/crc32/crc32.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64.s delete mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.s delete mode 100644 vendor/github.com/klauspost/crc32/crc32_generic.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_otherarch.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.go delete mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.s delete mode 100644 vendor/github.com/klauspost/crc32/crc32_test.go delete mode 100644 vendor/github.com/klauspost/crc32/example_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/.travis.yml delete mode 100644 vendor/github.com/nats-io/go-nats/README.md delete mode 100644 vendor/github.com/nats-io/go-nats/TODO.md delete mode 100644 vendor/github.com/nats-io/go-nats/bench/bench.go delete mode 100644 
vendor/github.com/nats-io/go-nats/bench/benchlib_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/enc.go delete mode 100644 vendor/github.com/nats-io/go-nats/enc_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/enc_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/gob_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/json_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_enc.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.pb.go delete mode 100644 vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.proto delete mode 100644 vendor/github.com/nats-io/go-nats/example_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-bench.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-pub.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-qsub.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-req.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-rply.go delete mode 100644 vendor/github.com/nats-io/go-nats/examples/nats-sub.go delete mode 100644 vendor/github.com/nats-io/go-nats/nats.go delete mode 100644 vendor/github.com/nats-io/go-nats/nats_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/netchan.go delete mode 100644 vendor/github.com/nats-io/go-nats/parser.go delete mode 100755 vendor/github.com/nats-io/go-nats/scripts/cov.sh delete mode 100644 vendor/github.com/nats-io/go-nats/test/auth_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/basic_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/bench_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/cluster_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/certs/ca.pem delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/certs/client-cert.pem delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/certs/client-key.pem delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/certs/key.pem delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/certs/server.pem delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/tls.conf delete mode 100644 vendor/github.com/nats-io/go-nats/test/configs/tlsverify.conf delete mode 100644 vendor/github.com/nats-io/go-nats/test/conn_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/netchan_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/reconnect_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/sub_test.go delete mode 100644 vendor/github.com/nats-io/go-nats/test/test.go delete mode 100644 vendor/github.com/nats-io/go-nats/util/tls.go delete mode 100644 vendor/github.com/nats-io/go-nats/util/tls_pre17.go delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore rename vendor/golang.org/x/sys/unix/{asm_linux_mips64x.s => asm_dragonfly_386.s} (56%) delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go delete mode 100644 
vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_test.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_test.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go rename vendor/golang.org/x/sys/unix/{zsyscall_linux_s390x.go => zsyscall_dragonfly_386.go} (57%) delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/windows/asm.s delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-5.0.md create mode 100644 vendor/gopkg.in/olivere/elastic.v3/acknowledged_response.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/config/jvm.options create mode 100644 vendor/gopkg.in/olivere/elastic.v3/config/log4j2.properties delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/config/logging.yml create mode 100644 vendor/gopkg.in/olivere/elastic.v3/config/scripts/.gitkeep delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_rollover.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_rollover_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_shrink.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/indices_shrink_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline.go create mode 
100644 vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/optimize.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/optimize_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/percolate.go rename vendor/gopkg.in/olivere/elastic.v3/{search_template.go => put_template.go} (87%) create mode 100644 vendor/gopkg.in/olivere/elastic.v3/put_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/recipes/sliced_scroll.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/reindexer.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/reindexer_test.go rename vendor/gopkg.in/olivere/elastic.v3/{run-es.sh => run-es-5.0.0-beta1.sh} (64%) create mode 100755 vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-rc1.sh create mode 100755 vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0.sh create mode 100755 vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.1.sh delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/scan.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/scan_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_slice.go create mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_slice_test.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go delete mode 100644 vendor/gopkg.in/olivere/elastic.v3/search_templates_test.go rename vendor/{github.com/nats-io/go-nats => gopkg.in/olivere/elastic.v5}/.gitignore (68%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/.travis.yml create mode 100644 vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md create mode 100644 vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md create mode 100644 vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md rename vendor/{github.com/eapache/go-resiliency => gopkg.in/olivere/elastic.v5}/LICENSE (66%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/README.md create mode 100644 vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/backoff/LICENSE create mode 100644 vendor/gopkg.in/olivere/elastic.v5/backoff/backoff.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/backoff/backoff_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/backoff/retry.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/backoff/retry_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go create mode 100644 
vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_request.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/canonicalize.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/client.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/client_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_health.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_state.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml create mode 100644 vendor/gopkg.in/olivere/elastic.v5/config/jvm.options create mode 100644 vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties create mode 100644 vendor/gopkg.in/olivere/elastic.v5/config/scripts/.gitkeep create mode 100644 vendor/gopkg.in/olivere/elastic.v5/connection.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/count.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/count_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/decoder.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/decoder_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/delete_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/doc.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/errors.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/errors_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/example_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/exists.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/exists_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/explain.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/explain_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/field_stats.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go create mode 100644 
vendor/gopkg.in/olivere/elastic.v5/geo_point.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/get.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/get_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/get_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/get_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/highlight.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/highlight_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/index.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/index_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_close.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_create.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_delete.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_flush.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_settings_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_get_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_open.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_open_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go rename vendor/gopkg.in/olivere/{elastic.v3/indices_put_warmer.go => elastic.v5/indices_put_mapping.go} (50%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_settings_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go 
create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_shrink_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_stats.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/indices_stats_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/inner_hit.go rename vendor/gopkg.in/olivere/{elastic.v3/search_queries_not_test.go => elastic.v5/inner_hit_test.go} (51%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/logger.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/mget.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/mget_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/msearch.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/msearch_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/nodes_info.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/nodes_info_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/nodes_stats_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/percolate_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ping.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/ping_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/plugins.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/plugins_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/put_template.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/put_template_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/query.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/reindex.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/reindex_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/request.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/request_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/rescore.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/rescorer.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/response.go create mode 100755 vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh create mode 100755 vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh create mode 100755 vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh create mode 100755 vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh create mode 100644 vendor/gopkg.in/olivere/elastic.v5/script.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/script_test.go create 
mode 100644 vendor/gopkg.in/olivere/elastic.v5/scroll.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/scroll_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go create mode 100644 
vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket_test.go create mode 100644 
vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_exists_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_ids_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go rename vendor/gopkg.in/olivere/{elastic.v3/search_queries_missing_test.go => elastic.v5/search_queries_match_all_test.go} (63%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_match_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go create 
mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_nested_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go rename vendor/gopkg.in/olivere/{elastic.v3/search_queries_template_query_test.go => elastic.v5/search_queries_range_test.go} (52%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_term_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_type_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_request.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_request_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_source.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_source_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/setup_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/sort.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/sort_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggest.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggest_field.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggest_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_context.go create mode 100644 
vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_context_category_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_term.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/suggester_term_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/tasks_cancel_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/tasks_list.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/termvectors.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/update.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/update_by_query.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/update_test.go rename vendor/{github.com/nats-io/go-nats => gopkg.in/olivere/elastic.v5/uritemplates}/LICENSE (94%) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils_test.go

diff --git a/api/rpc/logs/logs.go b/api/rpc/logs/logs.go
index 94ebf3d8c..2f0d82683 100644
--- a/api/rpc/logs/logs.go
+++ b/api/rpc/logs/logs.go
@@ -67,7 +67,7 @@ func (s *Server) Get(ctx context.Context, in *GetRequest) (*GetReply, error) {
 	// TODO timestamp queries
 
 	// Perform request
-	searchResult, err := request.Query(masterQuery).Do()
+	searchResult, err := request.Query(masterQuery).Do(ctx)
 	if err != nil {
 		return nil, err
 	}
diff --git a/cmd/amp-log-worker/main.go b/cmd/amp-log-worker/main.go
index 0385c05a5..5021111a2 100644
--- a/cmd/amp-log-worker/main.go
+++ b/cmd/amp-log-worker/main.go
@@ -6,6 +6,7 @@ import (
 	"github.com/appcelerator/amp/data/elasticsearch"
 	"github.com/golang/protobuf/proto"
 	"github.com/nats-io/go-nats-streaming"
+	"golang.org/x/net/context"
 	"log"
 	"os"
 	"os/signal"
@@ -85,7 +86,7 @@ func main() {
 	}
 	log.Printf("Connected to elasticsearch at %s\n", amp.ElasticsearchDefaultURL)
 
-	es.CreateIndexIfNotExists(esIndex, esType, esMapping)
+	err = es.CreateIndexIfNotExists(context.Background(), esIndex, esType, esMapping)
 	if err != nil {
 		log.Fatalf("Unable to create index: %s", err)
 	}
@@ -130,7 +131,7 @@ func messageHandler(msg *stan.Msg) {
 		log.Printf("error parsing timestamp: %v", err)
 	}
 	logEntry.Timestamp = timestamp.Format("2006-01-02T15:04:05.999")
-	err = es.Index(esIndex, esType, logEntry)
+	err = es.Index(context.Background(), esIndex, esType, logEntry) // TODO: Should we use a timeout context?
 	if err != nil {
 		log.Printf("error indexing log entry: %v", err)
 	}
diff --git a/data/elasticsearch/elasticsearch.go b/data/elasticsearch/elasticsearch.go
index 42be35cf2..83a641a20 100644
--- a/data/elasticsearch/elasticsearch.go
+++ b/data/elasticsearch/elasticsearch.go
@@ -1,6 +1,7 @@
 package elasticsearch
 
 import (
+	"context"
 	"gopkg.in/olivere/elastic.v3"
 	"time"
 )
@@ -33,15 +34,15 @@ func (es *Elasticsearch) GetClient() *elastic.Client {
 }
 
 // CreateIndexIfNotExists creates an index if it doesn't already exist
-func (es *Elasticsearch) CreateIndexIfNotExists(esIndex string, esType string, mapping string) error {
+func (es *Elasticsearch) CreateIndexIfNotExists(ctx context.Context, esIndex string, esType string, mapping string) error {
 	// Use the IndexExists service to check if the index exists
-	exists, err := es.client.IndexExists(esIndex).Do()
+	exists, err := es.client.IndexExists(esIndex).Do(ctx)
 	if err != nil {
 		return err
 	}
 	if !exists {
 		// Create a new index.
-		createIndex, err := es.client.CreateIndex(esIndex).Do()
+		createIndex, err := es.client.CreateIndex(esIndex).Do(ctx)
 		if err != nil {
 			return err
 		}
@@ -49,7 +50,7 @@ func (es *Elasticsearch) CreateIndexIfNotExists(esIndex string, esType string, m
 			return err
 		}
 
-		response, err := es.client.PutMapping().Index(esIndex).Type(esType).BodyString(mapping).Do()
+		response, err := es.client.PutMapping().Index(esIndex).Type(esType).BodyString(mapping).Do(ctx)
 		if err != nil {
 			return err
 		}
@@ -61,13 +62,13 @@ func (es *Elasticsearch) CreateIndexIfNotExists(esIndex string, esType string, m
 }
 
 // Index stores a document in Elasticsearch
-func (es *Elasticsearch) Index(esIndex string, esType string, body interface{}) error {
+func (es *Elasticsearch) Index(ctx context.Context, esIndex string, esType string, body interface{}) error {
 	// Add a document to the index
 	_, err := es.client.Index().
 		Index(esIndex).
 		Type(esType).
 		BodyJson(body).
- Do() + Do(ctx) if err != nil { return err } diff --git a/data/kafka/kafka.go b/data/kafka/kafka.go deleted file mode 100644 index 295a87010..000000000 --- a/data/kafka/kafka.go +++ /dev/null @@ -1,28 +0,0 @@ -package kafka - -import ( - "github.com/Shopify/sarama" -) - -var ( - client sarama.Client -) - -// Kafka singleton -type Kafka struct { -} - -// Connect to kafka -func (kafka *Kafka) Connect(host string) error { - config := sarama.NewConfig() - config.Version = sarama.V0_10_0_0 - - var err error - client, err = sarama.NewClient([]string{host}, config) - return err -} - -// NewConsumer creates a new consumer -func (kafka *Kafka) NewConsumer() (sarama.Consumer, error) { - return sarama.NewConsumerFromClient(client) -} diff --git a/glide.lock b/glide.lock index 371c08847..af90fbba8 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: ec02545304a5a3664049f9f779d5edaa8aea90e0a7ab3a2413088b0fe0b1de3c -updated: 2016-12-01T09:57:27.183114315+01:00 +hash: 05f6e879c1859210303cf741cff154fb3d2362cd85b15c78fa8ec385a2c1b8d2 +updated: 2016-12-01T14:38:06.543961287+01:00 imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 @@ -14,12 +14,8 @@ imports: - etcdserver/etcdserverpb - mvcc/mvccpb - pkg/tlsutil -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew - name: github.com/docker/distribution - version: 38fbd03266d86923d963566a6565c3fa01f496d8 + version: c59995570762ec8ef1b1d5a0b147600622979cb1 subpackages: - digest - reference @@ -52,15 +48,7 @@ imports: - sockets - tlsconfig - name: github.com/docker/go-units - version: 8a7beacffa3009a9ac66bad506b18ffdd110cf97 -- name: github.com/eapache/go-resiliency - version: b86b1ec0dd4209a588dc1285cdd471e73525c0b3 - subpackages: - - breaker -- name: github.com/eapache/go-xerial-snappy - version: bb955e01b9346ac19dc29eb16586c90ded99a98c -- name: github.com/eapache/queue - version: 44cc805cf13205b55f69e14bcb69867d1ae92f98 + version: e30f1e79f3cd72542f2026ceec18d3bd67ab859c - name: github.com/fatih/color version: dea9d3a26a087187530244679c1cfb3a42937794 - name: github.com/fatih/structs @@ -76,15 +64,13 @@ imports: - proto - protoc-gen-gogo/descriptor - name: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 + version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67 subpackages: - jsonpb - proto - protoc-gen-go/descriptor -- name: github.com/golang/snappy - version: d9eb7a3d35ec988b8585d4a0068e462c27d28380 - name: github.com/google/go-github - version: d4f1b2d029be1730fd349ca929cc9c0da4a27007 + version: 4e2c2aeb92b9a57d97165d1624ff66b2aabee111 subpackages: - github - name: github.com/google/go-querystring @@ -101,7 +87,7 @@ imports: - third_party/googleapis/google/api - utilities - name: github.com/hashicorp/hcl - version: 7cb7455c285ca3bf3362aa4ba6a06a6d6f5c3ba0 + version: ae25c981c128d7a7a5241e3b7d7409089355df69 subpackages: - hcl/ast - hcl/parser @@ -121,10 +107,8 @@ imports: - client/v2 - models - pkg/escape -- name: github.com/klauspost/crc32 - version: cb6bfca970f6908083f26f39a79009d608efd5cd - name: github.com/magiconair/properties - version: 0723e352fa358f9322c938cc2dadda874e9151a9 + version: 9c47895dc1ce54302908ab8a43385d1f5df2c11c - name: github.com/mattn/go-colorable version: d228849504861217f796da67fae4f6e347643f15 - name: github.com/mattn/go-isatty @@ -139,21 +123,19 @@ imports: version: 756f7b183b7ab78acdbbee5c7f392838ed459dda - name: github.com/mitchellh/mapstructure version: f3009df150dadf309fdee4a54ed65c124afad715 
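The hunks above show the pattern this patch migrates to: every elastic.v5 call now takes a context.Context, which is what makes the TODO about timeouts answerable. A minimal sketch of one way a caller could bound an indexing call with a deadline; the package name, helper name, and the 5-second budget are illustrative assumptions, not part of this patch:

```go
package logworker // hypothetical package name for this sketch

import (
	"context"
	"log"
	"time"

	"github.com/appcelerator/amp/data/elasticsearch"
)

// indexWithTimeout gives each elastic.v5 call a bounded deadline instead of
// an open-ended context.Background(). The 5-second budget is an example
// value only, not a project default.
func indexWithTimeout(es *elasticsearch.Elasticsearch, esIndex, esType string, entry interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // always release the timer, even on early return
	if err := es.Index(ctx, esIndex, esType, entry); err != nil {
		log.Printf("error indexing log entry: %v", err)
		return err
	}
	return nil
}
```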
-- name: github.com/nats-io/go-nats - version: 6b6bf392d34d01f57cc563ae123f00c13778bd57 - subpackages: - - encoders/builtin - - util - name: github.com/nats-io/go-nats-streaming version: f0b13ceaf55c6d86604169520dfd8ee3c03e9524 subpackages: - pb - name: github.com/nats-io/nats - version: 6b6bf392d34d01f57cc563ae123f00c13778bd57 + version: 61923ed1eaf8398000991fbbee2ef11ab5a5be0d + subpackages: + - encoders/builtin + - util - name: github.com/nats-io/nuid version: 289cccf02c178dc782430d534e3c1f5b72af807f - name: github.com/opencontainers/runc - version: f156f73c2aab1b735df23c9323c976ad5dca1d78 + version: 8893fa693bf9bf29e5a156369bc51b887df43924 subpackages: - libcontainer/user - name: github.com/pelletier/go-buffruneio @@ -171,15 +153,13 @@ imports: subpackages: - go - name: github.com/prometheus/common - version: 0d5de9d6d8629cb8bee6d4674da4127cd8b615a3 + version: 195bde7883f7c39ea62b0d92ab7359b5327065cb subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg - model - name: github.com/prometheus/procfs version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -- name: github.com/Shopify/sarama - version: bd61cae2be85fa6ff40eb23dcdd24567967ac2ae - name: github.com/Sirupsen/logrus version: 55eb11d21d2a31a3cc93838241d04800f52e823d subpackages: @@ -199,7 +179,7 @@ imports: - name: github.com/spf13/viper version: 651d9d916abc3c3d6a91a12549495caba5edffd2 - name: golang.org/x/crypto - version: ede567c8e044a5913dad1d1af3696d9da953104c + version: 21853a76e0cca35cf785c34dceee499a3d5a7168 subpackages: - ssh/terminal - name: golang.org/x/net @@ -214,16 +194,16 @@ imports: - trace - websocket - name: golang.org/x/oauth2 - version: d5040cddfc0da40b408c9a1da4728662435176a9 + version: f6093e37b6cb4092101a298aba5d794eb570757f subpackages: - internal - name: golang.org/x/sys - version: 30237cf4eefd639b184d1f2cb77a581ea0be8947 + version: 9c60d1c508f5134d1ca726b4641db998f2523357 subpackages: - unix - windows - name: golang.org/x/text - version: b01949dc0793a9af5e4cb3fce4d42999e76e8ca1 + version: 5c6cf4f9a2357d38515014cea8c488ed22bdab90 subpackages: - transform - unicode/norm @@ -249,13 +229,19 @@ imports: - peer - transport - name: gopkg.in/olivere/elastic.v3 - version: 060365a7e27c56c938b234824ef7f9a3a7834b99 + version: b272fab42387a42585235d56f8c6b53b13ddfb7d +- name: gopkg.in/olivere/elastic.v5 + version: 0746557b3f90c3119b44e7d7ee0f76db3ff26acb subpackages: - backoff - uritemplates - name: gopkg.in/yaml.v2 version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index f4c344d35..aa2914bc8 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,47 +1,63 @@ package: github.com/appcelerator/amp import: -- package: github.com/Shopify/sarama - version: ^1.10.1 - package: github.com/coreos/etcd - version: ^3.1.0-rc.0 + version: ^3.1.0-rc.1 subpackages: - clientv3 - mvcc/mvccpb +- package: github.com/docker/docker + version: 1.13.x + subpackages: + - api/types + - api/types/events + - api/types/filters + - api/types/mount + - api/types/network + - api/types/swarm + - client + - pkg/stringid - package: github.com/fatih/color - version: ^1.0.0 + version: ^1.1.0 +- package: github.com/fatih/structs - package: github.com/golang/protobuf subpackages: - proto - package: github.com/google/go-github subpackages: - github +- package: github.com/grpc-ecosystem/grpc-gateway + 
version: ^1.1.0 + subpackages: + - runtime + - third_party/googleapis/google/api + - utilities - package: github.com/howeyc/gopass - package: github.com/influxdata/influxdb version: ^1.1.0 subpackages: - client/v2 - package: github.com/mitchellh/go-homedir +- package: github.com/nats-io/go-nats-streaming + version: ^0.3.0 +- package: github.com/nats-io/nats + version: ^1.2.2 - package: github.com/spf13/cobra - package: github.com/spf13/pflag - package: github.com/spf13/viper - package: golang.org/x/net subpackages: - context + - websocket - package: golang.org/x/oauth2 -- package: gopkg.in/olivere/elastic.v3 - version: ^3.0.55 -- package: gopkg.in/yaml.v2 -- package: github.com/docker/docker - version: 1.13.x - package: google.golang.org/grpc - version: v1.0.4 -- package: github.com/nats-io/go-nats-streaming - version: ^0.3.0 -- package: github.com/fatih/structs -- package: github.com/grpc-ecosystem/grpc-gateway - version: ^1.1.0 + version: ^1.0.4 subpackages: - - runtime + - codes + - grpclog + - metadata +- package: gopkg.in/olivere/elastic.v5 + version: ^5.0.13 - package: gopkg.in/yaml.v2 testImport: - package: github.com/stretchr/testify version: ^1.1.4 diff --git a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md b/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md deleted file mode 100644 index b0f107cbc..000000000 --- a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing - -Contributions are always welcome, both reporting issues and submitting pull requests! - -### Reporting issues - -Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. - -- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version. -- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. -- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. - -Also, please include the following information about your environment, so we can help you faster: - -- What version of Kafka are you using? -- What version of Go are you using? -- What are the values of your Producer/Consumer/Client configuration? - - -### Submitting pull requests - -We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following. - -- If you plan to work on something major, please open an issue to discuss the design first. -- Don't break backwards compatibility. If you really have to, open an issue to discuss this first. -- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. -- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs. -- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
-- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. -- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. -- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions diff --git a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md b/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index ee6b6f785..000000000 --- a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,19 +0,0 @@ -##### Versions - -Sarama Version: -Kafka Version: -Go Version: - -##### Configuration - -What configuration values are you using for Sarama and Kafka? - -##### Logs - -When filing an issue please provide logs from Sarama and Kafka if at all -possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug -output. - -##### Problem Description - - diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore deleted file mode 100644 index 3591f9ff3..000000000 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.test - -# Folders -_obj -_test -.vagrant - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index 4ca4870a8..000000000 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go -go: -- 1.5.4 -- 1.6.2 - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=0.8.2.2 - - KAFKA_VERSION=0.9.0.1 - - KAFKA_VERSION=0.10.0.0 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh - -install: -- make install_dependencies - -script: -- make test -- make vet -- make errcheck -- make fmt - -sudo: false diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index 49ff92165..000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,323 +0,0 @@ -# Changelog - -#### Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). 
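The contributing guide and issue template above both ask for Sarama's debug output. A minimal sketch of how a caller might capture it, assuming a standard *log.Logger (the prefix and destination are arbitrary choices):

```go
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Route Sarama's internal debug logging to stderr instead of the
	// default discard logger, as suggested when reporting issues.
	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)
}
```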
- -#### Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -#### Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). 
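The 1.10.0 note above is why the data/kafka/kafka.go file deleted earlier in this patch set config.Version explicitly before opening a client. A minimal sketch of that pattern (the package and function names here are hypothetical):

```go
package kafkaclient // hypothetical package name for this sketch

import "github.com/Shopify/sarama"

// newVersionedClient declares the broker's Kafka version up front so that
// 0.10-only features (e.g. message timestamps) are enabled; without it,
// Sarama falls back to the 0.8.2 protocol minimum described above.
func newVersionedClient(host string) (sarama.Client, error) {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_0_0 // must match the broker you run against
	return sarama.NewClient([]string{host}, config)
}
```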
- -#### Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -#### Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -#### Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -#### Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -#### Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. 
This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). - -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -#### Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -#### Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -#### Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -#### Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). 
- -#### Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). - This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -#### Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -#### Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -#### Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. -- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most use cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/MIT-LICENSE b/vendor/github.com/Shopify/sarama/MIT-LICENSE deleted file mode 100644 index 8121b63b1..000000000 --- a/vendor/github.com/Shopify/sarama/MIT-LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile deleted file mode 100644 index 626b09a54..000000000 --- a/vendor/github.com/Shopify/sarama/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -default: fmt vet errcheck test - -test: - go test -v -timeout 60s -race ./... - -vet: - go vet ./... - -errcheck: - errcheck github.com/Shopify/sarama/... - -fmt: - @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi - -install_dependencies: install_errcheck get - -install_errcheck: - go get github.com/kisielk/errcheck - -get: - go get -t diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md deleted file mode 100644 index bcbd3e9c1..000000000 --- a/vendor/github.com/Shopify/sarama/README.md +++ /dev/null @@ -1,36 +0,0 @@ -sarama -====== - -[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) -[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) - -Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). - -### Getting started - -- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). - -Mocks for testing are available in the [mocks](./mocks) subpackage. - -The [examples](./examples) directory contains more elaborate example applications. - -The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
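For reference, a minimal sketch of the blocking SyncProducer API described in the 1.0.0 notes above; the broker address, topic, and payload are placeholder values, not taken from this repository:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // SyncProducer requires this

	// Broker address and topic are placeholders for this sketch.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// SendMessage blocks until the broker acknowledges the message.
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("message stored at partition %d, offset %d", partition, offset)
}
```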
- -### Compatibility and API stability - -Sarama provides a "2 releases + 2 months" compatibility guarantee: we support -the two latest stable releases of Kafka and Go, and we provide a two month -grace period for older releases. This means we currently officially support -Go 1.6 and 1.5, and Kafka 0.10.0, 0.9.0 and 0.8.2, although older releases are -still likely to work. - -Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. -A changelog is available [here](CHANGELOG.md). - -### Contributing - -* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md). -* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more - technical and design details. -* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) - contains a wealth of useful information. -* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. -* If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile deleted file mode 100644 index 4586d9ae8..000000000 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ /dev/null @@ -1,19 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - -MEMORY = 3072 - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" - - config.vm.provision :shell, path: "vagrant/provision.sh" - - config.vm.network "private_network", ip: "192.168.100.67" - - config.vm.provider "virtualbox" do |v| - v.memory = MEMORY - end -end diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go deleted file mode 100644 index ab65f01cc..000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -type ApiVersionsRequest struct { -} - -func (r *ApiVersionsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ApiVersionsRequest) key() int16 { - return 18 -} - -func (r *ApiVersionsRequest) version() int16 { - return 0 -} - -func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request_test.go b/vendor/github.com/Shopify/sarama/api_versions_request_test.go deleted file mode 100644 index 5ab4fa71c..000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_request_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package sarama - -import "testing" - -var ( - apiVersionRequest = []byte{} -) - -func TestApiVersionsRequest(t *testing.T) { - var request *ApiVersionsRequest - - request = new(ApiVersionsRequest) - testRequest(t, "basic", request, apiVersionRequest) -} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go deleted file mode 100644 index 16d62db2d..000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ /dev/null @@ -1,86 +0,0 @@ -package sarama - -type ApiVersionsResponseBlock struct { - ApiKey int16 - MinVersion int16 - 
MaxVersion int16 -} - -func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { - pe.putInt16(b.ApiKey) - pe.putInt16(b.MinVersion) - pe.putInt16(b.MaxVersion) - return nil -} - -func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { - var err error - - if b.ApiKey, err = pd.getInt16(); err != nil { - return err - } - - if b.MinVersion, err = pd.getInt16(); err != nil { - return err - } - - if b.MaxVersion, err = pd.getInt16(); err != nil { - return err - } - - return nil -} - -type ApiVersionsResponse struct { - Err KError - ApiVersions []*ApiVersionsResponseBlock -} - -func (r *ApiVersionsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { - return err - } - for _, apiVersion := range r.ApiVersions { - if err := apiVersion.encode(pe); err != nil { - return err - } - } - return nil -} - -func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) - for i := 0; i < numBlocks; i++ { - block := new(ApiVersionsResponseBlock) - if err := block.decode(pd); err != nil { - return err - } - r.ApiVersions[i] = block - } - - return nil -} - -func (r *ApiVersionsResponse) key() int16 { - return 18 -} - -func (r *ApiVersionsResponse) version() int16 { - return 0 -} - -func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response_test.go b/vendor/github.com/Shopify/sarama/api_versions_response_test.go deleted file mode 100644 index 675a65a7d..000000000 --- a/vendor/github.com/Shopify/sarama/api_versions_response_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -import "testing" - -var ( - apiVersionResponse = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, - 0x00, 0x02, - 0x00, 0x01, - } -) - -func TestApiVersionsResponse(t *testing.T) { - var response *ApiVersionsResponse - - response = new(ApiVersionsResponse) - testVersionDecodable(t, "no error", response, apiVersionResponse, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - if response.ApiVersions[0].ApiKey != 0x03 { - t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey) - } - if response.ApiVersions[0].MinVersion != 0x02 { - t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion) - } - if response.ApiVersions[0].MaxVersion != 0x01 { - t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion) - } -} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go deleted file mode 100644 index e1ae5b0da..000000000 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ /dev/null @@ -1,903 +0,0 @@ -package sarama - -import ( - "fmt" - "sync" - "time" - - "github.com/eapache/go-resiliency/breaker" - "github.com/eapache/queue" -) - -// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages -// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, -// and parses responses for errors. You must read from the Errors() channel or the -// producer will deadlock. 
You must call Close() or AsyncClose() on a producer to avoid -// leaks: it will not be garbage-collected automatically when it passes out of -// scope. -type AsyncProducer interface { - - // AsyncClose triggers a shutdown of the producer, flushing any messages it may - // have buffered. The shutdown has completed when both the Errors and Successes - // channels have been closed. When calling AsyncClose, you *must* continue to - // read from those channels in order to drain the results of any messages in - // flight. - AsyncClose() - - // Close shuts down the producer and flushes any messages it may have buffered. - // You must call this function before a producer object passes out of scope, as - // it may otherwise leak memory. You must call this before calling Close on the - // underlying client. - Close() error - - // Input is the input channel for the user to write messages to that they - // wish to send. - Input() chan<- *ProducerMessage - - // Successes is the success output channel back to the user when AckSuccesses is - // enabled. If Return.Successes is true, you MUST read from this channel or the - // Producer will deadlock. It is suggested that you send and read messages - // together in a single select statement. - Successes() <-chan *ProducerMessage - - // Errors is the error output channel back to the user. You MUST read from this - // channel or the Producer will deadlock when the channel is full. Alternatively, - // you can set Producer.Return.Errors in your config to false, which prevents - // errors to be returned. - Errors() <-chan *ProducerError -} - -type asyncProducer struct { - client Client - conf *Config - ownClient bool - - errors chan *ProducerError - input, successes, retries chan *ProducerMessage - inFlight sync.WaitGroup - - brokers map[*Broker]chan<- *ProducerMessage - brokerRefs map[chan<- *ProducerMessage]int - brokerLock sync.Mutex -} - -// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. -func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { - client, err := NewClient(addrs, conf) - if err != nil { - return nil, err - } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - p.(*asyncProducer).ownClient = true - return p, nil -} - -// NewAsyncProducerFromClient creates a new Producer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this producer. -func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - p := &asyncProducer{ - client: client, - conf: client.Config(), - errors: make(chan *ProducerError), - input: make(chan *ProducerMessage), - successes: make(chan *ProducerMessage), - retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]chan<- *ProducerMessage), - brokerRefs: make(map[chan<- *ProducerMessage]int), - } - - // launch our singleton dispatchers - go withRecover(p.dispatcher) - go withRecover(p.retryHandler) - - return p, nil -} - -type flagSet int8 - -const ( - syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer - fin // final message from partitionProducer to brokerProducer and back - shutdown // start the shutdown process -) - -// ProducerMessage is the collection of elements passed to the Producer in order to send a message. 
-type ProducerMessage struct { - Topic string // The Kafka topic for this message. - // The partitioning key for this message. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Key Encoder - // The actual message to store in Kafka. Pre-existing Encoders include - // StringEncoder and ByteEncoder. - Value Encoder - - // This field is used to hold arbitrary data you wish to include so it - // will be available when receiving on the Successes and Errors channels. - // Sarama completely ignores this field and is only to be used for - // pass-through data. - Metadata interface{} - - // Below this point are filled in by the producer as the message is processed - - // Offset is the offset of the message stored on the broker. This is only - // guaranteed to be defined if the message was successfully delivered and - // RequiredAcks is not NoResponse. - Offset int64 - // Partition is the partition that the message was sent to. This is only - // guaranteed to be defined if the message was successfully delivered. - Partition int32 - // Timestamp is the timestamp assigned to the message by the broker. This - // is only guaranteed to be defined if the message was successfully - // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at - // least version 0.10.0. - Timestamp time.Time - - retries int - flags flagSet -} - -const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. - -func (m *ProducerMessage) byteSize() int { - size := producerMessageOverhead - if m.Key != nil { - size += m.Key.Length() - } - if m.Value != nil { - size += m.Value.Length() - } - return size -} - -func (m *ProducerMessage) clear() { - m.flags = 0 - m.retries = 0 -} - -// ProducerError is the type of error generated when the producer fails to deliver a message. -// It contains the original ProducerMessage as well as the actual error value. -type ProducerError struct { - Msg *ProducerMessage - Err error -} - -func (pe ProducerError) Error() string { - return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) -} - -// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. -// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel -// when closing a producer. 
-type ProducerErrors []*ProducerError - -func (pe ProducerErrors) Error() string { - return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) -} - -func (p *asyncProducer) Errors() <-chan *ProducerError { - return p.errors -} - -func (p *asyncProducer) Successes() <-chan *ProducerMessage { - return p.successes -} - -func (p *asyncProducer) Input() chan<- *ProducerMessage { - return p.input -} - -func (p *asyncProducer) Close() error { - p.AsyncClose() - - if p.conf.Producer.Return.Successes { - go withRecover(func() { - for _ = range p.successes { - } - }) - } - - var errors ProducerErrors - if p.conf.Producer.Return.Errors { - for event := range p.errors { - errors = append(errors, event) - } - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (p *asyncProducer) AsyncClose() { - go withRecover(p.shutdown) -} - -// singleton -// dispatches messages by topic -func (p *asyncProducer) dispatcher() { - handlers := make(map[string]chan<- *ProducerMessage) - shuttingDown := false - - for msg := range p.input { - if msg == nil { - Logger.Println("Something tried to send a nil message, it was ignored.") - continue - } - - if msg.flags&shutdown != 0 { - shuttingDown = true - p.inFlight.Done() - continue - } else if msg.retries == 0 { - if shuttingDown { - // we can't just call returnError here because that decrements the wait group, - // which hasn't been incremented yet for this message, and shouldn't be - pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } else { - Logger.Println(pErr) - } - continue - } - p.inFlight.Add(1) - } - - if msg.byteSize() > p.conf.Producer.MaxMessageBytes { - p.returnError(msg, ErrMessageSizeTooLarge) - continue - } - - handler := handlers[msg.Topic] - if handler == nil { - handler = p.newTopicProducer(msg.Topic) - handlers[msg.Topic] = handler - } - - handler <- msg - } - - for _, handler := range handlers { - close(handler) - } -} - -// one per topic -// partitions messages, then dispatches them by partition -type topicProducer struct { - parent *asyncProducer - topic string - input <-chan *ProducerMessage - - breaker *breaker.Breaker - handlers map[int32]chan<- *ProducerMessage - partitioner Partitioner -} - -func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - tp := &topicProducer{ - parent: p, - topic: topic, - input: input, - breaker: breaker.New(3, 1, 10*time.Second), - handlers: make(map[int32]chan<- *ProducerMessage), - partitioner: p.conf.Producer.Partitioner(topic), - } - go withRecover(tp.dispatch) - return input -} - -func (tp *topicProducer) dispatch() { - for msg := range tp.input { - if msg.retries == 0 { - if err := tp.partitionMessage(msg); err != nil { - tp.parent.returnError(msg, err) - continue - } - } - - handler := tp.handlers[msg.Partition] - if handler == nil { - handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) - tp.handlers[msg.Partition] = handler - } - - handler <- msg - } - - for _, handler := range tp.handlers { - close(handler) - } -} - -func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { - var partitions []int32 - - err := tp.breaker.Run(func() (err error) { - if tp.partitioner.RequiresConsistency() { - partitions, err = tp.parent.client.Partitions(msg.Topic) - } else { - partitions, err = tp.parent.client.WritablePartitions(msg.Topic) - } - return - }) - - if err != nil { - return err - } - - numPartitions := 
int32(len(partitions)) - - if numPartitions == 0 { - return ErrLeaderNotAvailable - } - - choice, err := tp.partitioner.Partition(msg, numPartitions) - - if err != nil { - return err - } else if choice < 0 || choice >= numPartitions { - return ErrInvalidPartition - } - - msg.Partition = partitions[choice] - - return nil -} - -// one per partition per topic -// dispatches messages to the appropriate broker -// also responsible for maintaining message order during retries -type partitionProducer struct { - parent *asyncProducer - topic string - partition int32 - input <-chan *ProducerMessage - - leader *Broker - breaker *breaker.Breaker - output chan<- *ProducerMessage - - // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, - // all other messages get buffered in retryState[msg.retries].buf to preserve ordering - // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and - // therefore whether our buffer is complete and safe to flush) - highWatermark int - retryState []partitionRetryState -} - -type partitionRetryState struct { - buf []*ProducerMessage - expectChaser bool -} - -func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - pp := &partitionProducer{ - parent: p, - topic: topic, - partition: partition, - input: input, - - breaker: breaker.New(3, 1, 10*time.Second), - retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), - } - go withRecover(pp.dispatch) - return input -} - -func (pp *partitionProducer) dispatch() { - // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` - // on the first message - pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) - if pp.leader != nil { - pp.output = pp.parent.getBrokerProducer(pp.leader) - pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} - } - - for msg := range pp.input { - if msg.retries > pp.highWatermark { - // a new, higher, retry level; handle it and then back off - pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - } else if pp.highWatermark > 0 { - // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level - if msg.retries < pp.highWatermark { - // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) - if msg.flags&fin == fin { - pp.retryState[msg.retries].expectChaser = false - pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected - } else { - pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) - } - continue - } else if msg.flags&fin == fin { - // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, - // meaning this retry level is done and we can go down (at least) one level and flush that - pp.retryState[pp.highWatermark].expectChaser = false - pp.flushRetryBuffers() - pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected - continue - } - } - - // if we made it this far then the current msg contains real data, and can be sent to the next goroutine - // without breaking any of our ordering guarantees - - if pp.output == nil { - if err := 
pp.updateLeader(); err != nil { - pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - continue - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - pp.output <- msg - } - - if pp.output != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - } -} - -func (pp *partitionProducer) newHighWatermark(hwm int) { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) - pp.highWatermark = hwm - - // send off a fin so that we know when everything "in between" has made it - // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) - pp.retryState[pp.highWatermark].expectChaser = true - pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} - - // a new HWM means that our current broker selection is out of date - Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - pp.output = nil -} - -func (pp *partitionProducer) flushRetryBuffers() { - Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) - for { - pp.highWatermark-- - - if pp.output == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) - goto flushDone - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - for _, msg := range pp.retryState[pp.highWatermark].buf { - pp.output <- msg - } - - flushDone: - pp.retryState[pp.highWatermark].buf = nil - if pp.retryState[pp.highWatermark].expectChaser { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) - break - } else if pp.highWatermark == 0 { - Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) - break - } - } -} - -func (pp *partitionProducer) updateLeader() error { - return pp.breaker.Run(func() (err error) { - if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { - return err - } - - if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { - return err - } - - pp.output = pp.parent.getBrokerProducer(pp.leader) - pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} - - return nil - }) -} - -// one per broker; also constructs an associated flusher -func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { - var ( - input = make(chan *ProducerMessage) - bridge = make(chan *produceSet) - responses = make(chan *brokerProducerResponse) - ) - - bp := &brokerProducer{ - parent: p, - broker: broker, - input: input, - output: bridge, - responses: responses, - buffer: newProduceSet(p), - currentRetries: make(map[string]map[int32]error), - } - go withRecover(bp.run) - - // minimal bridge to make the network response `select`able - go withRecover(func() { - for set := range bridge { - request := set.buildRequest() - - response, err := broker.Produce(request) - - responses <- &brokerProducerResponse{ - set: set, - err: err, - res: response, - } - } - 
close(responses) - }) - - return input -} - -type brokerProducerResponse struct { - set *produceSet - err error - res *ProduceResponse -} - -// groups messages together into appropriately-sized batches for sending to the broker -// handles state related to retries etc -type brokerProducer struct { - parent *asyncProducer - broker *Broker - - input <-chan *ProducerMessage - output chan<- *produceSet - responses <-chan *brokerProducerResponse - - buffer *produceSet - timer <-chan time.Time - timerFired bool - - closing error - currentRetries map[string]map[int32]error -} - -func (bp *brokerProducer) run() { - var output chan<- *produceSet - Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) - - for { - select { - case msg := <-bp.input: - if msg == nil { - bp.shutdown() - return - } - - if msg.flags&syn == syn { - Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", - bp.broker.ID(), msg.Topic, msg.Partition) - if bp.currentRetries[msg.Topic] == nil { - bp.currentRetries[msg.Topic] = make(map[int32]error) - } - bp.currentRetries[msg.Topic][msg.Partition] = nil - bp.parent.inFlight.Done() - continue - } - - if reason := bp.needsRetry(msg); reason != nil { - bp.parent.retryMessage(msg, reason) - - if bp.closing == nil && msg.flags&fin == fin { - // we were retrying this partition but we can start processing again - delete(bp.currentRetries[msg.Topic], msg.Partition) - Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", - bp.broker.ID(), msg.Topic, msg.Partition) - } - - continue - } - - if bp.buffer.wouldOverflow(msg) { - if err := bp.waitForSpace(msg); err != nil { - bp.parent.retryMessage(msg, err) - continue - } - } - - if err := bp.buffer.add(msg); err != nil { - bp.parent.returnError(msg, err) - continue - } - - if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { - bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) - } - case <-bp.timer: - bp.timerFired = true - case output <- bp.buffer: - bp.rollOver() - case response := <-bp.responses: - bp.handleResponse(response) - } - - if bp.timerFired || bp.buffer.readyToFlush() { - output = bp.output - } else { - output = nil - } - } -} - -func (bp *brokerProducer) shutdown() { - for !bp.buffer.empty() { - select { - case response := <-bp.responses: - bp.handleResponse(response) - case bp.output <- bp.buffer: - bp.rollOver() - } - } - close(bp.output) - for response := range bp.responses { - bp.handleResponse(response) - } - - Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) -} - -func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { - if bp.closing != nil { - return bp.closing - } - - return bp.currentRetries[msg.Topic][msg.Partition] -} - -func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { - Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) - - for { - select { - case response := <-bp.responses: - bp.handleResponse(response) - // handling a response can change our state, so re-check some things - if reason := bp.needsRetry(msg); reason != nil { - return reason - } else if !bp.buffer.wouldOverflow(msg) { - return nil - } - case bp.output <- bp.buffer: - bp.rollOver() - return nil - } - } -} - -func (bp *brokerProducer) rollOver() { - bp.timer = nil - bp.timerFired = false - bp.buffer = newProduceSet(bp.parent) -} - -func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { - if response.err != nil { - bp.handleError(response.set, response.err) - } else { 
- bp.handleSuccess(response.set, response.res) - } - - if bp.buffer.empty() { - bp.rollOver() // this can happen if the response invalidated our buffer - } -} - -func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { - // we iterate through the blocks in the request set, not the response, so that we notice - // if the response is missing a block completely - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - if response == nil { - // this only happens when RequiredAcks is NoResponse, so we have to assume success - bp.parent.returnSuccesses(msgs) - return - } - - block := response.GetBlock(topic, partition) - if block == nil { - bp.parent.returnErrors(msgs, ErrIncompleteResponse) - return - } - - switch block.Err { - // Success - case ErrNoError: - if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { - for _, msg := range msgs { - msg.Timestamp = block.Timestamp - } - } - for i, msg := range msgs { - msg.Offset = block.Offset + int64(i) - } - bp.parent.returnSuccesses(msgs) - // Retriable errors - case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", - bp.broker.ID(), topic, partition, block.Err) - bp.currentRetries[topic][partition] = block.Err - bp.parent.retryMessages(msgs, block.Err) - bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) - // Other non-retriable errors - default: - bp.parent.returnErrors(msgs, block.Err) - } - }) -} - -func (bp *brokerProducer) handleError(sent *produceSet, err error) { - switch err.(type) { - case PacketEncodingError: - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.returnErrors(msgs, err) - }) - default: - Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) - bp.parent.abandonBrokerConnection(bp.broker) - _ = bp.broker.Close() - bp.closing = err - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) - }) - bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) - }) - bp.rollOver() - } -} - -// singleton -// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock -// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel -func (p *asyncProducer) retryHandler() { - var msg *ProducerMessage - buf := queue.New() - - for { - if buf.Length() == 0 { - msg = <-p.retries - } else { - select { - case msg = <-p.retries: - case p.input <- buf.Peek().(*ProducerMessage): - buf.Remove() - continue - } - } - - if msg == nil { - return - } - - buf.Add(msg) - } -} - -// utility functions - -func (p *asyncProducer) shutdown() { - Logger.Println("Producer shutting down.") - p.inFlight.Add(1) - p.input <- &ProducerMessage{flags: shutdown} - - p.inFlight.Wait() - - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } - } - - close(p.input) - close(p.retries) - close(p.errors) - close(p.successes) -} - -func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { - msg.clear() - pErr := &ProducerError{Msg: msg, Err: err} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } 
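
retryHandler above is described as a bridge built on an infinite channel (github.com/eapache/queue provides the unbounded buffer). A rough sketch of the same deadlock-avoidance idea using a plain slice as the buffer; the names here are illustrative, not sarama's:

package main

import "fmt"

// bridge forwards values from in to out through an unbounded buffer, so a
// slow reader on out can never block a writer on in -- the role retryHandler
// plays between the flushers and the dispatcher.
func bridge(in <-chan int, out chan<- int) {
	var buf []int
	for {
		if len(buf) == 0 {
			v, ok := <-in
			if !ok {
				close(out)
				return
			}
			buf = append(buf, v)
			continue
		}
		select {
		case v, ok := <-in:
			if !ok {
				for _, q := range buf { // drain what we still hold
					out <- q
				}
				close(out)
				return
			}
			buf = append(buf, v)
		case out <- buf[0]:
			buf = buf[1:]
		}
	}
}

func main() {
	in, out := make(chan int), make(chan int)
	go bridge(in, out)
	for i := 0; i < 5; i++ {
		in <- i
	}
	close(in)
	for v := range out {
		fmt.Println(v)
	}
}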
else { - Logger.Println(pErr) - } - p.inFlight.Done() -} - -func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { - for _, msg := range batch { - p.returnError(msg, err) - } -} - -func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { - for _, msg := range batch { - if p.conf.Producer.Return.Successes { - msg.clear() - p.successes <- msg - } - p.inFlight.Done() - } -} - -func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { - if msg.retries >= p.conf.Producer.Retry.Max { - p.returnError(msg, err) - } else { - msg.retries++ - p.retries <- msg - } -} - -func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { - for _, msg := range batch { - p.retryMessage(msg, err) - } -} - -func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - bp := p.brokers[broker] - - if bp == nil { - bp = p.newBrokerProducer(broker) - p.brokers[broker] = bp - p.brokerRefs[bp] = 0 - } - - p.brokerRefs[bp]++ - - return bp -} - -func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - p.brokerRefs[bp]-- - if p.brokerRefs[bp] == 0 { - close(bp) - delete(p.brokerRefs, bp) - - if p.brokers[broker] == bp { - delete(p.brokers, broker) - } - } -} - -func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - delete(p.brokers, broker) -} diff --git a/vendor/github.com/Shopify/sarama/async_producer_test.go b/vendor/github.com/Shopify/sarama/async_producer_test.go deleted file mode 100644 index 517ef2a34..000000000 --- a/vendor/github.com/Shopify/sarama/async_producer_test.go +++ /dev/null @@ -1,801 +0,0 @@ -package sarama - -import ( - "errors" - "log" - "os" - "os/signal" - "sync" - "testing" - "time" -) - -const TestMessage = "ABC THE MESSAGE" - -func closeProducer(t *testing.T, p AsyncProducer) { - var wg sync.WaitGroup - p.AsyncClose() - - wg.Add(2) - go func() { - for _ = range p.Successes() { - t.Error("Unexpected message on Successes()") - } - wg.Done() - }() - go func() { - for msg := range p.Errors() { - t.Error(msg.Err) - } - wg.Done() - }() - wg.Wait() -} - -func expectResults(t *testing.T, p AsyncProducer, successes, errors int) { - expect := successes + errors - for expect > 0 { - select { - case msg := <-p.Errors(): - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - errors-- - expect-- - if errors < 0 { - t.Error(msg.Err) - } - case msg := <-p.Successes(): - if msg.flags != 0 { - t.Error("Message had flags set") - } - successes-- - expect-- - if successes < 0 { - t.Error("Too many successes") - } - } - } - if successes != 0 || errors != 0 { - t.Error("Unexpected successes", successes, "or errors", errors) - } -} - -type testPartitioner chan *int32 - -func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { - part := <-p - if part == nil { - return 0, errors.New("BOOM") - } - - return *part, nil -} - -func (p testPartitioner) RequiresConsistency() bool { - return true -} - -func (p testPartitioner) feed(partition int32) { - p <- &partition -} - -type flakyEncoder bool - -func (f flakyEncoder) Length() int { - return len(TestMessage) -} - -func (f flakyEncoder) Encode() ([]byte, error) { - if !bool(f) { - return nil, errors.New("flaky encoding error") - } - return []byte(TestMessage), nil -} - -func TestAsyncProducer(t *testing.T) { - seedBroker := 
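
flakyEncoder above doubles as a compact illustration of the Encoder contract: Length is consulted first for buffer sizing, Encode supplies the bytes, and the two must agree. A sketch of a hypothetical encoder following the same contract (upperEncoder is invented for this example; ASCII-only input keeps Length consistent with Encode, since ToUpper preserves byte length there):

package main

import (
	"fmt"
	"strings"
)

// upperEncoder uppercases its payload lazily, the way StringEncoder defers
// its work until the producer actually serializes the message.
type upperEncoder string

func (u upperEncoder) Length() int { return len(u) }

func (u upperEncoder) Encode() ([]byte, error) {
	return []byte(strings.ToUpper(string(u))), nil
}

func main() {
	enc := upperEncoder("hello")
	b, _ := enc.Encode()
	fmt.Println(enc.Length(), string(b)) // 5 HELLO
}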
NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} - } - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - t.Error(msg.Err) - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - case msg := <-producer.Successes(): - if msg.flags != 0 { - t.Error("Message had flags set") - } - if msg.Metadata.(int) != i { - t.Error("Message metadata did not match") - } - } - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleFlushes(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for flush := 0; flush < 3; flush++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 5, 0) - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleBrokers(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader0 := NewMockBroker(t, 2) - leader1 := NewMockBroker(t, 3) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) - metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse0 := new(ProduceResponse) - prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError) - leader0.Returns(prodResponse0) - - prodResponse1 := new(ProduceResponse) - prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) - leader1.Returns(prodResponse1) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 10, 0) - - 
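
The multi-broker test above routes messages with NewRoundRobinPartitioner, and testPartitioner earlier shows the two-method interface a custom partitioner must satisfy. A hedged sketch of a key-hashing implementation; keyHashPartitioner and the FNV-1a choice are assumptions for illustration, not part of this patch:

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/Shopify/sarama"
)

// keyHashPartitioner hashes the message key and keeps keyless messages on 0.
type keyHashPartitioner struct{}

func (keyHashPartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	if msg.Key == nil {
		return 0, nil
	}
	key, err := msg.Key.Encode()
	if err != nil {
		return 0, err
	}
	h := fnv.New32a()
	_, _ = h.Write(key)
	return int32(h.Sum32() % uint32(numPartitions)), nil
}

// RequiresConsistency reports that retries must keep the key-to-partition
// mapping stable, just like testPartitioner above.
func (keyHashPartitioner) RequiresConsistency() bool { return true }

func main() {
	msg := &sarama.ProducerMessage{Topic: "my_topic", Key: sarama.StringEncoder("user-42")}
	p, _ := keyHashPartitioner{}.Partition(msg, 8)
	fmt.Println("partition:", p)
	// wired in the same way the tests configure theirs:
	// config.Producer.Partitioner = func(topic string) sarama.Partitioner { return keyHashPartitioner{} }
}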
closeProducer(t, producer) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -func TestAsyncProducerCustomPartitioner(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse := new(ProduceResponse) - prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 2 - config.Producer.Return.Successes = true - config.Producer.Partitioner = func(topic string) Partitioner { - p := make(testPartitioner) - go func() { - p.feed(0) - p <- nil - p <- nil - p <- nil - p.feed(0) - }() - return p - } - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 2, 3) - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerFailureRetry(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - seedBroker.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - leader1.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - leader1.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - leader2.Close() - closeProducer(t, producer) -} - -func TestAsyncProducerEncoderFailures(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 1 - config.Producer.Return.Successes = true - config.Producer.Partitioner = 
NewManualPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for flush := 0; flush < 3; flush++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)} - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)} - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)} - expectResults(t, producer, 1, 2) - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -// If a Kafka broker becomes unavailable and then returns back in service, then -// producer reconnects to it and continues sending messages. -func TestAsyncProducerBrokerBounce(t *testing.T) { - // Given - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - - config := NewConfig() - config.Producer.Flush.Messages = 1 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // When: a broker connection gets reset by a broker (network glitch, restart, you name it). - leader.Close() // producer should get EOF - leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles - seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again - - // Then: a produced message goes through the new broker connection. 
- producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 3 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader1.Close() // producer should get EOF - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - - // ok fine, tell it to go to leader2 finally - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - seedBroker.Close() - leader2.Close() - - closeProducer(t, producer) -} - -func TestAsyncProducerMultipleRetries(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 4 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - leader2.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: 
StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader1.Close() - leader2.Close() - closeProducer(t, producer) -} - -func TestAsyncProducerOutOfRetries(t *testing.T) { - t.Skip("Enable once bug #294 is fixed.") - - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - if msg.Err != ErrNotLeaderForPartition { - t.Error(msg.Err) - } - case <-producer.Successes(): - t.Error("Unexpected success") - } - } - - seedBroker.Returns(metadataResponse) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - expectResults(t, producer, 10, 0) - - leader.Close() - seedBroker.Close() - safeClose(t, producer) -} - -func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partition 0 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // prime partition 1 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // reboot the broker (the producer will get EOF on its existing connection) - leader.Close() - leader = NewMockBrokerAddr(t, 2, leaderAddr) - - // send another message on partition 0 to trigger the EOF and retry - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - - // tell partition 0 to go to that broker again - 
seedBroker.Returns(metadataResponse) - - // succeed this time - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerFlusherRetryCondition(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - config.Producer.Partitioner = NewManualPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partitions - for p := int32(0); p < 2; p++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} - } - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 5, 0) - } - - // send more messages on partition 0 - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - time.Sleep(50 * time.Millisecond) - - leader.SetHandlerByMap(map[string]MockResponse{ - "ProduceRequest": NewMockProduceResponse(t). 
- SetError("my_topic", 0, ErrNoError), - }) - - // tell partition 0 to go to that broker again - seedBroker.Returns(metadataResponse) - - // succeed this time - expectResults(t, producer, 5, 0) - - // put five more through - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} - } - expectResults(t, producer, 5, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerRetryShutdown(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataLeader := new(MetadataResponse) - metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) - metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - producer.AsyncClose() - time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in - - producer.Input() <- &ProducerMessage{Topic: "FOO"} - if err := <-producer.Errors(); err.Err != ErrShuttingDown { - t.Error(err) - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - seedBroker.Returns(metadataLeader) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader.Close() - - // wait for the async-closed producer to shut down fully - for err := range producer.Errors() { - t.Error(err) - } -} - -// This example shows how to use the producer while simultaneously -// reading the Errors channel to know about any failures. -func ExampleAsyncProducer_select() { - producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) - if err != nil { - panic(err) - } - - defer func() { - if err := producer.Close(); err != nil { - log.Fatalln(err) - } - }() - - // Trap SIGINT to trigger a shutdown. - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var enqueued, errors int -ProducerLoop: - for { - select { - case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: - enqueued++ - case err := <-producer.Errors(): - log.Println("Failed to produce message", err) - errors++ - case <-signals: - break ProducerLoop - } - } - - log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors) -} - -// This example shows how to use the producer with separate goroutines -// reading from the Successes and Errors channels. Note that in order -// for the Successes channel to be populated, you have to set -// config.Producer.Return.Successes to true. -func ExampleAsyncProducer_goroutines() { - config := NewConfig() - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) - if err != nil { - panic(err) - } - - // Trap SIGINT to trigger a graceful shutdown. 
- signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var ( - wg sync.WaitGroup - enqueued, successes, errors int - ) - - wg.Add(1) - go func() { - defer wg.Done() - for _ = range producer.Successes() { - successes++ - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for err := range producer.Errors() { - log.Println(err) - errors++ - } - }() - -ProducerLoop: - for { - message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} - select { - case producer.Input() <- message: - enqueued++ - - case <-signals: - producer.AsyncClose() // Trigger a shutdown of the producer. - break ProducerLoop - } - } - - wg.Wait() - - log.Printf("Successfully produced: %d; errors: %d\n", successes, errors) -} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go deleted file mode 100644 index bfcb82f37..000000000 --- a/vendor/github.com/Shopify/sarama/broker.go +++ /dev/null @@ -1,526 +0,0 @@ -package sarama - -import ( - "crypto/tls" - "encoding/binary" - "fmt" - "io" - "net" - "strconv" - "sync" - "sync/atomic" - "time" -) - -// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. -type Broker struct { - id int32 - addr string - - conf *Config - correlationID int32 - conn net.Conn - connErr error - lock sync.Mutex - opened int32 - - responses chan responsePromise - done chan bool -} - -type responsePromise struct { - correlationID int32 - packets chan []byte - errors chan error -} - -// NewBroker creates and returns a Broker targetting the given host:port address. -// This does not attempt to actually connect, you have to call Open() for that. -func NewBroker(addr string) *Broker { - return &Broker{id: -1, addr: addr} -} - -// Open tries to connect to the Broker if it is not already connected or connecting, but does not block -// waiting for the connection to complete. This means that any subsequent operations on the broker will -// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, -// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or -// AlreadyConnected. If conf is nil, the result of NewConfig() is used. 
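
The Open doc comment above prescribes the async-connect pattern: Open only fails fast with a configuration error or ErrAlreadyConnected, and a caller that wants a synchronous connect follows it with Connected(). A usage sketch under that contract; the address is a placeholder:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder address
	if err := broker.Open(nil); err != nil {     // nil config means NewConfig() defaults
		log.Fatal(err)
	}
	// Open returned immediately; Connected blocks until the dial has
	// succeeded or failed, giving the effect of a synchronous Open.
	connected, err := broker.Connected()
	if err != nil {
		log.Fatal(err) // the connection attempt itself failed
	}
	log.Println("connected:", connected)
	_ = broker.Close()
}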
-func (b *Broker) Open(conf *Config) error { - if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { - return ErrAlreadyConnected - } - - if conf == nil { - conf = NewConfig() - } - - err := conf.Validate() - if err != nil { - return err - } - - b.lock.Lock() - - go withRecover(func() { - defer b.lock.Unlock() - - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } - if b.connErr != nil { - Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) - b.conn = nil - atomic.StoreInt32(&b.opened, 0) - return - } - b.conn = newBufConn(b.conn) - - b.conf = conf - - if conf.Net.SASL.Enable { - b.connErr = b.sendAndReceiveSASLPlainAuth() - if b.connErr != nil { - err = b.conn.Close() - if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) - } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) - } - b.conn = nil - atomic.StoreInt32(&b.opened, 0) - return - } - } - - b.done = make(chan bool) - b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) - - if b.id >= 0 { - Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) - } else { - Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) - } - go withRecover(b.responseReceiver) - }) - - return nil -} - -// Connected returns true if the broker is connected and false otherwise. If the broker is not -// connected but it had tried to connect, the error from that connection attempt is also returned. -func (b *Broker) Connected() (bool, error) { - b.lock.Lock() - defer b.lock.Unlock() - - return b.conn != nil, b.connErr -} - -func (b *Broker) Close() error { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - return ErrNotConnected - } - - close(b.responses) - <-b.done - - err := b.conn.Close() - - b.conn = nil - b.connErr = nil - b.done = nil - b.responses = nil - - if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) - } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) - } - - atomic.StoreInt32(&b.opened, 0) - - return err -} - -// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. -func (b *Broker) ID() int32 { - return b.id -} - -// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
-func (b *Broker) Addr() string { - return b.addr -} - -func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { - response := new(MetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { - response := new(ConsumerMetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { - response := new(OffsetResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error - - if request.RequiredAcks == NoResponse { - err = b.sendAndReceive(request, nil) - } else { - response = new(ProduceResponse) - err = b.sendAndReceive(request, response) - } - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { - response := new(FetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { - response := new(OffsetCommitResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { - response := new(OffsetFetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { - response := new(JoinGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { - response := new(SyncGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { - response := new(LeaveGroupResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { - response := new(HeartbeatResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { - response := new(ListGroupsResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { - response := new(DescribeGroupsResponse) - - err := b.sendAndReceive(request, response) - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - if b.connErr != nil { - 
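
Produce above is the one request helper with a special case: when RequiredAcks is NoResponse it calls sendAndReceive with a nil response target, so callers get back a nil *ProduceResponse alongside a nil error. A fragment making that explicit (assumes `import "github.com/Shopify/sarama"`; produceFireAndForget is an invented wrapper, not sarama API):

// produceFireAndForget demonstrates the documented NoResponse behaviour:
// the broker sends nothing back, so resp == nil with err == nil is the
// expected outcome rather than a failure.
func produceFireAndForget(b *sarama.Broker, req *sarama.ProduceRequest) error {
	req.RequiredAcks = sarama.NoResponse
	resp, err := b.Produce(req)
	if err != nil {
		return err
	}
	// resp is nil here by design: sendAndReceive ran with a nil response target.
	_ = resp
	return nil
}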
return nil, b.connErr - } - return nil, ErrNotConnected - } - - if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { - return nil, ErrUnsupportedVersion - } - - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req) - if err != nil { - return nil, err - } - - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return nil, err - } - - _, err = b.conn.Write(buf) - if err != nil { - return nil, err - } - b.correlationID++ - - if !promiseResponse { - return nil, nil - } - - promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} - b.responses <- promise - - return &promise, nil -} - -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) - - if err != nil { - return err - } - - if promise == nil { - return nil - } - - select { - case buf := <-promise.packets: - return versionedDecode(buf, res, req.version()) - case err = <-promise.errors: - return err - } -} - -func (b *Broker) decode(pd packetDecoder) (err error) { - b.id, err = pd.getInt32() - if err != nil { - return err - } - - host, err := pd.getString() - if err != nil { - return err - } - - port, err := pd.getInt32() - if err != nil { - return err - } - - b.addr = net.JoinHostPort(host, fmt.Sprint(port)) - if _, _, err := net.SplitHostPort(b.addr); err != nil { - return err - } - - return nil -} - -func (b *Broker) encode(pe packetEncoder) (err error) { - - host, portstr, err := net.SplitHostPort(b.addr) - if err != nil { - return err - } - port, err := strconv.Atoi(portstr) - if err != nil { - return err - } - - pe.putInt32(b.id) - - err = pe.putString(host) - if err != nil { - return err - } - - pe.putInt32(int32(port)) - - return nil -} - -func (b *Broker) responseReceiver() { - var dead error - header := make([]byte, 8) - for response := range b.responses { - if dead != nil { - response.errors <- dead - continue - } - - err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) - if err != nil { - dead = err - response.errors <- err - continue - } - - _, err = io.ReadFull(b.conn, header) - if err != nil { - dead = err - response.errors <- err - continue - } - - decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) - if err != nil { - dead = err - response.errors <- err - continue - } - if decodedHeader.correlationID != response.correlationID { - // TODO if decoded ID < cur ID, discard until we catch up - // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response - dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} - response.errors <- dead - continue - } - - buf := make([]byte, decodedHeader.length-4) - _, err = io.ReadFull(b.conn, buf) - if err != nil { - dead = err - response.errors <- err - continue - } - - response.packets <- buf - } - close(b.done) -} - -// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) -// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 -// -// In SASL Plain, Kafka expects the auth header to be in the following format -// Message format (from https://tools.ietf.org/html/rfc4616): -// -// message = [authzid] UTF8NUL authcid UTF8NUL passwd -// authcid = 1*SAFE ; MUST accept up to 255 octets -// authzid = 1*SAFE ; MUST accept up to 255 octets -// passwd = 1*SAFE ; MUST accept up to 255 
octets -// UTF8NUL = %x00 ; UTF-8 encoded NUL character -// -// SAFE = UTF1 / UTF2 / UTF3 / UTF4 -// ;; any UTF-8 encoded Unicode character except NUL -// -// When credentials are valid, Kafka returns a 4 byte array of null characters. -// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way -// of responding to bad credentials but thats how its being done today. -func (b *Broker) sendAndReceiveSASLPlainAuth() error { - length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) - authBytes := make([]byte, length+4) //4 byte length header + auth data - binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) - - err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) - return err - } - - _, err = b.conn.Write(authBytes) - if err != nil { - Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) - return err - } - - header := make([]byte, 4) - n, err := io.ReadFull(b.conn, header) - // If the credentials are valid, we would get a 4 byte response filled with null characters. - // Otherwise, the broker closes the connection and we get an EOF - if err != nil { - Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) - return err - } - - Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) - return nil -} diff --git a/vendor/github.com/Shopify/sarama/broker_test.go b/vendor/github.com/Shopify/sarama/broker_test.go deleted file mode 100644 index 53e8baf49..000000000 --- a/vendor/github.com/Shopify/sarama/broker_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" -) - -func ExampleBroker() { - broker := NewBroker("localhost:9092") - err := broker.Open(nil) - if err != nil { - panic(err) - } - - request := MetadataRequest{Topics: []string{"myTopic"}} - response, err := broker.GetMetadata(&request) - if err != nil { - _ = broker.Close() - panic(err) - } - - fmt.Println("There are", len(response.Topics), "topics active in the cluster.") - - if err = broker.Close(); err != nil { - panic(err) - } -} - -type mockEncoder struct { - bytes []byte -} - -func (m mockEncoder) encode(pe packetEncoder) error { - return pe.putRawBytes(m.bytes) -} - -func TestBrokerAccessors(t *testing.T) { - broker := NewBroker("abc:123") - - if broker.ID() != -1 { - t.Error("New broker didn't have an ID of -1.") - } - - if broker.Addr() != "abc:123" { - t.Error("New broker didn't have the correct address") - } - - broker.id = 34 - if broker.ID() != 34 { - t.Error("Manually setting broker ID did not take effect.") - } -} - -func TestSimpleBrokerCommunication(t *testing.T) { - mb := NewMockBroker(t, 0) - defer mb.Close() - - broker := NewBroker(mb.Addr()) - conf := NewConfig() - conf.Version = V0_10_0_0 - err := broker.Open(conf) - if err != nil { - t.Fatal(err) - } - - for _, tt := range brokerTestTable { - mb.Returns(&mockEncoder{tt.response}) - } - for _, tt := range brokerTestTable { - tt.runner(t, broker) - } - - err = broker.Close() - if err != nil { - t.Error(err) - } -} - -// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake -var brokerTestTable = []struct { - response []byte - runner func(*testing.T, 
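
sendAndReceiveSASLPlainAuth above frames the RFC 4616 message with a 4-byte big-endian length prefix and NUL separators, leaving the authzid empty. A standalone sketch of just that framing step, with dummy credentials:

package main

import (
	"encoding/binary"
	"fmt"
)

// saslPlainFrame builds the frame described above: a 4-byte big-endian
// length prefix, then [authzid] NUL authcid NUL passwd with an empty authzid.
func saslPlainFrame(user, pass string) []byte {
	payload := "\x00" + user + "\x00" + pass
	frame := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(frame, uint32(len(payload)))
	copy(frame[4:], payload)
	return frame
}

func main() {
	fmt.Printf("% x\n", saslPlainFrame("alice", "secret")) // dummy credentials
}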
*Broker) -}{ - {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := MetadataRequest{} - response, err := broker.GetMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Metadata request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ConsumerMetadataRequest{} - response, err := broker.GetConsumerMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Consumer Metadata request got no response!") - } - }}, - - {[]byte{}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = NoResponse - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response != nil { - t.Error("Produce request with NoResponse got a response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = WaitForLocal - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Produce request without NoResponse got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := FetchRequest{} - response, err := broker.Fetch(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Fetch request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetFetchRequest{} - response, err := broker.FetchOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetFetch request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetCommitRequest{} - response, err := broker.CommitOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetCommit request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetRequest{} - response, err := broker.GetAvailableOffsets(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Offset request got no response!") - } - }}, - - {[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := JoinGroupRequest{} - response, err := broker.JoinGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("JoinGroup request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := SyncGroupRequest{} - response, err := broker.SyncGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("SyncGroup request got no response!") - } - }}, - - {[]byte{0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := LeaveGroupRequest{} - response, err := broker.LeaveGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("LeaveGroup request got no response!") - } - }}, - - {[]byte{0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := HeartbeatRequest{} - response, err := broker.Heartbeat(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Heartbeat request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 
0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ListGroupsRequest{} - response, err := broker.ListGroups(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("ListGroups request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := DescribeGroupsRequest{} - response, err := broker.DescribeGroups(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("DescribeGroups request got no response!") - } - }}, -} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go deleted file mode 100644 index e9a9ea77e..000000000 --- a/vendor/github.com/Shopify/sarama/client.go +++ /dev/null @@ -1,733 +0,0 @@ -package sarama - -import ( - "math/rand" - "sort" - "sync" - "time" -) - -// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. -// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected -// automatically when it passes out of scope. It is safe to share a client amongst many -// users, however Kafka will process requests from a single client strictly in serial, -// so it is generally more efficient to use the default one client per producer/consumer. -type Client interface { - // Config returns the Config struct of the client. This struct should not be - // altered after it has been created. - Config() *Config - - // Topics returns the set of available topics as retrieved from cluster metadata. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - Partitions(topic string) ([]int32, error) - - // WritablePartitions returns the sorted list of all writable partition IDs for - // the given topic, where "writable" means "having a valid leader accepting - // writes". - WritablePartitions(topic string) ([]int32, error) - - // Leader returns the broker object that is the leader of the current - // topic/partition, as determined by querying the cluster metadata. - Leader(topic string, partitionID int32) (*Broker, error) - - // Replicas returns the set of all replica IDs for the given partition. - Replicas(topic string, partitionID int32) ([]int32, error) - - // RefreshMetadata takes a list of topics and queries the cluster to refresh the - // available metadata for those topics. If no topics are provided, it will refresh - // metadata for all topics. - RefreshMetadata(topics ...string) error - - // GetOffset queries the cluster to get the most recent available offset at the - // given time on the topic/partition combination. Time should be OffsetOldest for - // the earliest available offset, OffsetNewest for the offset of the message that - // will be produced next, or a time. - GetOffset(topic string, partitionID int32, time int64) (int64, error) - - // Coordinator returns the coordinating broker for a consumer group. It will - // return a locally cached value if it's available. You can call - // RefreshCoordinator to update the cached value. This function only works on - // Kafka 0.8.2 and higher. - Coordinator(consumerGroup string) (*Broker, error) - - // RefreshCoordinator retrieves the coordinator for a consumer group and stores it - // in local cache. This function only works on Kafka 0.8.2 and higher. - RefreshCoordinator(consumerGroup string) error - - // Close shuts down all broker connections managed by this client. 
It is required - // to call this function before a client object passes out of scope, as it will - // otherwise leak memory. You must close any Producers or Consumers using a client - // before you close the client. - Close() error - - // Closed returns true if the client has already had Close called on it - Closed() bool -} - -const ( - // OffsetNewest stands for the log head offset, i.e. the offset that will be - // assigned to the next message that will be produced to the partition. You - // can send this to a client's GetOffset method to get this offset, or when - // calling ConsumePartition to start consuming new messages. - OffsetNewest int64 = -1 - // OffsetOldest stands for the oldest offset available on the broker for a - // partition. You can send this to a client's GetOffset method to get this - // offset, or when calling ConsumePartition to start consuming from the - // oldest offset that is still available on the broker. - OffsetOldest int64 = -2 -) - -type client struct { - conf *Config - closer, closed chan none // for shutting down background metadata updater - - // the broker addresses given to us through the constructor are not guaranteed to be returned in - // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) - // so we store them separately - seedBrokers []*Broker - deadSeeds []*Broker - - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs - - // If the number of partitions is large, we can get some churn calling cachedPartitions, - // so the result is cached. It is important to update this value whenever metadata is changed - cachedPartitionsResults map[string][maxPartitionIndex][]int32 - - lock sync.RWMutex // protects access to the maps that hold cluster state. -} - -// NewClient creates a new Client. It connects to one of the given broker addresses -// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot -// be retrieved from any of the given broker addresses, the client is not created. 
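
The Client contract above pairs naturally with the OffsetOldest/OffsetNewest sentinels: GetOffset with -2 returns the earliest retained offset, and with -1 the offset the next produced message will receive. A usage sketch; the broker address is hypothetical and the nil config falls back to NewConfig():

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // required: clients are never garbage-collected away

	oldest, err := client.GetOffset("my_topic", 0, sarama.OffsetOldest) // -2: earliest retained offset
	if err != nil {
		log.Fatal(err)
	}
	newest, err := client.GetOffset("my_topic", 0, sarama.OffsetNewest) // -1: next offset to be assigned
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("my_topic/0 currently holds offsets [%d, %d)", oldest, newest)
}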
-func NewClient(addrs []string, conf *Config) (Client, error) { - Logger.Println("Initializing new client") - - if conf == nil { - conf = NewConfig() - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - if len(addrs) < 1 { - return nil, ConfigurationError("You must provide at least one broker address") - } - - client := &client{ - conf: conf, - closer: make(chan none), - closed: make(chan none), - brokers: make(map[int32]*Broker), - metadata: make(map[string]map[int32]*PartitionMetadata), - cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), - coordinators: make(map[string]int32), - } - - random := rand.New(rand.NewSource(time.Now().UnixNano())) - for _, index := range random.Perm(len(addrs)) { - client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) - } - - // do an initial fetch of all cluster metadata by specifing an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err - } - go withRecover(client.backgroundMetadataUpdater) - - Logger.Println("Successfully initialized new client") - - return client, nil -} - -func (client *client) Config() *Config { - return client.conf -} - -func (client *client) Close() error { - if client.Closed() { - // Chances are this is being called from a defer() and the error will go unobserved - // so we go ahead and log the event in this case. - Logger.Printf("Close() called on already closed client") - return ErrClosedClient - } - - // shutdown and wait for the background thread before we take the lock, to avoid races - close(client.closer) - <-client.closed - - client.lock.Lock() - defer client.lock.Unlock() - Logger.Println("Closing Client") - - for _, broker := range client.brokers { - safeAsyncClose(broker) - } - - for _, broker := range client.seedBrokers { - safeAsyncClose(broker) - } - - client.brokers = nil - client.metadata = nil - - return nil -} - -func (client *client) Closed() bool { - return client.brokers == nil -} - -func (client *client) Topics() ([]string, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - client.lock.RLock() - defer client.lock.RUnlock() - - ret := make([]string, 0, len(client.metadata)) - for topic := range client.metadata { - ret = append(ret, topic) - } - - return ret, nil -} - -func (client *client) Partitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, allPartitions) - - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, allPartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) WritablePartitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, writablePartitions) - - // len==0 catches when it's nil (no such topic) and the odd case when every single - // partition is undergoing leader election simultaneously. 
Callers have to be able to handle - // this function returning an empty slice (which is a valid return value) but catching it - // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers - // a metadata refresh as a nicety so callers can just try again and don't have to manually - // trigger a refresh (otherwise they'd just keep getting a stale cached copy). - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, writablePartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err - } - return dupeAndSort(metadata.Replicas), nil -} - -func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - leader, err := client.cachedLeader(topic, partitionID) - - if leader == nil { - err = client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - leader, err = client.cachedLeader(topic, partitionID) - } - - return leader, err -} - -func (client *client) RefreshMetadata(topics ...string) error { - if client.Closed() { - return ErrClosedClient - } - - // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper - // error. This handles the case by returning an error instead of sending it - // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
-	for _, topic := range topics {
-		if len(topic) == 0 {
-			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
-		}
-	}
-
-	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
-	if client.Closed() {
-		return -1, ErrClosedClient
-	}
-
-	offset, err := client.getOffset(topic, partitionID, time)
-
-	if err != nil {
-		if err := client.RefreshMetadata(topic); err != nil {
-			return -1, err
-		}
-		return client.getOffset(topic, partitionID, time)
-	}
-
-	return offset, err
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	coordinator := client.cachedCoordinator(consumerGroup)
-
-	if coordinator == nil {
-		if err := client.RefreshCoordinator(consumerGroup); err != nil {
-			return nil, err
-		}
-		coordinator = client.cachedCoordinator(consumerGroup)
-	}
-
-	if coordinator == nil {
-		return nil, ErrConsumerCoordinatorNotAvailable
-	}
-
-	_ = coordinator.Open(client.conf)
-	return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
-	if err != nil {
-		return err
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	client.registerBroker(response.Coordinator)
-	client.coordinators[consumerGroup] = response.Coordinator.ID()
-	return nil
-}
-
-// private broker management helpers
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map. You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) {
-	if client.brokers[broker.ID()] == nil {
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
-	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
-		safeAsyncClose(client.brokers[broker.ID()])
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-	}
-}
-
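A minimal usage sketch of the client API deleted above (not part of the patch itself), assuming a broker reachable at localhost:9092; the topic "my_topic", group "my-group", and client ID "my-app" are hypothetical:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.ClientID = "my-app" // leaving the default "sarama" draws a warning from Validate()

	client, err := sarama.NewClient([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close() // clients must be closed explicitly; they are not garbage-collected cleanly

	// OffsetOldest and OffsetNewest resolve, via GetOffset, to the bounds of
	// the offsets the broker still retains for a partition.
	oldest, err := client.GetOffset("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	newest, err := client.GetOffset("my_topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("my_topic/0 retains offsets [%d, %d)", oldest, newest)

	// Coordinator answers from the client's cache when it can; RefreshCoordinator
	// forces a re-fetch if the cached broker has gone stale.
	broker, err := client.Coordinator("my-group")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("coordinator for my-group: broker #%d at %s", broker.ID(), broker.Addr())
}

-// deregisterBroker removes a broker from the seedBrokers list, and if it is
-// not a seed broker, removes it from the brokers map completely.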
-func (client *client) deregisterBroker(broker *Broker) { - client.lock.Lock() - defer client.lock.Unlock() - - if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { - client.deadSeeds = append(client.deadSeeds, broker) - client.seedBrokers = client.seedBrokers[1:] - } else { - // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, - // but we really shouldn't have to; once that loop is made better this case can be - // removed, and the function generally can be renamed from `deregisterBroker` to - // `nextSeedBroker` or something - Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) - delete(client.brokers, broker.ID()) - } -} - -func (client *client) resurrectDeadBrokers() { - client.lock.Lock() - defer client.lock.Unlock() - - Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) - client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) - client.deadSeeds = nil -} - -func (client *client) any() *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - - // not guaranteed to be random *or* deterministic - for _, broker := range client.brokers { - _ = broker.Open(client.conf) - return broker - } - - return nil -} - -// private caching/lazy metadata helpers - -type partitionType int - -const ( - allPartitions partitionType = iota - writablePartitions - // If you add any more types, update the partition cache in update() - - // Ensure this is the last partition type value - maxPartitionIndex -) - -func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - return partitions[partitionID] - } - - return nil -} - -func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions, exists := client.cachedPartitionsResults[topic] - - if !exists { - return nil - } - return partitions[partitionSet] -} - -func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { - partitions := client.metadata[topic] - - if partitions == nil { - return nil - } - - ret := make([]int32, 0, len(partitions)) - for _, partition := range partitions { - if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { - continue - } - ret = append(ret, partition.ID) - } - - sort.Sort(int32Slice(ret)) - return ret -} - -func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - metadata, ok := partitions[partitionID] - if ok { - if metadata.Err == ErrLeaderNotAvailable { - return nil, ErrLeaderNotAvailable - } - b := client.brokers[metadata.Leader] - if b == nil { - return nil, ErrLeaderNotAvailable - } - _ = b.Open(client.conf) - return b, nil - } - } - - return nil, ErrUnknownTopicOrPartition -} - -func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { - broker, err := client.Leader(topic, partitionID) - if err != nil { - return -1, err - } - - request := &OffsetRequest{} - request.AddBlock(topic, partitionID, time, 1) - - response, err := broker.GetAvailableOffsets(request) - if err != nil { - _ = 
broker.Close() - return -1, err - } - - block := response.GetBlock(topic, partitionID) - if block == nil { - _ = broker.Close() - return -1, ErrIncompleteResponse - } - if block.Err != ErrNoError { - return -1, block.Err - } - if len(block.Offsets) != 1 { - return -1, ErrOffsetOutOfRange - } - - return block.Offsets[0], nil -} - -// core metadata update logic - -func (client *client) backgroundMetadataUpdater() { - defer close(client.closed) - - if client.conf.Metadata.RefreshFrequency == time.Duration(0) { - return - } - - ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := client.RefreshMetadata(); err != nil { - Logger.Println("Client background metadata update:", err) - } - case <-client.closer: - return - } - } -} - -func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { - retry := func(err error) error { - if attemptsRemaining > 0 { - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.tryRefreshMetadata(topics, attemptsRemaining-1) - } - return err - } - - for broker := client.any(); broker != nil; broker = client.any() { - if len(topics) > 0 { - Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) - } else { - Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) - } - response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) - - switch err.(type) { - case nil: - // valid response, use it - if shouldRetry, err := client.updateMetadata(response); shouldRetry { - Logger.Println("client/metadata found some partitions to be leaderless") - return retry(err) // note: err can be nil - } else { - return err - } - - case PacketEncodingError: - // didn't even send, return the error - return err - default: - // some other error, remove that broker and try again - Logger.Println("client/metadata got error from broker while fetching metadata:", err) - _ = broker.Close() - client.deregisterBroker(broker) - } - } - - Logger.Println("client/metadata no available broker to send metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} - -// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable -func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { - client.lock.Lock() - defer client.lock.Unlock() - - // For all the brokers we received: - // - if it is a new ID, save it - // - if it is an existing ID, but the address we have is stale, discard the old one and save it - // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - client.registerBroker(broker) - } - - for _, topic := range data.Topics { - delete(client.metadata, topic.Name) - delete(client.cachedPartitionsResults, topic.Name) - - switch topic.Err { - case ErrNoError: - break - case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results - err = topic.Err - continue - case ErrUnknownTopicOrPartition: // retry, do not store partial partition results - err = topic.Err - retry = true - continue - case ErrLeaderNotAvailable: // retry, but store partial partition results - retry = true - break - default: // don't retry, don't store partial results - 
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
-			err = topic.Err
-			continue
-		}
-
-		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
-		for _, partition := range topic.Partitions {
-			client.metadata[topic.Name][partition.ID] = partition
-			if partition.Err == ErrLeaderNotAvailable {
-				retry = true
-			}
-		}
-
-		var partitionCache [maxPartitionIndex][]int32
-		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
-		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
-		client.cachedPartitionsResults[topic.Name] = partitionCache
-	}
-
-	return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
-		return client.brokers[coordinatorID]
-	}
-	return nil
-}
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
-	retry := func(err error) (*ConsumerMetadataResponse, error) {
-		if attemptsRemaining > 0 {
-			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
-			time.Sleep(client.conf.Metadata.Retry.Backoff)
-			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
-		}
-		return nil, err
-	}
-
-	for broker := client.any(); broker != nil; broker = client.any() {
-		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
-		request := new(ConsumerMetadataRequest)
-		request.ConsumerGroup = consumerGroup
-
-		response, err := broker.GetConsumerMetadata(request)
-
-		if err != nil {
-			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
-			switch err.(type) {
-			case PacketEncodingError:
-				return nil, err
-			default:
-				_ = broker.Close()
-				client.deregisterBroker(broker)
-				continue
-			}
-		}
-
-		switch response.Err {
-		case ErrNoError:
-			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
-			return response, nil
-
-		case ErrConsumerCoordinatorNotAvailable:
-			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
-			// This is very ugly, but this scenario will only happen once per cluster.
-			// The __consumer_offsets topic only has to be created one time.
-			// The number of partitions is not configurable, but partition 0 should always exist.
-			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
-				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet.
Waiting 2 seconds...\n") - time.Sleep(2 * time.Second) - } - - return retry(ErrConsumerCoordinatorNotAvailable) - default: - return nil, response.Err - } - } - - Logger.Println("client/coordinator no available broker to send consumer metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} diff --git a/vendor/github.com/Shopify/sarama/client_test.go b/vendor/github.com/Shopify/sarama/client_test.go deleted file mode 100644 index b0559466f..000000000 --- a/vendor/github.com/Shopify/sarama/client_test.go +++ /dev/null @@ -1,608 +0,0 @@ -package sarama - -import ( - "io" - "sync" - "testing" - "time" -) - -func safeClose(t testing.TB, c io.Closer) { - err := c.Close() - if err != nil { - t.Error(err) - } -} - -func TestSimpleClient(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - seedBroker.Returns(new(MetadataResponse)) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestCachedPartitions(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - c, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - client := c.(*client) - - // Verify they aren't cached the same - allP := client.cachedPartitionsResults["my_topic"][allPartitions] - writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] - if len(allP) == len(writeP) { - t.Fatal("Invalid lengths!") - } - - tmp := client.cachedPartitionsResults["my_topic"] - // Verify we actually use the cache at all! 
- tmp[allPartitions] = []int32{1, 2, 3, 4} - client.cachedPartitionsResults["my_topic"] = tmp - if 4 != len(client.cachedPartitions("my_topic", allPartitions)) { - t.Fatal("Not using the cache!") - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - replicas := []int32{seedBroker.BrokerID()} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - partitions, err := client.Partitions("unknown") - - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - // Should still use the cache of a known topic - partitions, err = client.Partitions("my_topic") - if err != nil { - t.Errorf("Expected no error, found %v", err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - // Should not use cache for unknown topic - partitions, err = client.Partitions("unknown") - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientSeedBrokers(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - seedBroker.Returns(metadataResponse) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - topics, err := client.Topics() - if err != nil { - t.Error(err) - } else if len(topics) != 1 || topics[0] != "my_topic" { - t.Error("Client returned incorrect topics:", topics) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - parts, err = client.WritablePartitions("my_topic") - if err != nil { - t.Error(err) - } else if 
len(parts) != 1 || parts[0] != 0 { - t.Error("Client returned incorrect writable partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - replicas, err = client.Replicas("my_topic", 0) - if err != nil { - t.Error(err) - } else if replicas[0] != 1 { - t.Error("Incorrect (or unsorted) replica") - } else if replicas[1] != 3 { - t.Error("Incorrect (or unsorted) replica") - } else if replicas[2] != 5 { - t.Error("Incorrect (or unsorted) replica") - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientGetOffset(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadata := new(MetadataResponse) - metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadata.AddBroker(leaderAddr, leader.BrokerID()) - seedBroker.Returns(metadata) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - offsetResponse := new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 123) - leader.Returns(offsetResponse) - - offset, err := client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 123 { - t.Error("Unexpected offset, got ", offset) - } - - leader.Close() - seedBroker.Returns(metadata) - - leader = NewMockBrokerAddr(t, 2, leaderAddr) - offsetResponse = new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 456) - leader.Returns(offsetResponse) - - offset, err = client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 456 { - t.Error("Unexpected offset, got ", offset) - } - - seedBroker.Close() - leader.Close() - safeClose(t, client) -} - -func TestClientReceivingUnknownTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - metadataResponse1 := new(MetadataResponse) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataUnknownTopic := new(MetadataResponse) - metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition { - t.Error("ErrUnknownTopicOrPartition expected, got", err) - } - - // If we are asking for the leader of a partition of the non-existing topic. - // we will request metadata again. 
- seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - safeClose(t, client) - seedBroker.Close() -} - -func TestClientReceivingPartialMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} - - metadataPartial := new(MetadataResponse) - metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) - metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError) - metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable) - seedBroker.Returns(metadataPartial) - - if err := client.RefreshMetadata("new_topic"); err != nil { - t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") - } - - // Even though the metadata was incomplete, we should be able to get the leader of a partition - // for which we did get a useful response, without doing additional requests. - - partition0Leader, err := client.Leader("new_topic", 0) - if err != nil { - t.Error(err) - } else if partition0Leader.Addr() != leader.Addr() { - t.Error("Unexpected leader returned", partition0Leader.Addr()) - } - - // If we are asking for the leader of a partition that didn't have a leader before, - // we will do another metadata request. - - seedBroker.Returns(metadataPartial) - - // Still no leader for the partition, so asking for it should return an error. 
- _, err = client.Leader("new_topic", 1) - if err != ErrLeaderNotAvailable { - t.Error("Expected ErrLeaderNotAvailable, got", err) - } - - safeClose(t, client) - seedBroker.Close() - leader.Close() -} - -func TestClientRefreshBehaviour(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - metadataResponse2 := new(MetadataResponse) - metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse2) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 1 || parts[0] != 0xb { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0xb) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientResurrectDeadSeeds(t *testing.T) { - initialSeed := NewMockBroker(t, 0) - emptyMetadata := new(MetadataResponse) - initialSeed.Returns(emptyMetadata) - - conf := NewConfig() - conf.Metadata.Retry.Backoff = 0 - conf.Metadata.RefreshFrequency = 0 - c, err := NewClient([]string{initialSeed.Addr()}, conf) - if err != nil { - t.Fatal(err) - } - initialSeed.Close() - - client := c.(*client) - - seed1 := NewMockBroker(t, 1) - seed2 := NewMockBroker(t, 2) - seed3 := NewMockBroker(t, 3) - addr1 := seed1.Addr() - addr2 := seed2.Addr() - addr3 := seed3.Addr() - - // Overwrite the seed brokers with a fixed ordering to make this test deterministic. 
- safeClose(t, client.seedBrokers[0]) - client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} - client.deadSeeds = []*Broker{} - - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err := client.RefreshMetadata(); err != nil { - t.Error(err) - } - wg.Done() - }() - seed1.Close() - seed2.Close() - - seed1 = NewMockBrokerAddr(t, 1, addr1) - seed2 = NewMockBrokerAddr(t, 2, addr2) - - seed3.Close() - - seed1.Close() - seed2.Returns(emptyMetadata) - - wg.Wait() - - if len(client.seedBrokers) != 2 { - t.Error("incorrect number of live seeds") - } - if len(client.deadSeeds) != 1 { - t.Error("incorrect number of dead seeds") - } - - safeClose(t, c) -} - -func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - staleCoordinator := NewMockBroker(t, 2) - freshCoordinator := NewMockBroker(t, 3) - - replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) - metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) - metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse1) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - coordinatorResponse1 := new(ConsumerMetadataResponse) - coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable - seedBroker.Returns(coordinatorResponse1) - - coordinatorResponse2 := new(ConsumerMetadataResponse) - coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID() - coordinatorResponse2.CoordinatorHost = "127.0.0.1" - coordinatorResponse2.CoordinatorPort = staleCoordinator.Port() - - seedBroker.Returns(coordinatorResponse2) - - broker, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if staleCoordinator.Addr() != broker.Addr() { - t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) - } - - if staleCoordinator.BrokerID() != broker.ID() { - t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) - } - - // Grab the cached value - broker2, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if broker2.Addr() != broker.Addr() { - t.Errorf("Expected the coordinator to be the same, but found %s vs. 
%s", broker2.Addr(), broker.Addr())
-	}
-
-	coordinatorResponse3 := new(ConsumerMetadataResponse)
-	coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
-	coordinatorResponse3.CoordinatorHost = "127.0.0.1"
-	coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
-
-	seedBroker.Returns(coordinatorResponse3)
-
-	// Refresh the locally cached value because it's stale
-	if err := client.RefreshCoordinator("my_group"); err != nil {
-		t.Error(err)
-	}
-
-	// Grab the fresh value
-	broker3, err := client.Coordinator("my_group")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if broker3.Addr() != freshCoordinator.Addr() {
-		t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
-	}
-
-	freshCoordinator.Close()
-	staleCoordinator.Close()
-	seedBroker.Close()
-	safeClose(t, client)
-}
-
-func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
-	seedBroker := NewMockBroker(t, 1)
-	coordinator := NewMockBroker(t, 2)
-
-	metadataResponse1 := new(MetadataResponse)
-	seedBroker.Returns(metadataResponse1)
-
-	config := NewConfig()
-	config.Metadata.Retry.Max = 1
-	config.Metadata.Retry.Backoff = 0
-	client, err := NewClient([]string{seedBroker.Addr()}, config)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	coordinatorResponse1 := new(ConsumerMetadataResponse)
-	coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
-	seedBroker.Returns(coordinatorResponse1)
-
-	metadataResponse2 := new(MetadataResponse)
-	metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
-	seedBroker.Returns(metadataResponse2)
-
-	replicas := []int32{coordinator.BrokerID()}
-	metadataResponse3 := new(MetadataResponse)
-	metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
-	seedBroker.Returns(metadataResponse3)
-
-	coordinatorResponse2 := new(ConsumerMetadataResponse)
-	coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
-	coordinatorResponse2.CoordinatorHost = "127.0.0.1"
-	coordinatorResponse2.CoordinatorPort = coordinator.Port()
-
-	seedBroker.Returns(coordinatorResponse2)
-
-	broker, err := client.Coordinator("my_group")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if coordinator.Addr() != broker.Addr() {
-		t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
-	}
-
-	if coordinator.BrokerID() != broker.ID() {
-		t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
-	}
-
-	coordinator.Close()
-	seedBroker.Close()
-	safeClose(t, client)
-}
-
-func TestClientAutorefreshShutdownRace(t *testing.T) {
-	seedBroker := NewMockBroker(t, 1)
-
-	metadataResponse := new(MetadataResponse)
-	seedBroker.Returns(metadataResponse)
-
-	conf := NewConfig()
-	conf.Metadata.RefreshFrequency = 100 * time.Millisecond
-	client, err := NewClient([]string{seedBroker.Addr()}, conf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Wait for the background refresh to kick in
-	time.Sleep(110 * time.Millisecond)
-
-	done := make(chan none)
-	go func() {
-		// Close the client
-		if err := client.Close(); err != nil {
-			t.Fatal(err)
-		}
-		close(done)
-	}()
-
-	// Wait for the Close to kick in
-	time.Sleep(10 * time.Millisecond)
-
-	// Then return some metadata to the still-running background thread
-	leader := NewMockBroker(t, 2)
-	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
-	metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
-	seedBroker.Returns(metadataResponse)
-
-	<-done
-
-	seedBroker.Close()
-
-	// give the update time to happen so we get a panic if it's still running (which it shouldn't)
-	time.Sleep(10 * time.Millisecond)
-}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
deleted file mode 100644
index b61bf7ea4..000000000
--- a/vendor/github.com/Shopify/sarama/config.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package sarama
-
-import (
-	"crypto/tls"
-	"regexp"
-	"time"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
-	// Net is the namespace for network-level properties used by the Broker, and
-	// shared by the Client/Producer/Consumer.
-	Net struct {
-		// How many outstanding requests a connection is allowed to have before
-		// sending on it blocks (default 5).
-		MaxOpenRequests int
-
-		// All three of the below configurations are similar to the
-		// `socket.timeout.ms` setting in JVM kafka. All of them default
-		// to 30 seconds.
-		DialTimeout  time.Duration // How long to wait for the initial connection.
-		ReadTimeout  time.Duration // How long to wait for a response.
-		WriteTimeout time.Duration // How long to wait for a transmit.
-
-		TLS struct {
-			// Whether or not to use TLS when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// The TLS configuration to use for secure connections if
-			// enabled (defaults to nil).
-			Config *tls.Config
-		}
-
-		// SASL based authentication with broker. While there are multiple SASL authentication methods,
-		// the current implementation is limited to plaintext (SASL/PLAIN) authentication.
-		SASL struct {
-			// Whether or not to use SASL authentication when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// Username and password for SASL/PLAIN authentication.
-			User     string
-			Password string
-		}
-
-		// KeepAlive specifies the keep-alive period for an active network connection.
-		// If zero, keep-alives are disabled. (default is 0: disabled).
-		KeepAlive time.Duration
-	}
-
-	// Metadata is the namespace for metadata management properties used by the
-	// Client, and shared by the Producer/Consumer.
-	Metadata struct {
-		Retry struct {
-			// The total number of times to retry a metadata request when the
-			// cluster is in the middle of a leader election (default 3).
-			Max int
-			// How long to wait for leader election to occur before retrying
-			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
-			Backoff time.Duration
-		}
-		// How frequently to refresh the cluster metadata in the background.
-		// Defaults to 10 minutes. Set to 0 to disable. Similar to
-		// `topic.metadata.refresh.interval.ms` in the JVM version.
-		RefreshFrequency time.Duration
-	}
-
-	// Producer is the namespace for configuration related to producing messages,
-	// used by the Producer.
-	Producer struct {
-		// The maximum permitted size of a message (defaults to 1000000). Should be
-		// set equal to or smaller than the broker's `message.max.bytes`.
-		MaxMessageBytes int
-		// The level of acknowledgement reliability needed from the broker (defaults
-		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
-		// JVM producer.
-		RequiredAcks RequiredAcks
-		// The maximum duration the broker will wait for the receipt of the number of
-		// RequiredAcks (defaults to 10 seconds). This is only relevant when
-		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
-		// millisecond resolution, nanoseconds will be truncated. Equivalent to
-		// the JVM producer's `request.timeout.ms` setting.
-		Timeout time.Duration
-		// The type of compression to use on messages (defaults to no compression).
-		// Similar to `compression.codec` setting of the JVM producer.
-		Compression CompressionCodec
-		// Generates partitioners for choosing the partition to send messages to
-		// (defaults to hashing the message key). Similar to the `partitioner.class`
-		// setting for the JVM producer.
-		Partitioner PartitionerConstructor
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock.
-		Return struct {
-			// If enabled, successfully delivered messages will be returned on the
-			// Successes channel (default disabled).
-			Successes bool
-
-			// If enabled, messages that failed to deliver will be returned on the
-			// Errors channel, including error (default enabled).
-			Errors bool
-		}
-
-		// The following config options control how often messages are batched up and
-		// sent to the broker. By default, messages are sent as fast as possible, and
-		// all messages received while the current batch is in-flight are placed
-		// into the subsequent batch.
-		Flush struct {
-			// The best-effort number of bytes needed to trigger a flush. Use the
-			// global sarama.MaxRequestSize to set a hard upper limit.
-			Bytes int
-			// The best-effort number of messages needed to trigger a flush. Use
-			// `MaxMessages` to set a hard upper limit.
-			Messages int
-			// The best-effort frequency of flushes. Equivalent to
-			// `queue.buffering.max.ms` setting of JVM producer.
-			Frequency time.Duration
-			// The maximum number of messages the producer will send in a single
-			// broker request. Defaults to 0 for unlimited. Similar to
-			// `queue.buffering.max.messages` in the JVM producer.
-			MaxMessages int
-		}
-
-		Retry struct {
-			// The total number of times to retry sending a message (default 3).
-			// Similar to the `message.send.max.retries` setting of the JVM producer.
-			Max int
-			// How long to wait for the cluster to settle between retries
-			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
-			// JVM producer.
-			Backoff time.Duration
-		}
-	}
-
-	// Consumer is the namespace for configuration related to consuming messages,
-	// used by the Consumer.
-	//
-	// Note that Sarama's Consumer type does not currently support automatic
-	// consumer-group rebalancing and offset tracking. For Zookeeper-based
-	// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
-	// library builds on Sarama to add this support. For Kafka-based tracking
-	// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
-	// builds on Sarama to add this support.
-	Consumer struct {
-		Retry struct {
-			// How long to wait after failing to read from a partition before
-			// trying again (default 2s).
-			Backoff time.Duration
-		}
-
-		// Fetch is the namespace for controlling how many bytes are retrieved by any
-		// given request.
-		Fetch struct {
-			// The minimum number of message bytes to fetch in a request - the broker
-			// will wait until at least this many are available. The default is 1,
-			// as 0 causes the consumer to spin when no messages are available.
-			// Equivalent to the JVM's `fetch.min.bytes`.
-			Min int32
-			// The default number of message bytes to fetch from the broker in each
-			// request (default 32768). This should be larger than the majority of
-			// your messages, or else the consumer will spend a lot of time
-			// negotiating sizes and not actually consuming. Similar to the JVM's
-			// `fetch.message.max.bytes`.
-			Default int32
-			// The maximum number of message bytes to fetch from the broker in a
-			// single request. Messages larger than this will return
-			// ErrMessageTooLarge and will not be consumable, so you must be sure
-			// this is at least as large as your largest message. Defaults to 0
-			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
-			// global `sarama.MaxResponseSize` still applies.
-			Max int32
-		}
-		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
-		// bytes to become available before it returns fewer than that anyways. The
-		// default is 250ms, since 0 causes the consumer to spin when no events are
-		// available. 100-500ms is a reasonable range for most cases. Kafka only
-		// supports precision up to milliseconds; nanoseconds will be truncated.
-		// Equivalent to the JVM's `fetch.wait.max.ms`.
-		MaxWaitTime time.Duration
-
-		// The maximum amount of time the consumer expects a message to take to
-		// process for the user. If writing to the Messages channel takes longer than this,
-		// that partition will stop fetching more messages until it can proceed again.
-		// Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
-		MaxProcessingTime time.Duration
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from them to prevent deadlock.
-		Return struct {
-			// If enabled, any errors that occurred while consuming are returned on
-			// the Errors channel (default disabled).
-			Errors bool
-		}
-
-		// Offsets specifies configuration for how and when to commit consumed
-		// offsets. This currently requires the manual use of an OffsetManager
-		// but will eventually be automated.
-		Offsets struct {
-			// How frequently to commit updated offsets. Defaults to 1s.
-			CommitInterval time.Duration
-
-			// The initial offset to use if no offset was previously committed.
-			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
-			Initial int64
-
-			// The retention duration for committed offsets. If zero, disabled
-			// (in which case the `offsets.retention.minutes` option on the
-			// broker will be used). Kafka only supports precision up to
-			// milliseconds; nanoseconds will be truncated. Requires Kafka
-			// broker version 0.9.0 or later.
-			// (default is 0: disabled).
-			Retention time.Duration
-		}
-	}
-
-	// A user-provided string sent with every request to the brokers for logging,
-	// debugging, and auditing purposes. Defaults to "sarama", but you should
-	// probably set it to something specific to your application.
-	ClientID string
-	// The number of events to buffer in internal and external channels. This
-	// permits the producer and consumer to continue processing some messages
-	// in the background while user code is working, greatly improving throughput.
-	// Defaults to 256.
-	ChannelBufferSize int
-	// The version of Kafka that Sarama will assume it is running against.
-	// Defaults to the oldest supported stable version. Since Kafka provides
-	// backwards-compatibility, setting it to a version older than you have
-	// will not break anything, although it may prevent you from using the
-	// latest features.
Setting it to a version greater than you are actually - // running may lead to random breakage. - Version KafkaVersion -} - -// NewConfig returns a new configuration instance with sane defaults. -func NewConfig() *Config { - c := &Config{} - - c.Net.MaxOpenRequests = 5 - c.Net.DialTimeout = 30 * time.Second - c.Net.ReadTimeout = 30 * time.Second - c.Net.WriteTimeout = 30 * time.Second - - c.Metadata.Retry.Max = 3 - c.Metadata.Retry.Backoff = 250 * time.Millisecond - c.Metadata.RefreshFrequency = 10 * time.Minute - - c.Producer.MaxMessageBytes = 1000000 - c.Producer.RequiredAcks = WaitForLocal - c.Producer.Timeout = 10 * time.Second - c.Producer.Partitioner = NewHashPartitioner - c.Producer.Retry.Max = 3 - c.Producer.Retry.Backoff = 100 * time.Millisecond - c.Producer.Return.Errors = true - - c.Consumer.Fetch.Min = 1 - c.Consumer.Fetch.Default = 32768 - c.Consumer.Retry.Backoff = 2 * time.Second - c.Consumer.MaxWaitTime = 250 * time.Millisecond - c.Consumer.MaxProcessingTime = 100 * time.Millisecond - c.Consumer.Return.Errors = false - c.Consumer.Offsets.CommitInterval = 1 * time.Second - c.Consumer.Offsets.Initial = OffsetNewest - - c.ClientID = defaultClientID - c.ChannelBufferSize = 256 - c.Version = minVersion - - return c -} - -// Validate checks a Config instance. It will return a -// ConfigurationError if the specified values don't make sense. -func (c *Config) Validate() error { - // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { - Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") - } - if c.Net.SASL.Enable == false { - if c.Net.SASL.User != "" { - Logger.Println("Net.SASL is disabled but a non-empty username was provided.") - } - if c.Net.SASL.Password != "" { - Logger.Println("Net.SASL is disabled but a non-empty password was provided.") - } - } - if c.Producer.RequiredAcks > 1 { - Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") - } - if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { - Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") - } - if c.Producer.Flush.Bytes >= int(MaxRequestSize) { - Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") - } - if c.Producer.Timeout%time.Millisecond != 0 { - Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") - } - if c.Consumer.MaxWaitTime < 100*time.Millisecond { - Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. 
See documentation for details.") - } - if c.Consumer.MaxWaitTime%time.Millisecond != 0 { - Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") - } - if c.Consumer.Offsets.Retention%time.Millisecond != 0 { - Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") - } - if c.ClientID == defaultClientID { - Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") - } - - // validate Net values - switch { - case c.Net.MaxOpenRequests <= 0: - return ConfigurationError("Net.MaxOpenRequests must be > 0") - case c.Net.DialTimeout <= 0: - return ConfigurationError("Net.DialTimeout must be > 0") - case c.Net.ReadTimeout <= 0: - return ConfigurationError("Net.ReadTimeout must be > 0") - case c.Net.WriteTimeout <= 0: - return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") - case c.Net.SASL.Enable == true && c.Net.SASL.User == "": - return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") - case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": - return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") - } - - // validate the Metadata values - switch { - case c.Metadata.Retry.Max < 0: - return ConfigurationError("Metadata.Retry.Max must be >= 0") - case c.Metadata.Retry.Backoff < 0: - return ConfigurationError("Metadata.Retry.Backoff must be >= 0") - case c.Metadata.RefreshFrequency < 0: - return ConfigurationError("Metadata.RefreshFrequency must be >= 0") - } - - // validate the Producer values - switch { - case c.Producer.MaxMessageBytes <= 0: - return ConfigurationError("Producer.MaxMessageBytes must be > 0") - case c.Producer.RequiredAcks < -1: - return ConfigurationError("Producer.RequiredAcks must be >= -1") - case c.Producer.Timeout <= 0: - return ConfigurationError("Producer.Timeout must be > 0") - case c.Producer.Partitioner == nil: - return ConfigurationError("Producer.Partitioner must not be nil") - case c.Producer.Flush.Bytes < 0: - return ConfigurationError("Producer.Flush.Bytes must be >= 0") - case c.Producer.Flush.Messages < 0: - return ConfigurationError("Producer.Flush.Messages must be >= 0") - case c.Producer.Flush.Frequency < 0: - return ConfigurationError("Producer.Flush.Frequency must be >= 0") - case c.Producer.Flush.MaxMessages < 0: - return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") - case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: - return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") - case c.Producer.Retry.Max < 0: - return ConfigurationError("Producer.Retry.Max must be >= 0") - case c.Producer.Retry.Backoff < 0: - return ConfigurationError("Producer.Retry.Backoff must be >= 0") - } - - // validate the Consumer values - switch { - case c.Consumer.Fetch.Min <= 0: - return ConfigurationError("Consumer.Fetch.Min must be > 0") - case c.Consumer.Fetch.Default <= 0: - return ConfigurationError("Consumer.Fetch.Default must be > 0") - case c.Consumer.Fetch.Max < 0: - return ConfigurationError("Consumer.Fetch.Max must be >= 0") - case c.Consumer.MaxWaitTime < 1*time.Millisecond: - return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") - case c.Consumer.MaxProcessingTime <= 0: - return ConfigurationError("Consumer.MaxProcessingTime must be 
> 0") - case c.Consumer.Retry.Backoff < 0: - return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - case c.Consumer.Offsets.CommitInterval <= 0: - return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") - case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: - return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") - - } - - // validate misc shared values - switch { - case c.ChannelBufferSize < 0: - return ConfigurationError("ChannelBufferSize must be >= 0") - case !validID.MatchString(c.ClientID): - return ConfigurationError("ClientID is invalid") - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/config_test.go b/vendor/github.com/Shopify/sarama/config_test.go deleted file mode 100644 index 08bcaa421..000000000 --- a/vendor/github.com/Shopify/sarama/config_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package sarama - -import "testing" - -func TestDefaultConfigValidates(t *testing.T) { - config := NewConfig() - if err := config.Validate(); err != nil { - t.Error(err) - } -} - -func TestInvalidClientIDConfigValidates(t *testing.T) { - config := NewConfig() - config.ClientID = "foo:bar" - if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { - t.Error("Expected invalid ClientID, got ", err) - } -} - -func TestEmptyClientIDConfigValidates(t *testing.T) { - config := NewConfig() - config.ClientID = "" - if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { - t.Error("Expected invalid ClientID, got ", err) - } -} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go deleted file mode 100644 index 5271e21de..000000000 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ /dev/null @@ -1,715 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - "time" -) - -// ConsumerMessage encapsulates a Kafka message returned by the consumer. -type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 - Timestamp time.Time // only set if kafka is version 0.10+ -} - -// ConsumerError is what is provided to the user when an error occurs. -// It wraps an error and includes the topic and partition. -type ConsumerError struct { - Topic string - Partition int32 - Err error -} - -func (ce ConsumerError) Error() string { - return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) -} - -// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. -// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors -// when stopping. -type ConsumerErrors []*ConsumerError - -func (ce ConsumerErrors) Error() string { - return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) -} - -// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() -// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of -// scope. -// -// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the -// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. 
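To make the Consumer interface below concrete: a minimal consumption sketch (not part of the patch itself), again assuming a broker at localhost:9092 and a hypothetical topic "my_topic":

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil) // nil config = NewConfig() defaults
	if err != nil {
		log.Fatalln(err)
	}
	defer consumer.Close() // legal only once every child PartitionConsumer has been closed

	// Start reading my_topic/0 from the oldest offset the broker still retains.
	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close()

	// Messages() stays open until the PartitionConsumer shuts down, so in a
	// real program this loop would typically run in its own goroutine.
	for msg := range pc.Messages() {
		log.Printf("offset %d: key=%q value=%q", msg.Offset, msg.Key, msg.Value)
	}
}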
-type Consumer interface { - - // Topics returns the set of available topics as retrieved from the cluster - // metadata. This method is the same as Client.Topics(), and is provided for - // convenience. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - // This method is the same as Client.Partitions(), and is provided for convenience. - Partitions(topic string) ([]int32, error) - - // ConsumePartition creates a PartitionConsumer on the given topic/partition with - // the given offset. It will return an error if this Consumer is already consuming - // on the given topic/partition. Offset can be a literal offset, or OffsetNewest - // or OffsetOldest - ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) - - // Close shuts down the consumer. It must be called after all child - // PartitionConsumers have already been closed. - Close() error -} - -type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex - children map[string]map[int32]*partitionConsumer - brokerConsumers map[*Broker]*brokerConsumer -} - -// NewConsumer creates a new consumer using the given broker addresses and configuration. -func NewConsumer(addrs []string, config *Config) (Consumer, error) { - client, err := NewClient(addrs, config) - if err != nil { - return nil, err - } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil -} - -// NewConsumerFromClient creates a new consumer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this consumer. -func NewConsumerFromClient(client Client) (Consumer, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - c := &consumer{ - client: client, - conf: client.Config(), - children: make(map[string]map[int32]*partitionConsumer), - brokerConsumers: make(map[*Broker]*brokerConsumer), - } - - return c, nil -} - -func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil -} - -func (c *consumer) Topics() ([]string, error) { - return c.client.Topics() -} - -func (c *consumer) Partitions(topic string) ([]int32, error) { - return c.client.Partitions(topic) -} - -func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { - child := &partitionConsumer{ - consumer: c, - conf: c.conf, - topic: topic, - partition: partition, - messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), - errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), - feeder: make(chan *FetchResponse, 1), - trigger: make(chan none, 1), - dying: make(chan none), - fetchSize: c.conf.Consumer.Fetch.Default, - } - - if err := child.chooseStartingOffset(offset); err != nil { - return nil, err - } - - var leader *Broker - var err error - if leader, err = c.client.Leader(child.topic, child.partition); err != nil { - return nil, err - } - - if err := c.addChild(child); err != nil { - return nil, err - } - - go withRecover(child.dispatcher) - go withRecover(child.responseFeeder) - - child.broker = c.refBrokerConsumer(leader) - child.broker.input <- child - - return child, nil -} - -func (c *consumer) addChild(child *partitionConsumer) error { - c.lock.Lock() - defer c.lock.Unlock() - - topicChildren := c.children[child.topic] - if topicChildren == nil { - 
topicChildren = make(map[int32]*partitionConsumer) - c.children[child.topic] = topicChildren - } - - if topicChildren[child.partition] != nil { - return ConfigurationError("That topic/partition is already being consumed") - } - - topicChildren[child.partition] = child - return nil -} - -func (c *consumer) removeChild(child *partitionConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.children[child.topic], child.partition) -} - -func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { - c.lock.Lock() - defer c.lock.Unlock() - - bc := c.brokerConsumers[broker] - if bc == nil { - bc = c.newBrokerConsumer(broker) - c.brokerConsumers[broker] = bc - } - - bc.refs++ - - return bc -} - -func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - brokerWorker.refs-- - - if brokerWorker.refs == 0 { - close(brokerWorker.input) - if c.brokerConsumers[brokerWorker.broker] == brokerWorker { - delete(c.brokerConsumers, brokerWorker.broker) - } - } -} - -func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.brokerConsumers, brokerWorker.broker) -} - -// PartitionConsumer - -// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() -// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically -// when it passes out of scope. -// -// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range -// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported -// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, -// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. -// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set -// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement -// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. -type PartitionConsumer interface { - - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will - // return immediately, after which you should wait until the 'messages' and - // 'errors' channel are drained. It is required to call this function, or - // Close before a consumer object passes out of scope, as it will otherwise - // leak memory. You must call this before calling Close on the underlying client. - AsyncClose() - - // Close stops the PartitionConsumer from fetching messages. It is required to - // call this function (or AsyncClose) before a consumer object passes out of - // scope, as it will otherwise leak memory. You must call this before calling - // Close on the underlying client. - Close() error - - // Messages returns the read channel for the messages that are returned by - // the broker. - Messages() <-chan *ConsumerMessage - - // Errors returns a read channel of errors that occurred during consuming, if - // enabled. By default, errors are logged and not returned over this channel. - // If you want to implement any custom error handling, set your config's - // Consumer.Return.Errors setting to true, and read from this channel. - Errors() <-chan *ConsumerError - - // HighWaterMarkOffset returns the high water mark offset of the partition, - // i.e. 
the offset that will be used for the next message that will be produced. - // You can use this to determine how far behind the processing is. - HighWaterMarkOffset() int64 -} - -type partitionConsumer struct { - consumer *consumer - conf *Config - topic string - partition int32 - - broker *brokerConsumer - messages chan *ConsumerMessage - errors chan *ConsumerError - feeder chan *FetchResponse - - trigger, dying chan none - responseResult error - - fetchSize int32 - offset int64 - highWaterMarkOffset int64 -} - -var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing - -func (child *partitionConsumer) sendError(err error) { - cErr := &ConsumerError{ - Topic: child.topic, - Partition: child.partition, - Err: err, - } - - if child.conf.Consumer.Return.Errors { - child.errors <- cErr - } else { - Logger.Println(cErr) - } -} - -func (child *partitionConsumer) dispatcher() { - for _ = range child.trigger { - select { - case <-child.dying: - close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - child.broker = nil - } - - Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) - if err := child.dispatch(); err != nil { - child.sendError(err) - child.trigger <- none{} - } - } - } - - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - } - child.consumer.removeChild(child) - close(child.feeder) -} - -func (child *partitionConsumer) dispatch() error { - if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { - return err - } - - var leader *Broker - var err error - if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { - return err - } - - child.broker = child.consumer.refBrokerConsumer(leader) - - child.broker.input <- child - - return nil -} - -func (child *partitionConsumer) chooseStartingOffset(offset int64) error { - newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) - if err != nil { - return err - } - oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) - if err != nil { - return err - } - - switch { - case offset == OffsetNewest: - child.offset = newestOffset - case offset == OffsetOldest: - child.offset = oldestOffset - case offset >= oldestOffset && offset <= newestOffset: - child.offset = offset - default: - return ErrOffsetOutOfRange - } - - return nil -} - -func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { - return child.messages -} - -func (child *partitionConsumer) Errors() <-chan *ConsumerError { - return child.errors -} - -func (child *partitionConsumer) AsyncClose() { - // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes - // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and - // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will - // also just close itself) - close(child.dying) -} - -func (child *partitionConsumer) Close() error { - child.AsyncClose() - - go withRecover(func() { - for _ = range child.messages { - // drain - } - }) - - var errors ConsumerErrors - for err := range child.errors { - errors = append(errors, err) - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (child *partitionConsumer) HighWaterMarkOffset() int64 { - return 
atomic.LoadInt64(&child.highWaterMarkOffset) -} - -func (child *partitionConsumer) responseFeeder() { - var msgs []*ConsumerMessage - expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) - expireTimedOut := false - -feederLoop: - for response := range child.feeder { - msgs, child.responseResult = child.parseResponse(response) - - for i, msg := range msgs { - if !expiryTimer.Stop() && !expireTimedOut { - // expiryTimer was expired; clear out the waiting msg - <-expiryTimer.C - } - expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime) - expireTimedOut = false - - select { - case child.messages <- msg: - case <-expiryTimer.C: - expireTimedOut = true - child.responseResult = errTimedOut - child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg - } - child.broker.input <- child - continue feederLoop - } - } - - child.broker.acks.Done() - } - - close(child.messages) - close(child.errors) -} - -func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { - block := response.GetBlock(child.topic, child.partition) - if block == nil { - return nil, ErrIncompleteResponse - } - - if block.Err != ErrNoError { - return nil, block.Err - } - - if len(block.MsgSet.Messages) == 0 { - // We got no messages. If we got a trailing one then we need to ask for more data. - // Otherwise we just poll again and wait for one to be produced... - if block.MsgSet.PartialTrailingMessage { - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { - // we can't ask for more data, we've hit the configured limit - child.sendError(ErrMessageTooLarge) - child.offset++ // skip this one so we can keep processing future messages - } else { - child.fetchSize *= 2 - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { - child.fetchSize = child.conf.Consumer.Fetch.Max - } - } - } - - return nil, nil - } - - // we got messages, reset our fetch size in case it was increased for a previous request - child.fetchSize = child.conf.Consumer.Fetch.Default - atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - - incomplete := false - prelude := true - var messages []*ConsumerMessage - for _, msgBlock := range block.MsgSet.Messages { - - for _, msg := range msgBlock.Messages() { - offset := msg.Offset - if msg.Msg.Version >= 1 { - baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset - offset += baseOffset - } - if prelude && offset < child.offset { - continue - } - prelude = false - - if offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: offset, - Timestamp: msg.Msg.Timestamp, - }) - child.offset = offset + 1 - } else { - incomplete = true - } - } - - } - - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse - } - return messages, nil -} - -// brokerConsumer - -type brokerConsumer struct { - consumer *consumer - broker *Broker - input chan *partitionConsumer - newSubscriptions chan []*partitionConsumer - wait chan none - subscriptions map[*partitionConsumer]none - acks sync.WaitGroup - refs int -} - -func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { - bc := &brokerConsumer{ - consumer: c, - broker: broker, - input: make(chan *partitionConsumer), - newSubscriptions: make(chan []*partitionConsumer), - wait: make(chan none), - subscriptions: 
make(map[*partitionConsumer]none), - refs: 0, - } - - go withRecover(bc.subscriptionManager) - go withRecover(bc.subscriptionConsumer) - - return bc -} - -func (bc *brokerConsumer) subscriptionManager() { - var buffer []*partitionConsumer - - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available, - // so the main goroutine can block waiting for work if it has none. - for { - if len(buffer) > 0 { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- buffer: - buffer = nil - case bc.wait <- none{}: - } - } else { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- nil: - } - } - } - -done: - close(bc.wait) - if len(buffer) > 0 { - bc.newSubscriptions <- buffer - } - close(bc.newSubscriptions) -} - -func (bc *brokerConsumer) subscriptionConsumer() { - <-bc.wait // wait for our first piece of work - - // the subscriptionManager ensures we will get nil right away if no new subscriptions are available - for newSubscriptions := range bc.newSubscriptions { - bc.updateSubscriptions(newSubscriptions) - - if len(bc.subscriptions) == 0 { - // We're about to be shut down or we're about to receive more subscriptions. - // Either way, the signal just hasn't propagated to our goroutine yet. - <-bc.wait - continue - } - - response, err := bc.fetchNewMessages() - - if err != nil { - Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) - bc.abort(err) - return - } - - bc.acks.Add(len(bc.subscriptions)) - for child := range bc.subscriptions { - child.feeder <- response - } - bc.acks.Wait() - bc.handleResponses() - } -} - -func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { - for _, child := range newSubscriptions { - bc.subscriptions[child] = none{} - Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - } - - for child := range bc.subscriptions { - select { - case <-child.dying: - Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - close(child.trigger) - delete(bc.subscriptions, child) - default: - break - } - } -} - -func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed - for child := range bc.subscriptions { - result := child.responseResult - child.responseResult = nil - - switch result { - case nil: - break - case errTimedOut: - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", - bc.broker.ID(), child.topic, child.partition) - delete(bc.subscriptions, child) - case ErrOffsetOutOfRange: - // there's no point in retrying this; it will just fail the same way again - // shut it down and force the user to choose what to do - child.sendError(result) - Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) - close(child.trigger) -
delete(bc.subscriptions, child) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: - // not an error, but does need redispatching - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - default: - // dunno, tell the user and try redispatching - child.sendError(result) - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - } - } -} - -func (bc *brokerConsumer) abort(err error) { - bc.consumer.abandonBrokerConsumer(bc) - _ = bc.broker.Close() // we don't care about the error this might return, we already have one - - for child := range bc.subscriptions { - child.sendError(err) - child.trigger <- none{} - } - - for newSubscriptions := range bc.newSubscriptions { - if len(newSubscriptions) == 0 { - <-bc.wait - continue - } - for _, child := range newSubscriptions { - child.sendError(err) - child.trigger <- none{} - } - } -} - -func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { - request := &FetchRequest{ - MinBytes: bc.consumer.conf.Consumer.Fetch.Min, - MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), - } - if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { - request.Version = 2 - } - - for child := range bc.subscriptions { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) - } - - return bc.broker.Fetch(request) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go deleted file mode 100644 index 9d92d350a..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ /dev/null @@ -1,94 +0,0 @@ -package sarama - -type ConsumerGroupMemberMetadata struct { - Version int16 - Topics []string - UserData []byte -} - -func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { - pe.putInt16(m.Version) - - if err := pe.putStringArray(m.Topics); err != nil { - return err - } - - if err := pe.putBytes(m.UserData); err != nil { - return err - } - - return nil -} - -func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { - if m.Version, err = pd.getInt16(); err != nil { - return - } - - if m.Topics, err = pd.getStringArray(); err != nil { - return - } - - if m.UserData, err = pd.getBytes(); err != nil { - return - } - - return nil -} - -type ConsumerGroupMemberAssignment struct { - Version int16 - Topics map[string][]int32 - UserData []byte -} - -func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { - pe.putInt16(m.Version) - - if err := pe.putArrayLength(len(m.Topics)); err != nil { - return err - } - - for topic, partitions := range m.Topics { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putInt32Array(partitions); err != nil { - return err - } - } - - if err := pe.putBytes(m.UserData); err != nil { - return err - } - - return nil -} - -func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { - if m.Version, err = pd.getInt16(); err != nil { - return - } - - var topicLen int - if topicLen, err = pd.getArrayLength(); err != nil { - return - } - - m.Topics = make(map[string][]int32, topicLen) - for i := 0; i < topicLen; i++ { - var topic string - if topic, err = pd.getString(); err != 
nil { - return - } - if m.Topics[topic], err = pd.getInt32Array(); err != nil { - return - } - } - - if m.UserData, err = pd.getBytes(); err != nil { - return - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go deleted file mode 100644 index 1c1d154ab..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -import ( - "bytes" - "reflect" - "testing" -) - -var ( - groupMemberMetadata = []byte{ - 0, 1, // Version - 0, 0, 0, 2, // Topic array length - 0, 3, 'o', 'n', 'e', // Topic one - 0, 3, 't', 'w', 'o', // Topic two - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata - } - groupMemberAssignment = []byte{ - 0, 1, // Version - 0, 0, 0, 1, // Topic array length - 0, 3, 'o', 'n', 'e', // Topic one - 0, 0, 0, 3, // Topic one, partition array length - 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4 - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata - } -) - -func TestConsumerGroupMemberMetadata(t *testing.T) { - meta := &ConsumerGroupMemberMetadata{ - Version: 1, - Topics: []string{"one", "two"}, - UserData: []byte{0x01, 0x02, 0x03}, - } - - buf, err := encode(meta) - if err != nil { - t.Error("Failed to encode data", err) - } else if !bytes.Equal(groupMemberMetadata, buf) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf) - } - - meta2 := new(ConsumerGroupMemberMetadata) - err = decode(buf, meta2) - if err != nil { - t.Error("Failed to decode data", err) - } else if !reflect.DeepEqual(meta, meta2) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2) - } -} - -func TestConsumerGroupMemberAssignment(t *testing.T) { - amt := &ConsumerGroupMemberAssignment{ - Version: 1, - Topics: map[string][]int32{ - "one": []int32{0, 2, 4}, - }, - UserData: []byte{0x01, 0x02, 0x03}, - } - - buf, err := encode(amt) - if err != nil { - t.Error("Failed to encode data", err) - } else if !bytes.Equal(groupMemberAssignment, buf) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf) - } - - amt2 := new(ConsumerGroupMemberAssignment) - err = decode(buf, amt2) - if err != nil { - t.Error("Failed to decode data", err) - } else if !reflect.DeepEqual(amt, amt2) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2) - } -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go deleted file mode 100644 index 483be3354..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ /dev/null @@ -1,26 +0,0 @@ -package sarama - -type ConsumerMetadataRequest struct { - ConsumerGroup string -} - -func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { - return pe.putString(r.ConsumerGroup) -} - -func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { - r.ConsumerGroup, err = pd.getString() - return err -} - -func (r *ConsumerMetadataRequest) key() int16 { - return 10 -} - -func (r *ConsumerMetadataRequest) version() int16 { - return 0 -} - -func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { - return V0_8_2_0 -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go deleted file mode 100644 index 4509631a0..000000000 --- 
a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package sarama - -import "testing" - -var ( - consumerMetadataRequestEmpty = []byte{ - 0x00, 0x00} - - consumerMetadataRequestString = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'} -) - -func TestConsumerMetadataRequest(t *testing.T) { - request := new(ConsumerMetadataRequest) - testRequest(t, "empty string", request, consumerMetadataRequestEmpty) - - request.ConsumerGroup = "foobar" - testRequest(t, "with string", request, consumerMetadataRequestString) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go deleted file mode 100644 index 6b9632bba..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ /dev/null @@ -1,85 +0,0 @@ -package sarama - -import ( - "net" - "strconv" -) - -type ConsumerMetadataResponse struct { - Err KError - Coordinator *Broker - CoordinatorID int32 // deprecated: use Coordinator.ID() - CoordinatorHost string // deprecated: use Coordinator.Addr() - CoordinatorPort int32 // deprecated: use Coordinator.Addr() -} - -func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) - - coordinator := new(Broker) - if err := coordinator.decode(pd); err != nil { - return err - } - if coordinator.addr == ":0" { - return nil - } - r.Coordinator = coordinator - - // this can all go away in 2.0, but we have to fill in deprecated fields to maintain - // backwards compatibility - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - r.CoordinatorID = r.Coordinator.ID() - r.CoordinatorHost = host - r.CoordinatorPort = int32(port) - - return nil -} - -func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if r.Coordinator != nil { - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - pe.putInt32(r.Coordinator.ID()) - if err := pe.putString(host); err != nil { - return err - } - pe.putInt32(int32(port)) - return nil - } - pe.putInt32(r.CoordinatorID) - if err := pe.putString(r.CoordinatorHost); err != nil { - return err - } - pe.putInt32(r.CoordinatorPort) - return nil -} - -func (r *ConsumerMetadataResponse) key() int16 { - return 10 -} - -func (r *ConsumerMetadataResponse) version() int16 { - return 0 -} - -func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { - return V0_8_2_0 -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go deleted file mode 100644 index b748784d7..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package sarama - -import "testing" - -var ( - consumerMetadataResponseError = []byte{ - 0x00, 0x0E, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - consumerMetadataResponseSuccess = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0xAB, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0xCC, 0xDD} -) - -func TestConsumerMetadataResponseError(t *testing.T) { - response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} - testResponse(t, 
"error", &response, consumerMetadataResponseError) -} - -func TestConsumerMetadataResponseSuccess(t *testing.T) { - broker := NewBroker("foo:52445") - broker.id = 0xAB - response := ConsumerMetadataResponse{ - Coordinator: broker, - CoordinatorID: 0xAB, - CoordinatorHost: "foo", - CoordinatorPort: 0xCCDD, - Err: ErrNoError, - } - testResponse(t, "success", &response, consumerMetadataResponseSuccess) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_test.go b/vendor/github.com/Shopify/sarama/consumer_test.go deleted file mode 100644 index 387ede314..000000000 --- a/vendor/github.com/Shopify/sarama/consumer_test.go +++ /dev/null @@ -1,854 +0,0 @@ -package sarama - -import ( - "log" - "os" - "os/signal" - "sync" - "testing" - "time" -) - -var testMsg = StringEncoder("Foo") - -// If a particular offset is provided then messages are consumed starting from -// that offset. -func TestConsumerOffsetManual(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 2345), - "FetchRequest": mockFetchResponse, - }) - - // When - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - consumer, err := master.ConsumePartition("my_topic", 0, 1234) - if err != nil { - t.Fatal(err) - } - - // Then: messages starting from offset 1234 are consumed. - for i := 0; i < 10; i++ { - select { - case message := <-consumer.Messages(): - assertMessageOffset(t, message, int64(i+1234)) - case err := <-consumer.Errors(): - t.Error(err) - } - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// If `OffsetNewest` is passed as the initial offset then the first consumed -// message is indeed corresponds to the offset that broker claims to be the -// newest in its metadata response. -func TestConsumerOffsetNewest(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 10). - SetOffset("my_topic", 0, OffsetOldest, 7), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 9, testMsg). - SetMessage("my_topic", 0, 10, testMsg). - SetMessage("my_topic", 0, 11, testMsg). - SetHighWaterMark("my_topic", 0, 14), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-consumer.Messages(), 10) - if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 { - t.Errorf("Expected high water mark offset 14, found %d", hwmo) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is possible to close a partition consumer and create the same anew. 
-func TestConsumerRecreate(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 10, testMsg), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - assertMessageOffset(t, <-pc.Messages(), 10) - - // When - safeClose(t, pc) - pc, err = c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-pc.Messages(), 10) - - safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// An attempt to consume the same partition twice should fail. -func TestConsumerDuplicate(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc1, err := c.ConsumePartition("my_topic", 0, 0) - if err != nil { - t.Fatal(err) - } - - // When - pc2, err := c.ConsumePartition("my_topic", 0, 0) - - // Then - if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") { - t.Fatal("A partition cannot be consumed twice at the same time") - } - - safeClose(t, pc1) - safeClose(t, c) - broker0.Close() -} - -// If consumer fails to refresh metadata it keeps retrying with frequency -// specified by `Config.Consumer.Retry.Backoff`. -func TestConsumerLeaderRefreshError(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - - // Stage 1: my_topic/0 served by broker0 - Logger.Printf(" STAGE 1") - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 200 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // Stage 2: broker0 says that it is no longer the leader for my_topic/0, - // but the requests to retrieve metadata fail with network timeout. 
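The failure this stage provokes is only visible to callers who opt in to the Errors() channel; by default it would only reach sarama.Logger. A minimal sketch of that wiring, assuming a reachable broker; the address and topic are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Consumer.Return.Errors = true // deliver errors on Errors() instead of only logging them

	c, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg) // placeholder address
	if err != nil {
		log.Fatalln(err)
	}
	defer c.Close()

	pc, err := c.ConsumePartition("my_topic", 0, sarama.OffsetOldest) // placeholder topic
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close()

	go func() {
		for cerr := range pc.Errors() {
			// e.g. ErrOutOfBrokers while a new leader is being resolved
			log.Printf("consume error on %s/%d: %v", cerr.Topic, cerr.Partition, cerr.Err)
		}
	}()

	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
	}
}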
- Logger.Printf(" STAGE 2") - - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse2), - }) - - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Stage 3: finally the metadata returned by broker0 tells that broker1 is - // a new leader for my_topic/0. Consumption resumes. - - Logger.Printf(" STAGE 3") - - broker1 := NewMockBroker(t, 101) - - broker1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 124, testMsg), - }) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker1.BrokerID()), - }) - - assertMessageOffset(t, <-pc.Messages(), 124) - - safeClose(t, pc) - safeClose(t, c) - broker1.Close() - broker0.Close() -} - -func TestConsumerInvalidTopic(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - - // Then - if pc != nil || err != ErrUnknownTopicOrPartition { - t.Errorf("Should fail with, err=%v", err) - } - - safeClose(t, c) - broker0.Close() -} - -// Nothing bad happens if a partition consumer that has no leader assigned at -// the moment is closed. -func TestConsumerClosePartitionWithoutLeader(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // broker0 says that it is no longer the leader for my_topic/0, but the - // requests to retrieve metadata fail with network timeout. - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse2), - }) - - // When - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Then: the partition consumer can be closed without any problem. 
- safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// If the initial offset passed on partition consumer creation is out of the -// actual offset range for the partition, then the partition consumer stops -// immediately, closing its output channels. -func TestConsumerShutsDownOutOfRange(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 7), - "FetchRequest": NewMockWrapper(fetchResponse), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 101) - if err != nil { - t.Fatal(err) - } - - // Then: consumer should shut down closing its messages and errors channels. - if _, ok := <-consumer.Messages(); ok { - t.Error("Expected the consumer to shut down") - } - safeClose(t, consumer) - - safeClose(t, master) - broker0.Close() -} - -// If a fetch response contains messages with offsets that are smaller than -// requested, then such messages are ignored. -func TestConsumerExtraOffsets(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - fetchResponse1 := &FetchResponse{} - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4) - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNoError) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 and 2 are not returned even though they - // are present in the response. - assertMessageOffset(t, <-consumer.Messages(), 3) - assertMessageOffset(t, <-consumer.Messages(), 4) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is fine if offsets of fetched messages are not sequential (as long as -// they are still strictly increasing!). -func TestConsumerNonSequentialOffsets(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - fetchResponse1 := &FetchResponse{} - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11) - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNoError) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t).
- SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 and 2 are not returned even though they - // are present in the response. - assertMessageOffset(t, <-consumer.Messages(), 5) - assertMessageOffset(t, <-consumer.Messages(), 7) - assertMessageOffset(t, <-consumer.Messages(), 11) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// If leadership for a partition is changing then consumer resolves the new -// leader and switches to it. -func TestConsumerRebalancingMultiplePartitions(t *testing.T) { - // initial setup - seedBroker := NewMockBroker(t, 10) - leader0 := NewMockBroker(t, 0) - leader1 := NewMockBroker(t, 1) - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(leader0.Addr(), leader0.BrokerID()). - SetBroker(leader1.Addr(), leader1.BrokerID()). - SetLeader("my_topic", 0, leader0.BrokerID()). - SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - mockOffsetResponse1 := NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000). - SetOffset("my_topic", 1, OffsetOldest, 0). - SetOffset("my_topic", 1, OffsetNewest, 1000) - leader0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": NewMockFetchResponse(t, 1), - }) - leader1.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": NewMockFetchResponse(t, 1), - }) - - // launch test goroutines - config := NewConfig() - config.Consumer.Retry.Backoff = 50 - master, err := NewConsumer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // we expect to end up (eventually) consuming exactly ten messages on each partition - var wg sync.WaitGroup - for i := int32(0); i < 2; i++ { - consumer, err := master.ConsumePartition("my_topic", i, 0) - if err != nil { - t.Error(err) - } - - go func(c PartitionConsumer) { - for err := range c.Errors() { - t.Error(err) - } - }(consumer) - - wg.Add(1) - go func(partition int32, c PartitionConsumer) { - for i := 0; i < 10; i++ { - message := <-consumer.Messages() - if message.Offset != int64(i) { - t.Error("Incorrect message offset!", i, partition, message.Offset) - } - if message.Partition != partition { - t.Error("Incorrect message partition!") - } - } - safeClose(t, consumer) - wg.Done() - }(i, consumer) - } - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 1") - // Stage 1: - // * my_topic/0 -> leader0 serves 4 messages - // * my_topic/1 -> leader1 serves 0 messages - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 4; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 2") - // Stage 2: - // * leader0 says that it is no longer serving my_topic/0 - // * seedBroker tells that leader1 is serving my_topic/0 now - - // seed broker tells that the new partition 0 leader is leader1 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). 
- SetLeader("my_topic", 0, leader1.BrokerID()). - SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - // leader0 says no longer leader of partition 0 - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse), - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 3") - // Stage 3: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 server 8 messages - - // leader1 provides 3 message on partition 0, and 8 messages on partition 1 - mockFetchResponse2 := NewMockFetchResponse(t, 2) - for i := 4; i < 7; i++ { - mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) - } - for i := 0; i < 8; i++ { - mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse2, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 4") - // Stage 4: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 tells that it is no longer the leader - // * seedBroker tells that leader0 is a new leader for my_topic/1 - - // metadata assigns 0 to leader1 and 1 to leader0 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetLeader("my_topic", 0, leader1.BrokerID()). - SetLeader("my_topic", 1, leader0.BrokerID()), - }) - - // leader1 provides three more messages on partition0, says no longer leader of partition1 - mockFetchResponse3 := NewMockFetchResponse(t, 3). - SetMessage("my_topic", 0, int64(7), testMsg). - SetMessage("my_topic", 0, int64(8), testMsg). - SetMessage("my_topic", 0, int64(9), testMsg) - fetchResponse4 := new(FetchResponse) - fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition) - leader1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4), - }) - - // leader0 provides two messages on partition 1 - mockFetchResponse4 := NewMockFetchResponse(t, 2) - for i := 8; i < 10; i++ { - mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse4, - }) - - wg.Wait() - safeClose(t, master) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -// When two partitions have the same broker as the leader, if one partition -// consumer channel buffer is full then that does not affect the ability to -// read messages by the other consumer. -func TestConsumerInterleavedClose(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). - SetOffset("my_topic", 1, OffsetNewest, 2100), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 1000, testMsg). - SetMessage("my_topic", 0, 1001, testMsg). - SetMessage("my_topic", 0, 1002, testMsg). 
- SetMessage("my_topic", 1, 2000, testMsg), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - master, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // When/Then: we can read from partition 0 even if nobody reads from partition 1 - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c0.Messages(), 1001) - assertMessageOffset(t, <-c0.Messages(), 1002) - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() -} - -func TestConsumerBounceWithReferenceOpen(t *testing.T) { - broker0 := NewMockBroker(t, 0) - broker0Addr := broker0.Addr() - broker1 := NewMockBroker(t, 1) - - mockMetadataResponse := NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker1.BrokerID()) - - mockOffsetResponse := NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). - SetOffset("my_topic", 1, OffsetNewest, 2100) - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) - mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - broker1.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": mockMetadataResponse, - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - - config := NewConfig() - config.Consumer.Return.Errors = true - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.ChannelBufferSize = 1 - master, err := NewConsumer([]string{broker1.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // read messages from both partition to make sure that both brokers operate - // normally. - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c1.Messages(), 2000) - - // Simulate broker shutdown. Note that metadata response does not change, - // that is the leadership does not move to another broker. So partition - // consumer will keep retrying to restore the connection with the broker. - broker0.Close() - - // Make sure that while the partition/0 leader is down, consumer/partition/1 - // is capable of pulling messages from broker1. - for i := 1; i < 7; i++ { - offset := (<-c1.Messages()).Offset - if offset != int64(2000+i) { - t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) - } - } - - // Bring broker0 back to service. - broker0 = NewMockBrokerAddr(t, 0, broker0Addr) - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - // Read the rest of messages from both partitions. 
- for i := 7; i < 10; i++ { - assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) - } - for i := 1; i < 10; i++ { - assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) - } - - select { - case <-c0.Errors(): - default: - t.Errorf("Partition consumer should have detected broker restart") - } - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() - broker1.Close() -} - -func TestConsumerOffsetOutOfRange(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 2) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 2345), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When/Then - if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, master) - broker0.Close() -} - -func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { - if msg.Offset != expectedOffset { - t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) - } -} - -// This example shows how to use the consumer to read messages -// from a single partition. -func ExampleConsumer() { - consumer, err := NewConsumer([]string{"localhost:9092"}, nil) - if err != nil { - panic(err) - } - - defer func() { - if err := consumer.Close(); err != nil { - log.Fatalln(err) - } - }() - - partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest) - if err != nil { - panic(err) - } - - defer func() { - if err := partitionConsumer.Close(); err != nil { - log.Fatalln(err) - } - }() - - // Trap SIGINT to trigger a shutdown. - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - consumed := 0 -ConsumerLoop: - for { - select { - case msg := <-partitionConsumer.Messages(): - log.Printf("Consumed message offset %d\n", msg.Offset) - consumed++ - case <-signals: - break ConsumerLoop - } - } - - log.Printf("Consumed: %d\n", consumed) -} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go deleted file mode 100644 index 5c2860790..000000000 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ /dev/null @@ -1,36 +0,0 @@ -package sarama - -import ( - "encoding/binary" - - "github.com/klauspost/crc32" -) - -// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
-type crc32Field struct { - startOffset int -} - -func (c *crc32Field) saveOffset(in int) { - c.startOffset = in -} - -func (c *crc32Field) reserveLength() int { - return 4 -} - -func (c *crc32Field) run(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) - binary.BigEndian.PutUint32(buf[c.startOffset:], crc) - return nil -} - -func (c *crc32Field) check(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) - - if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { - return PacketDecodingError{"CRC didn't match"} - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go deleted file mode 100644 index 1fb356777..000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ /dev/null @@ -1,30 +0,0 @@ -package sarama - -type DescribeGroupsRequest struct { - Groups []string -} - -func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { - return pe.putStringArray(r.Groups) -} - -func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - r.Groups, err = pd.getStringArray() - return -} - -func (r *DescribeGroupsRequest) key() int16 { - return 15 -} - -func (r *DescribeGroupsRequest) version() int16 { - return 0 -} - -func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *DescribeGroupsRequest) AddGroup(group string) { - r.Groups = append(r.Groups, group) -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go deleted file mode 100644 index 7d45f3fee..000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyDescribeGroupsRequest = []byte{0, 0, 0, 0} - - singleDescribeGroupsRequest = []byte{ - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name: foo - } - - doubleDescribeGroupsRequest = []byte{ - 0, 0, 0, 2, // 2 groups - 0, 3, 'f', 'o', 'o', // group name: foo - 0, 3, 'b', 'a', 'r', // group name: bar - } -) - -func TestDescribeGroupsRequest(t *testing.T) { - var request *DescribeGroupsRequest - - request = new(DescribeGroupsRequest) - testRequest(t, "no groups", request, emptyDescribeGroupsRequest) - - request = new(DescribeGroupsRequest) - request.AddGroup("foo") - testRequest(t, "one group", request, singleDescribeGroupsRequest) - - request = new(DescribeGroupsRequest) - request.AddGroup("foo") - request.AddGroup("bar") - testRequest(t, "two groups", request, doubleDescribeGroupsRequest) -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go deleted file mode 100644 index e78b8ce02..000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ /dev/null @@ -1,174 +0,0 @@ -package sarama - -type DescribeGroupsResponse struct { - Groups []*GroupDescription -} - -func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - - for _, groupDescription := range r.Groups { - if err := groupDescription.encode(pe); err != nil { - return err - } - } - - return nil -} - -func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Groups =
make([]*GroupDescription, n) - for i := 0; i < n; i++ { - r.Groups[i] = new(GroupDescription) - if err := r.Groups[i].decode(pd); err != nil { - return err - } - } - - return nil -} - -func (r *DescribeGroupsResponse) key() int16 { - return 15 -} - -func (r *DescribeGroupsResponse) version() int16 { - return 0 -} - -func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -type GroupDescription struct { - Err KError - GroupId string - State string - ProtocolType string - Protocol string - Members map[string]*GroupMemberDescription -} - -func (gd *GroupDescription) encode(pe packetEncoder) error { - pe.putInt16(int16(gd.Err)) - - if err := pe.putString(gd.GroupId); err != nil { - return err - } - if err := pe.putString(gd.State); err != nil { - return err - } - if err := pe.putString(gd.ProtocolType); err != nil { - return err - } - if err := pe.putString(gd.Protocol); err != nil { - return err - } - - if err := pe.putArrayLength(len(gd.Members)); err != nil { - return err - } - - for memberId, groupMemberDescription := range gd.Members { - if err := pe.putString(memberId); err != nil { - return err - } - if err := groupMemberDescription.encode(pe); err != nil { - return err - } - } - - return nil -} - -func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - gd.Err = KError(kerr) - } - - if gd.GroupId, err = pd.getString(); err != nil { - return - } - if gd.State, err = pd.getString(); err != nil { - return - } - if gd.ProtocolType, err = pd.getString(); err != nil { - return - } - if gd.Protocol, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - gd.Members = make(map[string]*GroupMemberDescription) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - - gd.Members[memberId] = new(GroupMemberDescription) - if err := gd.Members[memberId].decode(pd); err != nil { - return err - } - } - - return nil -} - -type GroupMemberDescription struct { - ClientId string - ClientHost string - MemberMetadata []byte - MemberAssignment []byte -} - -func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { - if err := pe.putString(gmd.ClientId); err != nil { - return err - } - if err := pe.putString(gmd.ClientHost); err != nil { - return err - } - if err := pe.putBytes(gmd.MemberMetadata); err != nil { - return err - } - if err := pe.putBytes(gmd.MemberAssignment); err != nil { - return err - } - - return nil -} - -func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { - if gmd.ClientId, err = pd.getString(); err != nil { - return - } - if gmd.ClientHost, err = pd.getString(); err != nil { - return - } - if gmd.MemberMetadata, err = pd.getBytes(); err != nil { - return - } - if gmd.MemberAssignment, err = pd.getBytes(); err != nil { - return - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go deleted file mode 100644 index dd3973191..000000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - describeGroupsResponseEmpty = []byte{ - 0, 0, 0, 0, // no groups - } - - describeGroupsResponsePopulated = []byte{ - 0, 0, 0, 2, // 2 groups - - 0, 0, // no error - 0, 3, 'f', 'o', 'o', // Group ID - 0, 
3, 'b', 'a', 'r', // State - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol type (consumer) - 0, 3, 'b', 'a', 'z', // Protocol name - 0, 0, 0, 1, // 1 member - 0, 2, 'i', 'd', // Member ID - 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID - 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host - 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata - 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment - - 0, 30, // ErrGroupAuthorizationFailed - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, 0, 0, - } -) - -func TestDescribeGroupsResponse(t *testing.T) { - var response *DescribeGroupsResponse - - response = new(DescribeGroupsResponse) - testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0) - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = new(DescribeGroupsResponse) - testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0) - if len(response.Groups) != 2 { - t.Error("Expected two groups") - } - - group0 := response.Groups[0] - if group0.Err != ErrNoError { - t.Error("Unexpected groups[0].Err, found", group0.Err) - } - if group0.GroupId != "foo" { - t.Error("Unexpected groups[0].GroupId, found", group0.GroupId) - } - if group0.State != "bar" { - t.Error("Unexpected groups[0].State, found", group0.State) - } - if group0.ProtocolType != "consumer" { - t.Error("Unexpected groups[0].ProtocolType, found", group0.ProtocolType) - } - if group0.Protocol != "baz" { - t.Error("Unexpected groups[0].Protocol, found", group0.Protocol) - } - if len(group0.Members) != 1 { - t.Error("Unexpected groups[0].Members, found", group0.Members) - } - if group0.Members["id"].ClientId != "sarama" { - t.Error("Unexpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId) - } - if group0.Members["id"].ClientHost != "localhost" { - t.Error("Unexpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost) - } - if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) { - t.Error("Unexpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata) - } - if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) { - t.Error("Unexpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment) - } - - group1 := response.Groups[1] - if group1.Err != ErrGroupAuthorizationFailed { - t.Error("Unexpected groups[1].Err, found", group1.Err) - } - if len(group1.Members) != 0 { - t.Error("Unexpected groups[1].Members, found", group1.Members) - } -} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml deleted file mode 100644 index 61ab5e5f0..000000000 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: sarama - -up: - - go: 1.6.2 - -commands: - test: - run: make test - desc: 'run unit tests' - -packages: - - git@github.com:Shopify/dev-shopify.git - diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go deleted file mode 100644 index 35a24c2d9..000000000 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ /dev/null @@ -1,84 +0,0 @@ -package sarama - -import "fmt" - -// Encoder is the interface that wraps the basic Encode method. -// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. -type encoder interface { - encode(pe packetEncoder) error -} - -// Encode takes an Encoder and turns it into bytes.
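The encode function below works in two passes: a counting pass (prepEncoder) to size the buffer exactly, then a writing pass (realEncoder) over the same encode method. A self-contained sketch of that pattern; the interface and type names here are invented for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// putter is a tiny stand-in for sarama's packetEncoder interface.
type putter interface {
	putInt32(v int32)
	putRawBytes(b []byte)
}

// lenCounter only measures, like prepEncoder.
type lenCounter struct{ n int }

func (l *lenCounter) putInt32(int32)       { l.n += 4 }
func (l *lenCounter) putRawBytes(b []byte) { l.n += len(b) }

// byteWriter fills a pre-sized buffer, like realEncoder.
type byteWriter struct {
	buf []byte
	off int
}

func (w *byteWriter) putInt32(v int32) {
	binary.BigEndian.PutUint32(w.buf[w.off:], uint32(v))
	w.off += 4
}

func (w *byteWriter) putRawBytes(b []byte) { w.off += copy(w.buf[w.off:], b) }

// encodeRecord is the shape of an encode() method: the same code runs against
// both encoders, so the sizing and writing passes cannot drift apart.
func encodeRecord(p putter, payload []byte) {
	p.putInt32(int32(len(payload)))
	p.putRawBytes(payload)
}

func main() {
	payload := []byte("hello")
	var lc lenCounter
	encodeRecord(&lc, payload) // pass 1: compute the exact length
	w := &byteWriter{buf: make([]byte, lc.n)}
	encodeRecord(w, payload) // pass 2: fill the exact-size buffer
	fmt.Println(lc.n, w.buf)
}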
-func encode(e encoder) ([]byte, error) { - if e == nil { - return nil, nil - } - - var prepEnc prepEncoder - var realEnc realEncoder - - err := e.encode(&prepEnc) - if err != nil { - return nil, err - } - - if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { - return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} - } - - realEnc.raw = make([]byte, prepEnc.length) - err = e.encode(&realEnc) - if err != nil { - return nil, err - } - - return realEnc.raw, nil -} - -// Decoder is the interface that wraps the basic Decode method. -// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. -type decoder interface { - decode(pd packetDecoder) error -} - -type versionedDecoder interface { - decode(pd packetDecoder, version int16) error -} - -// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, -// interpreted using Kafka's encoding rules. -func decode(buf []byte, in decoder) error { - if buf == nil { - return nil - } - - helper := realDecoder{raw: buf} - err := in.decode(&helper) - if err != nil { - return err - } - - if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} - } - - return nil -} - -func versionedDecode(buf []byte, in versionedDecoder, version int16) error { - if buf == nil { - return nil - } - - helper := realDecoder{raw: buf} - err := in.decode(&helper, version) - if err != nil { - return err - } - - if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go deleted file mode 100644 index cfb7006f7..000000000 --- a/vendor/github.com/Shopify/sarama/errors.go +++ /dev/null @@ -1,194 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" -) - -// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored -// or otherwise failed to respond. -var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") - -// ErrClosedClient is the error returned when a method is called on a client that has been closed. -var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") - -// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does -// not contain the expected information. -var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") - -// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index -// (meaning one outside of the range [0...numPartitions-1]). -var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") - -// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. -var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") - -// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. -var ErrNotConnected = errors.New("kafka: broker not connected") - -// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected -// when requesting messages, since as an optimization the server is allowed to return a partial message at the end -// of the message set. 
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") - -// ErrShuttingDown is returned when a producer receives a message during shutdown. -var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") - -// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max -var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") - -// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, -// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. -type PacketEncodingError struct { - Info string -} - -func (err PacketEncodingError) Error() string { - return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) -} - -// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. -// This can be a bad CRC or length field, or any other invalid value. -type PacketDecodingError struct { - Info string -} - -func (err PacketDecodingError) Error() string { - return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) -} - -// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) -// when the specified configuration is invalid. -type ConfigurationError string - -func (err ConfigurationError) Error() string { - return "kafka: invalid configuration (" + string(err) + ")" -} - -// KError is the type of error that can be returned directly by the Kafka broker. -// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes -type KError int16 - -// Numeric error codes returned by the Kafka server. -const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 -) - -func (err KError) Error() string { - // Error messages stolen/adapted from - // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - switch err { - case ErrNoError: - return "kafka server: Not an error, why are you printing me?" 
- case ErrUnknown: - return "kafka server: Unexpected (unknown?) server error." - case ErrOffsetOutOfRange: - return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." - case ErrInvalidMessage: - return "kafka server: Message contents do not match its CRC." - case ErrUnknownTopicOrPartition: - return "kafka server: Request was for a topic or partition that does not exist on this broker." - case ErrInvalidMessageSize: - return "kafka server: The message has a negative size." - case ErrLeaderNotAvailable: - return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." - case ErrNotLeaderForPartition: - return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." - case ErrRequestTimedOut: - return "kafka server: Request exceeded the user-specified time limit in the request." - case ErrBrokerNotAvailable: - return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" - case ErrReplicaNotAvailable: - return "kafka server: Replica information not available, one or more brokers are down." - case ErrMessageSizeTooLarge: - return "kafka server: Message was too large, server rejected it to avoid allocation error." - case ErrStaleControllerEpochCode: - return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." - case ErrOffsetMetadataTooLarge: - return "kafka server: Specified a string larger than the configured maximum for offset metadata." - case ErrNetworkException: - return "kafka server: The server disconnected before a response was received." - case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." - case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created." - case ErrNotCoordinatorForConsumer: - return "kafka server: Request was for a consumer group that is not coordinated by this broker." - case ErrInvalidTopic: - return "kafka server: The request attempted to perform an operation on an invalid topic." - case ErrMessageSetSizeTooLarge: - return "kafka server: The request included a message batch larger than the configured segment size on the server." - case ErrNotEnoughReplicas: - return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." - case ErrNotEnoughReplicasAfterAppend: - return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." - case ErrInvalidRequiredAcks: - return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." - case ErrIllegalGeneration: - return "kafka server: The provided generation id is not the current generation." - case ErrInconsistentGroupProtocol: - return "kafka server: The provided group protocol type is incompatible with the other members." - case ErrInvalidGroupId: - return "kafka server: The provided group id was empty." - case ErrUnknownMemberId: - return "kafka server: The provided member is not known in the current generation." - case ErrInvalidSessionTimeout: - return "kafka server: The provided session timeout is outside the allowed range." - case ErrRebalanceInProgress: - return "kafka server: A rebalance for the group is in progress. Please re-join the group."
- case ErrInvalidCommitOffsetSize: - return "kafka server: The provided commit metadata was too large." - case ErrTopicAuthorizationFailed: - return "kafka server: The client is not authorized to access this topic." - case ErrGroupAuthorizationFailed: - return "kafka server: The client is not authorized to access this group." - case ErrClusterAuthorizationFailed: - return "kafka server: The client is not authorized to send this request type." - case ErrInvalidTimestamp: - return "kafka server: The timestamp of the message is out of acceptable range." - case ErrUnsupportedSASLMechanism: - return "kafka server: The broker does not support the requested SASL mechanism." - case ErrIllegalSASLState: - return "kafka server: Request is not valid given the current SASL state." - case ErrUnsupportedVersion: - return "kafka server: The version of the API is not supported." - } - - return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) -} diff --git a/vendor/github.com/Shopify/sarama/examples/README.md b/vendor/github.com/Shopify/sarama/examples/README.md deleted file mode 100644 index b6588051e..000000000 --- a/vendor/github.com/Shopify/sarama/examples/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Sarama examples - -This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama). - -In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version. - -#### HTTP server - -[http_server](./http_server) is a simple HTTP server that uses both the sync producer to produce data as part of the request handling cycle, and the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both. diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore b/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore deleted file mode 100644 index 9f6ed425f..000000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -http_server -http_server.test diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/README.md b/vendor/github.com/Shopify/sarama/examples/http_server/README.md deleted file mode 100644 index 5ff2bc253..000000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# HTTP server example - -This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background. - -If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response while the message is being produced in the background. - -One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains, as the producer will be able to batch messages from concurrent requests together.
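A minimal standalone sketch of that sync-versus-async split, assuming a reachable broker at localhost:9092 and reusing this example's topic names (the full implementation follows in http_server.go below):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"localhost:9092"} // assumed local broker

	// Synchronous send: SendMessage blocks until the broker acks (or fails),
	// so the caller knows the outcome before answering the HTTP client.
	syncProducer, err := sarama.NewSyncProducer(brokers, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer syncProducer.Close()

	partition, offset, err := syncProducer.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Value: sarama.StringEncoder("data"),
	})
	if err != nil {
		log.Println("send failed:", err)
	} else {
		log.Printf("stored as important/%d/%d", partition, offset)
	}

	// Fire and forget: hand the entry to the async producer and move on;
	// delivery failures only surface later on the Errors() channel.
	asyncProducer, err := sarama.NewAsyncProducer(brokers, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer asyncProducer.Close()

	go func() {
		for err := range asyncProducer.Errors() {
			log.Println("failed to write access log entry:", err)
		}
	}()

	asyncProducer.Input() <- &sarama.ProducerMessage{
		Topic: "access_log",
		Key:   sarama.StringEncoder("127.0.0.1"), // keying by client IP groups entries per partition
		Value: sarama.StringEncoder("GET / 200"),
	}
}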
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go deleted file mode 100644 index 03e47b6b2..000000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go +++ /dev/null @@ -1,246 +0,0 @@ -package main - -import ( - "github.com/Shopify/sarama" - - "crypto/tls" - "crypto/x509" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "time" -) - -var ( - addr = flag.String("addr", ":8080", "The address to bind to") - brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") - verbose = flag.Bool("verbose", false, "Turn on Sarama logging") - certFile = flag.String("certificate", "", "The optional certificate file for client authentication") - keyFile = flag.String("key", "", "The optional key file for client authentication") - caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") - verifySsl = flag.Bool("verify", false, "Optionally verify the SSL certificate chain") -) - -func main() { - flag.Parse() - - if *verbose { - sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - if *brokers == "" { - flag.PrintDefaults() - os.Exit(1) - } - - brokerList := strings.Split(*brokers, ",") - log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) - - server := &Server{ - DataCollector: newDataCollector(brokerList), - AccessLogProducer: newAccessLogProducer(brokerList), - } - defer func() { - if err := server.Close(); err != nil { - log.Println("Failed to close server", err) - } - }() - - log.Fatal(server.Run(*addr)) -} - -func createTlsConfiguration() (t *tls.Config) { - if *certFile != "" && *keyFile != "" && *caFile != "" { - cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) - if err != nil { - log.Fatal(err) - } - - caCert, err := ioutil.ReadFile(*caFile) - if err != nil { - log.Fatal(err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - t = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - InsecureSkipVerify: *verifySsl, - } - } - // will be nil by default if nothing is provided - return t -} - -type Server struct { - DataCollector sarama.SyncProducer - AccessLogProducer sarama.AsyncProducer -} - -func (s *Server) Close() error { - if err := s.DataCollector.Close(); err != nil { - log.Println("Failed to shut down data collector cleanly", err) - } - - if err := s.AccessLogProducer.Close(); err != nil { - log.Println("Failed to shut down access log producer cleanly", err) - } - - return nil -} - -func (s *Server) Handler() http.Handler { - return s.withAccessLog(s.collectQueryStringData()) -} - -func (s *Server) Run(addr string) error { - httpServer := &http.Server{ - Addr: addr, - Handler: s.Handler(), - } - - log.Printf("Listening for requests on %s...\n", addr) - return httpServer.ListenAndServe() -} - -func (s *Server) collectQueryStringData() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - - // We are not setting a message key, which means that all messages will - // be distributed randomly over the
different partitions. - partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ - Topic: "important", - Value: sarama.StringEncoder(r.URL.RawQuery), - }) - - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Failed to store your data: %s", err) - } else { - // The tuple (topic, partition, offset) can be used as a unique identifier - // for a message in a Kafka cluster. - fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) - } - }) -} - -type accessLogEntry struct { - Method string `json:"method"` - Host string `json:"host"` - Path string `json:"path"` - IP string `json:"ip"` - ResponseTime float64 `json:"response_time"` - - encoded []byte - err error -} - -func (ale *accessLogEntry) ensureEncoded() { - if ale.encoded == nil && ale.err == nil { - ale.encoded, ale.err = json.Marshal(ale) - } -} - -func (ale *accessLogEntry) Length() int { - ale.ensureEncoded() - return len(ale.encoded) -} - -func (ale *accessLogEntry) Encode() ([]byte, error) { - ale.ensureEncoded() - return ale.encoded, ale.err -} - -func (s *Server) withAccessLog(next http.Handler) http.Handler { - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - started := time.Now() - - next.ServeHTTP(w, r) - - entry := &accessLogEntry{ - Method: r.Method, - Host: r.Host, - Path: r.RequestURI, - IP: r.RemoteAddr, - ResponseTime: float64(time.Since(started)) / float64(time.Second), - } - - // We will use the client's IP address as the key. This will cause - // all the access log entries of the same IP address to end up - // on the same partition. - s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ - Topic: "access_log", - Key: sarama.StringEncoder(r.RemoteAddr), - Value: entry, - } - }) -} - -func newDataCollector(brokerList []string) sarama.SyncProducer { - - // For the data collector, we are looking for strong consistency semantics. - // Because we don't change the flush settings, sarama will try to produce messages - // as fast as possible to keep latency low. - config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message - config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true - } - - // On the broker side, you may want to change the following settings to get - // stronger consistency guarantees: - // - For your broker, set `unclean.leader.election.enable` to false - // - For the topic, you could increase `min.insync.replicas`. - - producer, err := sarama.NewSyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - return producer -} - -func newAccessLogProducer(brokerList []string) sarama.AsyncProducer { - - // For the access log, we are looking for AP semantics, with high throughput. - // By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
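// The settings below implement that tradeoff: RequiredAcks = WaitForLocal
// returns as soon as the partition leader has the data, Snappy compression
// shrinks the batches on the wire, and a 500ms flush frequency lets messages
// accumulate into batches before they are sent.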
- config := sarama.NewConfig() - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Enable = true - config.Net.TLS.Config = tlsConfig - } - config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack - config.Producer.Compression = sarama.CompressionSnappy // Compress messages - config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms - - producer, err := sarama.NewAsyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - // We will just log to STDOUT if we're not able to produce messages. - // Note: messages will only be returned here after all retry attempts are exhausted. - go func() { - for err := range producer.Errors() { - log.Println("Failed to write access log entry:", err) - } - }() - - return producer -} diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go deleted file mode 100644 index 7b2451e28..000000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" -) - -// In normal operation, we expect one access log entry, -// and one data collector entry. Let's assume both will succeed. -// We should return a HTTP 200 status. -func TestCollectSuccessfully(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndSucceed() - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - // Now, use dependency injection to use the mocks. - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - - // The Server's Close call is important; it will call Close on - // the two mock producers, which will then validate whether all - // expectations are resolved. - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 200 { - t.Errorf("Expected HTTP status 200, found %d", res.Code) - } - - if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" { - t.Error("Unexpected response body", res.Body) - } -} - -// Now, let's see if we handle the case of not being able to produce -// to the data collector properly. In this case we should return a 500 status. -func TestCollectionFailure(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 500 { - t.Errorf("Expected HTTP status 500, found %d", res.Code) - } -} - -// We don't expect any data collector calls because the path is wrong, -// so we are not setting any expectations on the dataCollectorMock. It -// will still generate an access log entry though. 
-func TestWrongPath(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - - s.Handler().ServeHTTP(res, req) - - if res.Code != 404 { - t.Errorf("Expected HTTP status 404, found %d", res.Code) - } -} - -func safeClose(t *testing.T, o io.Closer) { - if err := o.Close(); err != nil { - t.Error(err) - } -} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go deleted file mode 100644 index ae701a3f2..000000000 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ /dev/null @@ -1,136 +0,0 @@ -package sarama - -type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 -} - -func (b *fetchRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(b.fetchOffset) - pe.putInt32(b.maxBytes) - return nil -} - -func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { - if b.fetchOffset, err = pd.getInt64(); err != nil { - return err - } - if b.maxBytes, err = pd.getInt32(); err != nil { - return err - } - return nil -} - -type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - Version int16 - blocks map[string]map[int32]*fetchRequestBlock -} - -func (r *FetchRequest) encode(pe packetEncoder) (err error) { - pe.putInt32(-1) // replica ID is always -1 for clients - pe.putInt32(r.MaxWaitTime) - pe.putInt32(r.MinBytes) - err = pe.putArrayLength(len(r.blocks)) - if err != nil { - return err - } - for topic, blocks := range r.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(blocks)) - if err != nil { - return err - } - for partition, block := range blocks { - pe.putInt32(partition) - err = block.encode(pe) - if err != nil { - return err - } - } - } - return nil -} - -func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - if _, err = pd.getInt32(); err != nil { - return err - } - if r.MaxWaitTime, err = pd.getInt32(); err != nil { - return err - } - if r.MinBytes, err = pd.getInt32(); err != nil { - return err - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*fetchRequestBlock) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*fetchRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { - return err - } - r.blocks[topic][partition] = fetchBlock - } - } - return nil -} - -func (r *FetchRequest) key() int16 { - return 1 -} - -func (r *FetchRequest) version() int16 { - return r.Version -} - -func (r *FetchRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - default: - return minVersion - } -} - -func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { - if r.blocks == nil { - r.blocks =
make(map[string]map[int32]*fetchRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*fetchRequestBlock) - } - - tmp := new(fetchRequestBlock) - tmp.maxBytes = maxBytes - tmp.fetchOffset = fetchOffset - - r.blocks[topic][partitionID] = tmp -} diff --git a/vendor/github.com/Shopify/sarama/fetch_request_test.go b/vendor/github.com/Shopify/sarama/fetch_request_test.go deleted file mode 100644 index 32c083c7d..000000000 --- a/vendor/github.com/Shopify/sarama/fetch_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - fetchRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestWithProperties = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} -) - -func TestFetchRequest(t *testing.T) { - request := new(FetchRequest) - testRequest(t, "no blocks", request, fetchRequestNoBlocks) - - request.MaxWaitTime = 0x20 - request.MinBytes = 0xEF - testRequest(t, "with properties", request, fetchRequestWithProperties) - - request.MaxWaitTime = 0 - request.MinBytes = 0 - request.AddBlock("topic", 0x12, 0x34, 0x56) - testRequest(t, "one block", request, fetchRequestOneBlock) -} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go deleted file mode 100644 index b56b166c2..000000000 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ /dev/null @@ -1,210 +0,0 @@ -package sarama - -import "time" - -type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - MsgSet MessageSet -} - -func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - b.HighWaterMarkOffset, err = pd.getInt64() - if err != nil { - return err - } - - msgSetSize, err := pd.getInt32() - if err != nil { - return err - } - - msgSetDecoder, err := pd.getSubset(int(msgSetSize)) - if err != nil { - return err - } - err = (&b.MsgSet).decode(msgSetDecoder) - - return err -} - -func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(b.Err)) - - pe.putInt64(b.HighWaterMarkOffset) - - pe.push(&lengthField{}) - err = b.MsgSet.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - Version int16 // v1 requires 0.9+, v2 requires 0.10+ -} - -func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - if r.Version >= 1 { - throttle, err := pd.getInt32() - if err != nil { - return err - } - r.ThrottleTime = time.Duration(throttle) * time.Millisecond - } - - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - 
id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(FetchResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *FetchResponse) encode(pe packetEncoder) (err error) { - if r.Version >= 1 { - pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) - } - - err = pe.putArrayLength(len(r.Blocks)) - if err != nil { - return err - } - - for topic, partitions := range r.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - - for id, block := range partitions { - pe.putInt32(id) - err = block.encode(pe) - if err != nil { - return err - } - } - - } - return nil -} - -func (r *FetchResponse) key() int16 { - return 1 -} - -func (r *FetchResponse) version() int16 { - return r.Version -} - -func (r *FetchResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - default: - return minVersion - } -} - -func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -func (r *FetchResponse) AddError(topic string, partition int32, err KError) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := r.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - r.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - frb.Err = err -} - -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := r.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - r.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - var kb []byte - var vb []byte - if key != nil { - kb, _ = key.Encode() - } - if value != nil { - vb, _ = value.Encode() - } - msg := &Message{Key: kb, Value: vb} - msgBlock := &MessageBlock{Msg: msg, Offset: offset} - frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) -} diff --git a/vendor/github.com/Shopify/sarama/fetch_response_test.go b/vendor/github.com/Shopify/sarama/fetch_response_test.go deleted file mode 100644 index 52fb5a74c..000000000 --- a/vendor/github.com/Shopify/sarama/fetch_response_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package sarama - -import ( - "bytes" - "testing" -) - -var ( - emptyFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - oneMessageFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} -) - -func TestEmptyFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0) - - if len(response.Blocks) != 0 { - t.Error("Decoding produced topic blocks where there were none.") - } - -} 
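The AddError/AddMessage/GetBlock helpers deleted just above exist mainly so tests and mock brokers can assemble fetch responses by hand; a small sketch of how they compose (topic, partition, and offset values are arbitrary):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := new(sarama.FetchResponse)

	// AddMessage lazily builds the nested Blocks map and appends to the
	// partition's message set; AddError only sets the block's error code.
	resp.AddMessage("topic", 5, nil, sarama.StringEncoder("value"), 0x550000)
	resp.AddError("topic", 6, sarama.ErrOffsetOutOfRange)

	fmt.Println(len(resp.GetBlock("topic", 5).MsgSet.Messages)) // 1
	fmt.Println(resp.GetBlock("topic", 6).Err)                  // the offset-out-of-range message
}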
- -func TestOneMessageFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't produce correct high water mark offset.") - } - if block.MsgSet.PartialTrailingMessage { - t.Error("Decoding detected a partial trailing message where there wasn't one.") - } - - if len(block.MsgSet.Messages) != 1 { - t.Fatal("Decoding produced incorrect number of messages.") - } - msgBlock := block.MsgSet.Messages[0] - if msgBlock.Offset != 0x550000 { - t.Error("Decoding produced incorrect message offset.") - } - msg := msgBlock.Msg - if msg.Codec != CompressionNone { - t.Error("Decoding produced incorrect message compression.") - } - if msg.Key != nil { - t.Error("Decoding produced message key where there was none.") - } - if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { - t.Error("Decoding produced incorrect message value.") - } -} diff --git a/vendor/github.com/Shopify/sarama/functional_client_test.go b/vendor/github.com/Shopify/sarama/functional_client_test.go deleted file mode 100644 index 9e8e32968..000000000 --- a/vendor/github.com/Shopify/sarama/functional_client_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -func TestFuncConnectionFailure(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - Proxies["kafka1"].Enabled = false - SaveProxy(t, "kafka1") - - config := NewConfig() - config.Metadata.Retry.Max = 1 - - _, err := NewClient([]string{kafkaBrokers[0]}, config) - if err != ErrOutOfBrokers { - t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) - } -} - -func TestFuncClientMetadata(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 10 * time.Millisecond - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - partitions, err := client.Partitions("test.4") - if err != nil { - t.Error(err) - } - if len(partitions) != 4 { - t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) - } - - partitions, err = client.Partitions("test.1") - if err != nil { - t.Error(err) - } - if len(partitions) != 1 { - t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions) - } - - safeClose(t, client) -} - -func TestFuncClientCoordinator(t *testing.T) { - checkKafkaVersion(t, "0.8.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - client, err := NewClient(kafkaBrokers, nil) - if err != 
nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) - if err != nil { - t.Error(err) - } - - if connected, err := broker.Connected(); !connected || err != nil { - t.Errorf("Expected coordinator broker %s to be properly connected.", broker.Addr()) - } - } - - safeClose(t, client) -} diff --git a/vendor/github.com/Shopify/sarama/functional_consumer_test.go b/vendor/github.com/Shopify/sarama/functional_consumer_test.go deleted file mode 100644 index ab8433109..000000000 --- a/vendor/github.com/Shopify/sarama/functional_consumer_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "math" - "testing" -) - -func TestFuncConsumerOffsetOutOfRange(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - consumer, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, consumer) -} - -func TestConsumerHighWaterMarkOffset(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - p, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, p) - - _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) - if err != nil { - t.Fatal(err) - } - - c, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, c) - - pc, err := c.ConsumePartition("test.1", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - <-pc.Messages() - - if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { - t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) - } - - safeClose(t, pc) -} diff --git a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go deleted file mode 100644 index 436f35ef4..000000000 --- a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -import ( - "testing" -) - -func TestFuncOffsetManager(t *testing.T) { - checkKafkaVersion(t, "0.8.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - client, err := NewClient(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client) - if err != nil { - t.Fatal(err) - } - - pom1, err := offsetManager.ManagePartition("test.1", 0) - if err != nil { - t.Fatal(err) - } - - pom1.MarkOffset(10, "test metadata") - safeClose(t, pom1) - - pom2, err := offsetManager.ManagePartition("test.1", 0) - if err != nil { - t.Fatal(err) - } - - offset, metadata := pom2.NextOffset() - - if offset != 10 { - t.Errorf("Expected the next offset to be 10, found %d.", offset) - } - if metadata != "test metadata" { - t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata) - } - - safeClose(t, pom2) - safeClose(t, offsetManager) - safeClose(t, client) -} diff --git a/vendor/github.com/Shopify/sarama/functional_producer_test.go b/vendor/github.com/Shopify/sarama/functional_producer_test.go deleted file mode 100644 index 1504e7600..000000000 ---
a/vendor/github.com/Shopify/sarama/functional_producer_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package sarama - -import ( - "fmt" - "sync" - "testing" - "time" -) - -const TestBatchSize = 1000 - -func TestFuncProducing(t *testing.T) { - config := NewConfig() - testProducingMessages(t, config) -} - -func TestFuncProducingGzip(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionGZIP - testProducingMessages(t, config) -} - -func TestFuncProducingSnappy(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionSnappy - testProducingMessages(t, config) -} - -func TestFuncProducingNoResponse(t *testing.T) { - config := NewConfig() - config.Producer.RequiredAcks = NoResponse - testProducingMessages(t, config) -} - -func TestFuncProducingFlushing(t *testing.T) { - config := NewConfig() - config.Producer.Flush.Messages = TestBatchSize / 8 - config.Producer.Flush.Frequency = 250 * time.Millisecond - testProducingMessages(t, config) -} - -func TestFuncMultiPartitionProduce(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.ChannelBufferSize = 20 - config.Producer.Flush.Frequency = 50 * time.Millisecond - config.Producer.Flush.Messages = 200 - config.Producer.Return.Successes = true - producer, err := NewSyncProducer(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - var wg sync.WaitGroup - wg.Add(TestBatchSize) - - for i := 1; i <= TestBatchSize; i++ { - go func(i int) { - defer wg.Done() - msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} - if _, _, err := producer.SendMessage(msg); err != nil { - t.Error(i, err) - } - }(i) - } - - wg.Wait() - if err := producer.Close(); err != nil { - t.Error(err) - } -} - -func TestFuncProducingToInvalidTopic(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - producer, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - safeClose(t, producer) -} - -func testProducingMessages(t *testing.T, config *Config) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config.Producer.Return.Successes = true - config.Consumer.Return.Errors = true - - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - master, err := NewConsumerFromClient(client) - if err != nil { - t.Fatal(err) - } - consumer, err := master.ConsumePartition("test.1", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - producer, err := NewAsyncProducerFromClient(client) - if err != nil { - t.Fatal(err) - } - - expectedResponses := TestBatchSize - for i := 1; i <= TestBatchSize; { - msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - for expectedResponses > 0 { - select { - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - safeClose(t, producer) - - for i := 1; i <= TestBatchSize; i++ { - select { 
- case <-time.After(10 * time.Second): - t.Fatal("Not received any more events in the last 10 seconds.") - - case err := <-consumer.Errors(): - t.Error(err) - - case message := <-consumer.Messages(): - if string(message.Value) != fmt.Sprintf("testing %d", i) { - t.Fatalf("Unexpected message with index %d: %s", i, message.Value) - } - } - - } - safeClose(t, consumer) - safeClose(t, client) -} - -// Benchmarks - -func BenchmarkProducerSmall(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMedium(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) -} -func BenchmarkProducerLarge(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) -} -func BenchmarkProducerSmallSinglePartition(b *testing.B) { - benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMediumSnappy(b *testing.B) { - conf := NewConfig() - conf.Producer.Compression = CompressionSnappy - benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) -} - -func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { - setupFunctionalTest(b) - defer teardownFunctionalTest(b) - - producer, err := NewAsyncProducer(kafkaBrokers, conf) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - - for i := 1; i <= b.N; { - msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - b.Fatal(ret.Err) - } - } - safeClose(b, producer) -} diff --git a/vendor/github.com/Shopify/sarama/functional_test.go b/vendor/github.com/Shopify/sarama/functional_test.go deleted file mode 100644 index 846eb29f9..000000000 --- a/vendor/github.com/Shopify/sarama/functional_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package sarama - -import ( - "log" - "math/rand" - "net" - "os" - "strconv" - "strings" - "testing" - "time" - - toxiproxy "github.com/Shopify/toxiproxy/client" -) - -const ( - VagrantToxiproxy = "http://192.168.100.67:8474" - VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" - VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" -) - -var ( - kafkaAvailable, kafkaRequired bool - kafkaBrokers []string - - proxyClient *toxiproxy.Client - Proxies map[string]*toxiproxy.Proxy - ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"} - KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"} -) - -func init() { - if os.Getenv("DEBUG") == "true" { - Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - seed := time.Now().UTC().UnixNano() - if tmp := os.Getenv("TEST_SEED"); tmp != "" { - seed, _ = strconv.ParseInt(tmp, 0, 64) - } - Logger.Println("Using random seed:", seed) - rand.Seed(seed) - - proxyAddr := os.Getenv("TOXIPROXY_ADDR") - if proxyAddr == "" { - proxyAddr = VagrantToxiproxy - } - proxyClient = toxiproxy.NewClient(proxyAddr) - - kafkaPeers := os.Getenv("KAFKA_PEERS") - if kafkaPeers == "" { - kafkaPeers = VagrantKafkaPeers - } - kafkaBrokers = strings.Split(kafkaPeers, ",") - - if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { - if err = c.Close(); err == nil { - kafkaAvailable = true - } - } - - kafkaRequired = os.Getenv("CI") != "" -} - -func checkKafkaAvailability(t testing.TB) { - if !kafkaAvailable { - if kafkaRequired { - 
t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka at a different location.", kafkaBrokers[0]) - } else { - t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka at a different location.", kafkaBrokers[0]) - } - } -} - -func checkKafkaVersion(t testing.TB, requiredVersion string) { - kafkaVersion := os.Getenv("KAFKA_VERSION") - if kafkaVersion == "" { - t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion) - } else { - available := parseKafkaVersion(kafkaVersion) - required := parseKafkaVersion(requiredVersion) - if !available.satisfies(required) { - t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion) - } - } -} - -func resetProxies(t testing.TB) { - if err := proxyClient.ResetState(); err != nil { - t.Error(err) - } - Proxies = nil -} - -func fetchProxies(t testing.TB) { - var err error - Proxies, err = proxyClient.Proxies() - if err != nil { - t.Fatal(err) - } -} - -func SaveProxy(t *testing.T, px string) { - if err := Proxies[px].Save(); err != nil { - t.Fatal(err) - } -} - -func setupFunctionalTest(t testing.TB) { - checkKafkaAvailability(t) - resetProxies(t) - fetchProxies(t) -} - -func teardownFunctionalTest(t testing.TB) { - resetProxies(t) -} - -type kafkaVersion []int - -func (kv kafkaVersion) satisfies(other kafkaVersion) bool { - var ov int - for index, v := range kv { - if len(other) <= index { - ov = 0 - } else { - ov = other[index] - } - - if v < ov { - return false - } else if v > ov { - return true - } - } - return true -} - -func parseKafkaVersion(version string) kafkaVersion { - numbers := strings.Split(version, ".") - result := make(kafkaVersion, 0, len(numbers)) - for _, number := range numbers { - nr, _ := strconv.Atoi(number) - result = append(result, nr) - } - - return result -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go deleted file mode 100644 index ce49c4739..000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -type HeartbeatRequest struct { - GroupId string - GenerationId int32 - MemberId string -} - -func (r *HeartbeatRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.MemberId); err != nil { - return err - } - - return nil -} - -func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - return nil -} - -func (r *HeartbeatRequest) key() int16 { - return 12 -} - -func (r *HeartbeatRequest) version() int16 { - return 0 -} - -func (r *HeartbeatRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go deleted file mode 100644 index da6cd18f5..000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -var ( - basicHeartbeatRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 3, 'b', 'a', 'z', // Member ID - } -) - -func TestHeartbeatRequest(t
*testing.T) { - var request *HeartbeatRequest - - request = new(HeartbeatRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - testRequest(t, "basic", request, basicHeartbeatRequest) -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go deleted file mode 100644 index 3c51163ad..000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -type HeartbeatResponse struct { - Err KError -} - -func (r *HeartbeatResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return nil -} - -func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - return nil -} - -func (r *HeartbeatResponse) key() int16 { - return 12 -} - -func (r *HeartbeatResponse) version() int16 { - return 0 -} - -func (r *HeartbeatResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go deleted file mode 100644 index 5bcbec985..000000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package sarama - -import "testing" - -var ( - heartbeatResponseNoError = []byte{ - 0x00, 0x00} -) - -func TestHeartbeatResponse(t *testing.T) { - var response *HeartbeatResponse - - response = new(HeartbeatResponse) - testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } -} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go deleted file mode 100644 index d95085b2d..000000000 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ /dev/null @@ -1,108 +0,0 @@ -package sarama - -type JoinGroupRequest struct { - GroupId string - SessionTimeout int32 - MemberId string - ProtocolType string - GroupProtocols map[string][]byte -} - -func (r *JoinGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - pe.putInt32(r.SessionTimeout) - if err := pe.putString(r.MemberId); err != nil { - return err - } - if err := pe.putString(r.ProtocolType); err != nil { - return err - } - - if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { - return err - } - for name, metadata := range r.GroupProtocols { - if err := pe.putString(name); err != nil { - return err - } - if err := pe.putBytes(metadata); err != nil { - return err - } - } - - return nil -} - -func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - - if r.SessionTimeout, err = pd.getInt32(); err != nil { - return - } - - if r.MemberId, err = pd.getString(); err != nil { - return - } - - if r.ProtocolType, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.GroupProtocols = make(map[string][]byte) - for i := 0; i < n; i++ { - name, err := pd.getString() - if err != nil { - return err - } - metadata, err := pd.getBytes() - if err != nil { - return err - } - - r.GroupProtocols[name] = metadata - } - - return nil -} - -func (r *JoinGroupRequest) key() 
int16 { - return 11 -} - -func (r *JoinGroupRequest) version() int16 { - return 0 -} - -func (r *JoinGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { - if r.GroupProtocols == nil { - r.GroupProtocols = make(map[string][]byte) - } - - r.GroupProtocols[name] = metadata -} - -func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { - bin, err := encode(metadata) - if err != nil { - return err - } - - r.AddGroupProtocol(name, bin) - return nil -} diff --git a/vendor/github.com/Shopify/sarama/join_group_request_test.go b/vendor/github.com/Shopify/sarama/join_group_request_test.go deleted file mode 100644 index 8a6448c0e..000000000 --- a/vendor/github.com/Shopify/sarama/join_group_request_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -import "testing" - -var ( - joinGroupRequestNoProtocols = []byte{ - 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID - 0, 0, 0, 100, // Session timeout - 0, 0, // Member ID - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type - 0, 0, 0, 0, // 0 protocol groups - } - - joinGroupRequestOneProtocol = []byte{ - 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID - 0, 0, 0, 100, // Session timeout - 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type - 0, 0, 0, 1, // 1 group protocol - 0, 3, 'o', 'n', 'e', // Protocol name - 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata - } -) - -func TestJoinGroupRequest(t *testing.T) { - var request *JoinGroupRequest - - request = new(JoinGroupRequest) - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.ProtocolType = "consumer" - testRequest(t, "no protocols", request, joinGroupRequestNoProtocols) - - request = new(JoinGroupRequest) - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.MemberId = "OneProtocol" - request.ProtocolType = "consumer" - request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) - testRequest(t, "one protocol", request, joinGroupRequestOneProtocol) -} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go deleted file mode 100644 index 94c7a7fde..000000000 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ /dev/null @@ -1,114 +0,0 @@ -package sarama - -type JoinGroupResponse struct { - Err KError - GenerationId int32 - GroupProtocol string - LeaderId string - MemberId string - Members map[string][]byte -} - -func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { - members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) - for id, bin := range r.Members { - meta := new(ConsumerGroupMemberMetadata) - if err := decode(bin, meta); err != nil { - return nil, err - } - members[id] = *meta - } - return members, nil -} - -func (r *JoinGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.GroupProtocol); err != nil { - return err - } - if err := pe.putString(r.LeaderId); err != nil { - return err - } - if err := pe.putString(r.MemberId); err != nil { - return err - } - - if err := pe.putArrayLength(len(r.Members)); err != nil { - return err - } - - for memberId, memberMetadata := range r.Members { - if err := pe.putString(memberId); err != nil { - return err - } - - if err 
:= pe.putBytes(memberMetadata); err != nil { - return err - } - } - - return nil -} - -func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - - if r.GroupProtocol, err = pd.getString(); err != nil { - return - } - - if r.LeaderId, err = pd.getString(); err != nil { - return - } - - if r.MemberId, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Members = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - - memberMetadata, err := pd.getBytes() - if err != nil { - return err - } - - r.Members[memberId] = memberMetadata - } - - return nil -} - -func (r *JoinGroupResponse) key() int16 { - return 11 -} - -func (r *JoinGroupResponse) version() int16 { - return 0 -} - -func (r *JoinGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/join_group_response_test.go b/vendor/github.com/Shopify/sarama/join_group_response_test.go deleted file mode 100644 index ba7f71f20..000000000 --- a/vendor/github.com/Shopify/sarama/join_group_response_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - joinGroupResponseNoError = []byte{ - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'b', 'a', 'r', // Member ID - 0, 0, 0, 0, // No member info - } - - joinGroupResponseWithError = []byte{ - 0, 23, // Error: inconsistent group protocol - 0x00, 0x00, 0x00, 0x00, // Generation ID - 0, 0, // Protocol name chosen - 0, 0, // Leader ID - 0, 0, // Member ID - 0, 0, 0, 0, // No member info - } - - joinGroupResponseLeader = []byte{ - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'f', 'o', 'o', // Member ID == Leader ID - 0, 0, 0, 1, // 1 member - 0, 3, 'f', 'o', 'o', // Member ID - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata - } -) - -func TestJoinGroupResponse(t *testing.T) { - var response *JoinGroupResponse - - response = new(JoinGroupResponse) - testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "bar" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", response.Members) - } - - response = new(JoinGroupResponse) - testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0) - if response.Err != ErrInconsistentGroupProtocol { - t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err) - } - if response.GenerationId != 0 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "" { - t.Error("Decoding 
LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", response.Members) - } - - response = new(JoinGroupResponse) - testVersionDecodable(t, "with error", response, joinGroupResponseLeader, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: ErrNoError expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "foo" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 1 { - t.Error("Decoding Members failed, found:", response.Members) - } - if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) { - t.Error("Decoding foo member failed, found:", response.Members["foo"]) - } -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go deleted file mode 100644 index e17742748..000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -type LeaveGroupRequest struct { - GroupId string - MemberId string -} - -func (r *LeaveGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - if err := pe.putString(r.MemberId); err != nil { - return err - } - - return nil -} - -func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - return nil -} - -func (r *LeaveGroupRequest) key() int16 { - return 13 -} - -func (r *LeaveGroupRequest) version() int16 { - return 0 -} - -func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request_test.go b/vendor/github.com/Shopify/sarama/leave_group_request_test.go deleted file mode 100644 index c1fed6d25..000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_request_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package sarama - -import "testing" - -var ( - basicLeaveGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', - 0, 3, 'b', 'a', 'r', - } -) - -func TestLeaveGroupRequest(t *testing.T) { - var request *LeaveGroupRequest - - request = new(LeaveGroupRequest) - request.GroupId = "foo" - request.MemberId = "bar" - testRequest(t, "basic", request, basicLeaveGroupRequest) -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go deleted file mode 100644 index bd4a34f46..000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -type LeaveGroupResponse struct { - Err KError -} - -func (r *LeaveGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return nil -} - -func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - return nil -} - -func (r *LeaveGroupResponse) key() int16 { - return 13 -} - -func (r *LeaveGroupResponse) version() int16 { - return 0 -} - -func (r *LeaveGroupResponse) 
requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response_test.go b/vendor/github.com/Shopify/sarama/leave_group_response_test.go deleted file mode 100644 index 9207c6668..000000000 --- a/vendor/github.com/Shopify/sarama/leave_group_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import "testing" - -var ( - leaveGroupResponseNoError = []byte{0x00, 0x00} - leaveGroupResponseWithError = []byte{0, 25} -) - -func TestLeaveGroupResponse(t *testing.T) { - var response *LeaveGroupResponse - - response = new(LeaveGroupResponse) - testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - - response = new(LeaveGroupResponse) - testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0) - if response.Err != ErrUnknownMemberId { - t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err) - } -} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go deleted file mode 100644 index 70078be5d..000000000 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ /dev/null @@ -1,29 +0,0 @@ -package sarama - -import "encoding/binary" - -// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. -type lengthField struct { - startOffset int -} - -func (l *lengthField) saveOffset(in int) { - l.startOffset = in -} - -func (l *lengthField) reserveLength() int { - return 4 -} - -func (l *lengthField) run(curOffset int, buf []byte) error { - binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) - return nil -} - -func (l *lengthField) check(curOffset int, buf []byte) error { - if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { - return PacketDecodingError{"length field invalid"} - } - - return nil -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go deleted file mode 100644 index 3b16abf7f..000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -type ListGroupsRequest struct { -} - -func (r *ListGroupsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ListGroupsRequest) key() int16 { - return 16 -} - -func (r *ListGroupsRequest) version() int16 { - return 0 -} - -func (r *ListGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request_test.go b/vendor/github.com/Shopify/sarama/list_groups_request_test.go deleted file mode 100644 index 2e977d9a5..000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package sarama - -import "testing" - -func TestListGroupsRequest(t *testing.T) { - testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{}) -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go deleted file mode 100644 index 3a84f9b6c..000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ /dev/null @@ -1,68 +0,0 @@ -package sarama - -type ListGroupsResponse struct { - Err KError - Groups 
map[string]string -} - -func (r *ListGroupsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - for groupId, protocolType := range r.Groups { - if err := pe.putString(groupId); err != nil { - return err - } - if err := pe.putString(protocolType); err != nil { - return err - } - } - - return nil -} - -func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Groups = make(map[string]string) - for i := 0; i < n; i++ { - groupId, err := pd.getString() - if err != nil { - return err - } - protocolType, err := pd.getString() - if err != nil { - return err - } - - r.Groups[groupId] = protocolType - } - - return nil -} - -func (r *ListGroupsResponse) key() int16 { - return 16 -} - -func (r *ListGroupsResponse) version() int16 { - return 0 -} - -func (r *ListGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response_test.go b/vendor/github.com/Shopify/sarama/list_groups_response_test.go deleted file mode 100644 index 41ab822f9..000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - listGroupsResponseEmpty = []byte{ - 0, 0, // no error - 0, 0, 0, 0, // no groups - } - - listGroupsResponseError = []byte{ - 0, 31, // no error - 0, 0, 0, 0, // ErrClusterAuthorizationFailed - } - - listGroupsResponseWithConsumer = []byte{ - 0, 0, // no error - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type - } -) - -func TestListGroupsResponse(t *testing.T) { - var response *ListGroupsResponse - - response = new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0) - if response.Err != ErrNoError { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseError, 0) - if response.Err != ErrClusterAuthorizationFailed { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0) - if response.Err != ErrNoError { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 1 { - t.Error("Expected one group") - } - if response.Groups["foo"] != "consumer" { - t.Error("Expected foo group to use consumer protocol") - } -} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go deleted file mode 100644 index 0f0ca5b6d..000000000 --- a/vendor/github.com/Shopify/sarama/message.go +++ /dev/null @@ -1,163 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "time" - - "github.com/eapache/go-xerial-snappy" -) - -// CompressionCodec represents the various compression codecs recognized by Kafka in messages. 
-type CompressionCodec int8 - -// only the last two bits are really used -const compressionCodecMask int8 = 0x03 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 -) - -type Message struct { - Codec CompressionCodec // codec used to compress the message contents - Key []byte // the message key, may be nil - Value []byte // the message contents - Set *MessageSet // the message set a message might wrap - Version int8 // v1 requires Kafka 0.10 - Timestamp time.Time // the timestamp of the message (version 1+ only) - - compressedCache []byte -} - -func (m *Message) encode(pe packetEncoder) error { - pe.push(&crc32Field{}) - - pe.putInt8(m.Version) - - attributes := int8(m.Codec) & compressionCodecMask - pe.putInt8(attributes) - - if m.Version >= 1 { - pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond)) - } - - err := pe.putBytes(m.Key) - if err != nil { - return err - } - - var payload []byte - - if m.compressedCache != nil { - payload = m.compressedCache - m.compressedCache = nil - } else if m.Value != nil { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - writer := gzip.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappy.Encode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} - } - } - - if err = pe.putBytes(payload); err != nil { - return err - } - - return pe.pop() -} - -func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(&crc32Field{}) - if err != nil { - return err - } - - m.Version, err = pd.getInt8() - if err != nil { - return err - } - - attribute, err := pd.getInt8() - if err != nil { - return err - } - m.Codec = CompressionCodec(attribute & compressionCodecMask) - - if m.Version >= 1 { - millis, err := pd.getInt64() - if err != nil { - return err - } - m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) - } - - m.Key, err = pd.getBytes() - if err != nil { - return err - } - - m.Value, err = pd.getBytes() - if err != nil { - return err - } - - switch m.Codec { - case CompressionNone: - // nothing to do - case CompressionGZIP: - if m.Value == nil { - break - } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) - if err != nil { - return err - } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionSnappy: - if m.Value == nil { - break - } - if m.Value, err = snappy.Decode(m.Value); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} - } - - return pd.pop() -} - -// decodes a message set from a previousy encoded bulk-message -func (m *Message) decodeSet() (err error) { - pd := realDecoder{raw: m.Value} - m.Set = &MessageSet{} - return m.Set.decode(&pd) -} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go deleted file mode 100644 index f028784e5..000000000 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ /dev/null @@ -1,89 +0,0 @@ -package sarama - -type MessageBlock struct { - Offset 
int64 - Msg *Message -} - -// Messages convenience helper which returns either all the -// messages that are wrapped in this block -func (msb *MessageBlock) Messages() []*MessageBlock { - if msb.Msg.Set != nil { - return msb.Msg.Set.Messages - } - return []*MessageBlock{msb} -} - -func (msb *MessageBlock) encode(pe packetEncoder) error { - pe.putInt64(msb.Offset) - pe.push(&lengthField{}) - err := msb.Msg.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (msb *MessageBlock) decode(pd packetDecoder) (err error) { - if msb.Offset, err = pd.getInt64(); err != nil { - return err - } - - if err = pd.push(&lengthField{}); err != nil { - return err - } - - msb.Msg = new(Message) - if err = msb.Msg.decode(pd); err != nil { - return err - } - - if err = pd.pop(); err != nil { - return err - } - - return nil -} - -type MessageSet struct { - PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock - Messages []*MessageBlock -} - -func (ms *MessageSet) encode(pe packetEncoder) error { - for i := range ms.Messages { - err := ms.Messages[i].encode(pe) - if err != nil { - return err - } - } - return nil -} - -func (ms *MessageSet) decode(pd packetDecoder) (err error) { - ms.Messages = nil - - for pd.remaining() > 0 { - msb := new(MessageBlock) - err = msb.decode(pd) - switch err { - case nil: - ms.Messages = append(ms.Messages, msb) - case ErrInsufficientData: - // As an optimization the server is allowed to return a partial message at the - // end of the message set. Clients should handle this case. So we just ignore such things. - ms.PartialTrailingMessage = true - return nil - default: - return err - } - } - - return nil -} - -func (ms *MessageSet) addMessage(msg *Message) { - block := new(MessageBlock) - block.Msg = msg - ms.Messages = append(ms.Messages, block) -} diff --git a/vendor/github.com/Shopify/sarama/message_test.go b/vendor/github.com/Shopify/sarama/message_test.go deleted file mode 100644 index 1dae896fe..000000000 --- a/vendor/github.com/Shopify/sarama/message_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyMessage = []byte{ - 167, 236, 104, 3, // CRC - 0x00, // magic version byte - 0x00, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0xFF, 0xFF, 0xFF, 0xFF} // value - - emptyGzipMessage = []byte{ - 97, 79, 149, 90, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - // value - 0x00, 0x00, 0x00, 0x17, - 0x1f, 0x8b, - 0x08, - 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} - - emptyBulkSnappyMessage = []byte{ - 180, 47, 53, 209, //CRC - 0x00, // magic version byte - 0x02, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0, 0, 0, 42, - 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic - 0, 0, 0, 1, // min version - 0, 0, 0, 1, // default version - 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0} - - emptyBulkGzipMessage = []byte{ - 139, 160, 63, 141, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0x00, 0x00, 0x00, 0x27, // len - 0x1f, 0x8b, // Gzip Magic - 0x08, // deflate compressed - 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0} -) - -func TestMessageEncoding(t *testing.T) { - message := Message{} - testEncodable(t, "empty", &message, emptyMessage) - - message.Value = []byte{} - message.Codec = 
CompressionGZIP - testEncodable(t, "empty gzip", &message, emptyGzipMessage) -} - -func TestMessageDecoding(t *testing.T) { - message := Message{} - testDecodable(t, "empty", &message, emptyMessage) - if message.Codec != CompressionNone { - t.Error("Decoding produced compression codec where there was none.") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value != nil { - t.Error("Decoding produced value where there was none.") - } - if message.Set != nil { - t.Error("Decoding produced set where there was none.") - } - - testDecodable(t, "empty gzip", &message, emptyGzipMessage) - if message.Codec != CompressionGZIP { - t.Error("Decoding produced incorrect compression codec (was gzip).") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value == nil || len(message.Value) != 0 { - t.Error("Decoding produced nil or content-ful value where there was an empty array.") - } -} - -func TestMessageDecodingBulkSnappy(t *testing.T) { - message := Message{} - testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) - if message.Codec != CompressionSnappy { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} - -func TestMessageDecodingBulkGzip(t *testing.T) { - message := Message{} - testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) - if message.Codec != CompressionGZIP { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go deleted file mode 100644 index 9a26b55fd..000000000 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ /dev/null @@ -1,52 +0,0 @@ -package sarama - -type MetadataRequest struct { - Topics []string -} - -func (r *MetadataRequest) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - - for i := range r.Topics { - err = pe.putString(r.Topics[i]) - if err != nil { - return err - } - } - return nil -} - -func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - - r.Topics = make([]string, topicCount) - for i := range r.Topics { - topic, err := pd.getString() - if err != nil { - return err - } - r.Topics[i] = topic - } - return nil -} - -func (r *MetadataRequest) key() int16 { - return 3 -} - -func (r *MetadataRequest) version() int16 { - return 0 -} - -func (r *MetadataRequest) requiredVersion() KafkaVersion { - return minVersion -} diff --git a/vendor/github.com/Shopify/sarama/metadata_request_test.go b/vendor/github.com/Shopify/sarama/metadata_request_test.go 
deleted file mode 100644 index 44f3146e4..000000000 --- a/vendor/github.com/Shopify/sarama/metadata_request_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package sarama - -import "testing" - -var ( - metadataRequestNoTopics = []byte{ - 0x00, 0x00, 0x00, 0x00} - - metadataRequestOneTopic = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'} - - metadataRequestThreeTopics = []byte{ - 0x00, 0x00, 0x00, 0x03, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x03, 'b', 'a', 'z'} -) - -func TestMetadataRequest(t *testing.T) { - request := new(MetadataRequest) - testRequest(t, "no topics", request, metadataRequestNoTopics) - - request.Topics = []string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopic) - - request.Topics = []string{"foo", "bar", "baz"} - testRequest(t, "three topics", request, metadataRequestThreeTopics) -} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go deleted file mode 100644 index f9d6a4271..000000000 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ /dev/null @@ -1,239 +0,0 @@ -package sarama - -type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 -} - -func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pm.Err = KError(tmp) - - pm.ID, err = pd.getInt32() - if err != nil { - return err - } - - pm.Leader, err = pd.getInt32() - if err != nil { - return err - } - - pm.Replicas, err = pd.getInt32Array() - if err != nil { - return err - } - - pm.Isr, err = pd.getInt32Array() - if err != nil { - return err - } - - return nil -} - -func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(pm.Err)) - pe.putInt32(pm.ID) - pe.putInt32(pm.Leader) - - err = pe.putInt32Array(pm.Replicas) - if err != nil { - return err - } - - err = pe.putInt32Array(pm.Isr) - if err != nil { - return err - } - - return nil -} - -type TopicMetadata struct { - Err KError - Name string - Partitions []*PartitionMetadata -} - -func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - tm.Err = KError(tmp) - - tm.Name, err = pd.getString() - if err != nil { - return err - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - tm.Partitions = make([]*PartitionMetadata, n) - for i := 0; i < n; i++ { - tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(tm.Err)) - - err = pe.putString(tm.Name) - if err != nil { - return err - } - - err = pe.putArrayLength(len(tm.Partitions)) - if err != nil { - return err - } - - for _, pm := range tm.Partitions { - err = pm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -type MetadataResponse struct { - Brokers []*Broker - Topics []*TopicMetadata -} - -func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Brokers = make([]*Broker, n) - for i := 0; i < n; i++ { - r.Brokers[i] = new(Broker) - err = r.Brokers[i].decode(pd) - if err != nil { - return err - } - } - - n, err = pd.getArrayLength() - if err != nil { - return err - } - - r.Topics = make([]*TopicMetadata, n) - for i := 0; i < n; i++ { - 
r.Topics[i] = new(TopicMetadata) - err = r.Topics[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Brokers)) - if err != nil { - return err - } - for _, broker := range r.Brokers { - err = broker.encode(pe) - if err != nil { - return err - } - } - - err = pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - for _, tm := range r.Topics { - err = tm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) key() int16 { - return 3 -} - -func (r *MetadataResponse) version() int16 { - return 0 -} - -func (r *MetadataResponse) requiredVersion() KafkaVersion { - return minVersion -} - -// testing API - -func (r *MetadataResponse) AddBroker(addr string, id int32) { - r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) -} - -func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { - var tmatch *TopicMetadata - - for _, tm := range r.Topics { - if tm.Name == topic { - tmatch = tm - goto foundTopic - } - } - - tmatch = new(TopicMetadata) - tmatch.Name = topic - r.Topics = append(r.Topics, tmatch) - -foundTopic: - - tmatch.Err = err - return tmatch -} - -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { - tmatch := r.AddTopic(topic, ErrNoError) - var pmatch *PartitionMetadata - - for _, pm := range tmatch.Partitions { - if pm.ID == partition { - pmatch = pm - goto foundPartition - } - } - - pmatch = new(PartitionMetadata) - pmatch.ID = partition - tmatch.Partitions = append(tmatch.Partitions, pmatch) - -foundPartition: - - pmatch.Leader = brokerID - pmatch.Replicas = replicas - pmatch.Isr = isr - pmatch.Err = err - -} diff --git a/vendor/github.com/Shopify/sarama/metadata_response_test.go b/vendor/github.com/Shopify/sarama/metadata_response_test.go deleted file mode 100644 index ea62a4f1b..000000000 --- a/vendor/github.com/Shopify/sarama/metadata_response_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - brokersNoTopicsMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0xab, 0xff, - 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', - 0x00, 0x00, 0x00, 0x33, - - 0x00, 0x01, 0x02, 0x03, - 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', - 0x00, 0x00, 0x01, 0x11, - - 0x00, 0x00, 0x00, 0x00} - - topicsNoBrokersMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x04, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x00, - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyMetadataResponse(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") - } -} - -func TestMetadataResponseWithBrokers(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "brokers, no topics", &response, 
brokersNoTopicsMetadataResponse, 0) - if len(response.Brokers) != 2 { - t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") - } - - if response.Brokers[0].id != 0xabff { - t.Error("Decoding produced invalid broker 0 id.") - } - if response.Brokers[0].addr != "localhost:51" { - t.Error("Decoding produced invalid broker 0 address.") - } - if response.Brokers[1].id != 0x010203 { - t.Error("Decoding produced invalid broker 1 id.") - } - if response.Brokers[1].addr != "google.com:273" { - t.Error("Decoding produced invalid broker 1 address.") - } - - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") - } -} - -func TestMetadataResponseWithTopics(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - - if len(response.Topics) != 2 { - t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") - } - - if response.Topics[0].Err != ErrNoError { - t.Error("Decoding produced invalid topic 0 error.") - } - - if response.Topics[0].Name != "foo" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[0].Partitions) != 1 { - t.Fatal("Decoding produced invalid partition count for topic 0.") - } - - if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize { - t.Error("Decoding produced invalid topic 0 partition 0 error.") - } - - if response.Topics[0].Partitions[0].ID != 0x01 { - t.Error("Decoding produced invalid topic 0 partition 0 id.") - } - - if response.Topics[0].Partitions[0].Leader != 0x07 { - t.Error("Decoding produced invalid topic 0 partition 0 leader.") - } - - if len(response.Topics[0].Partitions[0].Replicas) != 3 { - t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") - } - for i := 0; i < 3; i++ { - if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { - t.Error("Decoding produced invalid topic 0 partition 0 replica", i) - } - } - - if len(response.Topics[0].Partitions[0].Isr) != 0 { - t.Error("Decoding produced invalid topic 0 partition 0 isr length.") - } - - if response.Topics[1].Err != ErrNoError { - t.Error("Decoding produced invalid topic 1 error.") - } - - if response.Topics[1].Name != "bar" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[1].Partitions) != 0 { - t.Error("Decoding produced invalid partition count for topic 1.") - } -} diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go deleted file mode 100644 index 36996a50c..000000000 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ /dev/null @@ -1,300 +0,0 @@ -package sarama - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "reflect" - "strconv" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" -) - -const ( - expectationTimeout = 500 * time.Millisecond -) - -type requestHandlerFunc func(req *request) (res encoder) - -// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed -// to facilitate testing of higher level or specialized consumers and producers -// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, -// but rather provides a facility to do that. 
It takes care of the TCP -// transport, request unmarshaling, response marshaling, and makes it the test -// writer responsibility to program correct according to the Kafka API protocol -// MockBroker behaviour. -// -// MockBroker is implemented as a TCP server listening on a kernel-selected -// localhost port that can accept many connections. It reads Kafka requests -// from that connection and returns responses programmed by the SetHandlerByMap -// function. If a MockBroker receives a request that it has no programmed -// response for, then it returns nothing and the request times out. -// -// A set of MockRequest builders to define mappings used by MockBroker is -// provided by Sarama. But users can develop MockRequests of their own and use -// them along with or instead of the standard ones. -// -// When running tests with MockBroker it is strongly recommended to specify -// a timeout to `go test` so that if the broker hangs waiting for a response, -// the test panics. -// -// It is not necessary to prefix message length or correlation ID to your -// response bytes, the server does that automatically as a convenience. -type MockBroker struct { - brokerID int32 - port int32 - closing chan none - stopper chan none - expectations chan encoder - listener net.Listener - t TestReporter - latency time.Duration - handler requestHandlerFunc - history []RequestResponse - lock sync.Mutex -} - -// RequestResponse represents a Request/Response pair processed by MockBroker. -type RequestResponse struct { - Request protocolBody - Response encoder -} - -// SetLatency makes broker pause for the specified period every time before -// replying. -func (b *MockBroker) SetLatency(latency time.Duration) { - b.latency = latency -} - -// SetHandlerByMap defines mapping of Request types to MockResponses. When a -// request is received by the broker, it looks up the request type in the map -// and uses the found MockResponse instance to generate an appropriate reply. -// If the request type is not found in the map then nothing is sent. -func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.setHandler(func(req *request) (res encoder) { - reqTypeName := reflect.TypeOf(req.body).Elem().Name() - mockResponse := handlerMap[reqTypeName] - if mockResponse == nil { - return nil - } - return mockResponse.For(req.body) - }) -} - -// BrokerID returns broker ID assigned to the broker. -func (b *MockBroker) BrokerID() int32 { - return b.brokerID -} - -// History returns a slice of RequestResponse pairs in the order they were -// processed by the broker. Note that in case of multiple connections to the -// broker the order expected by a test can be different from the order recorded -// in the history, unless some synchronization is implemented in the test. -func (b *MockBroker) History() []RequestResponse { - b.lock.Lock() - history := make([]RequestResponse, len(b.history)) - copy(history, b.history) - b.lock.Unlock() - return history -} - -// Port returns the TCP port number the broker is listening for requests on. -func (b *MockBroker) Port() int32 { - return b.port -} - -// Addr returns the broker connection string in the form "<addr>:<port>".
-func (b *MockBroker) Addr() string { - return b.listener.Addr().String() -} - -// Close terminates the broker blocking until it stops internal goroutines and -// releases all resources. -func (b *MockBroker) Close() { - close(b.expectations) - if len(b.expectations) > 0 { - buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) - for e := range b.expectations { - _, _ = buf.WriteString(spew.Sdump(e)) - } - b.t.Error(buf.String()) - } - close(b.closing) - <-b.stopper -} - -// setHandler sets the specified function as the request handler. Whenever -// a mock broker reads a request from the wire it passes the request to the -// function and sends back whatever the handler function returns. -func (b *MockBroker) setHandler(handler requestHandlerFunc) { - b.lock.Lock() - b.handler = handler - b.lock.Unlock() -} - -func (b *MockBroker) serverLoop() { - defer close(b.stopper) - var err error - var conn net.Conn - - go func() { - <-b.closing - err := b.listener.Close() - if err != nil { - b.t.Error(err) - } - }() - - wg := &sync.WaitGroup{} - i := 0 - for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { - wg.Add(1) - go b.handleRequests(conn, i, wg) - i++ - } - wg.Wait() - Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) -} - -func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { - defer wg.Done() - defer func() { - _ = conn.Close() - }() - Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) - var err error - - abort := make(chan none) - defer close(abort) - go func() { - select { - case <-b.closing: - _ = conn.Close() - case <-abort: - } - }() - - resHeader := make([]byte, 8) - for { - req, err := decodeRequest(conn) - if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) - b.serverError(err) - break - } - - if b.latency > 0 { - time.Sleep(b.latency) - } - - b.lock.Lock() - res := b.handler(req) - b.history = append(b.history, RequestResponse{req.body, res}) - b.lock.Unlock() - - if res == nil { - Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) - continue - } - Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) - - encodedRes, err := encode(res) - if err != nil { - b.serverError(err) - break - } - if len(encodedRes) == 0 { - continue - } - - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) - if _, err = conn.Write(resHeader); err != nil { - b.serverError(err) - break - } - if _, err = conn.Write(encodedRes); err != nil { - b.serverError(err) - break - } - } - Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) -} - -func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { - select { - case res, ok := <-b.expectations: - if !ok { - return nil - } - return res - case <-time.After(expectationTimeout): - return nil - } -} - -func (b *MockBroker) serverError(err error) { - isConnectionClosedError := false - if _, ok := err.(*net.OpError); ok { - isConnectionClosedError = true - } else if err == io.EOF { - isConnectionClosedError = true - } else if err.Error() == "use of closed network connection" { - isConnectionClosedError = true - } - - if isConnectionClosedError { - return - } - - b.t.Errorf(err.Error()) -} - -// NewMockBroker launches
a fake Kafka broker. It takes a TestReporter as provided by the -// test framework and a channel of responses to use. If an error occurs it is -// simply logged to the TestReporter and the broker exits. -func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { - return NewMockBrokerAddr(t, brokerID, "localhost:0") -} - -// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give -// it rather than just some ephemeral port. -func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { - var err error - - broker := &MockBroker{ - closing: make(chan none), - stopper: make(chan none), - t: t, - brokerID: brokerID, - expectations: make(chan encoder, 512), - } - broker.handler = broker.defaultRequestHandler - - broker.listener, err = net.Listen("tcp", addr) - if err != nil { - t.Fatal(err) - } - Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) - _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - tmp, err := strconv.ParseInt(portStr, 10, 32) - if err != nil { - t.Fatal(err) - } - broker.port = int32(tmp) - - go broker.serverLoop() - - return broker -} - -func (b *MockBroker) Returns(e encoder) { - b.expectations <- e -} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go deleted file mode 100644 index a20314209..000000000 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ /dev/null @@ -1,455 +0,0 @@ -package sarama - -import ( - "fmt" -) - -// TestReporter has methods matching go's testing.T to avoid importing -// `testing` in the main part of the library. -type TestReporter interface { - Error(...interface{}) - Errorf(string, ...interface{}) - Fatal(...interface{}) - Fatalf(string, ...interface{}) -} - -// MockResponse is a response builder interface it defines one method that -// allows generating a response based on a request body. MockResponses are used -// to program behavior of MockBroker in tests. -type MockResponse interface { - For(reqBody versionedDecoder) (res encoder) -} - -// MockWrapper is a mock response builder that returns a particular concrete -// response regardless of the actual request passed to the `For` method. -type MockWrapper struct { - res encoder -} - -func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { - return mw.res -} - -func NewMockWrapper(res encoder) *MockWrapper { - return &MockWrapper{res: res} -} - -// MockSequence is a mock response builder that is created from a sequence of -// concrete responses. Every time when a `MockBroker` calls its `For` method -// the next response from the sequence is returned. When the end of the -// sequence is reached the last element from the sequence is returned. -type MockSequence struct { - responses []MockResponse -} - -func NewMockSequence(responses ...interface{}) *MockSequence { - ms := &MockSequence{} - ms.responses = make([]MockResponse, len(responses)) - for i, res := range responses { - switch res := res.(type) { - case MockResponse: - ms.responses[i] = res - case encoder: - ms.responses[i] = NewMockWrapper(res) - default: - panic(fmt.Sprintf("Unexpected response type: %T", res)) - } - } - return ms -} - -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { - res = mc.responses[0].For(reqBody) - if len(mc.responses) > 1 { - mc.responses = mc.responses[1:] - } - return res -} - -// MockMetadataResponse is a `MetadataResponse` builder. 
-type MockMetadataResponse struct { - leaders map[string]map[int32]int32 - brokers map[string]int32 - t TestReporter -} - -func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { - return &MockMetadataResponse{ - leaders: make(map[string]map[int32]int32), - brokers: make(map[string]int32), - t: t, - } -} - -func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { - partitions := mmr.leaders[topic] - if partitions == nil { - partitions = make(map[int32]int32) - mmr.leaders[topic] = partitions - } - partitions[partition] = brokerID - return mmr -} - -func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { - mmr.brokers[addr] = brokerID - return mmr -} - -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { - metadataRequest := reqBody.(*MetadataRequest) - metadataResponse := &MetadataResponse{} - for addr, brokerID := range mmr.brokers { - metadataResponse.AddBroker(addr, brokerID) - } - if len(metadataRequest.Topics) == 0 { - for topic, partitions := range mmr.leaders { - for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse - } - for _, topic := range metadataRequest.Topics { - for partition, brokerID := range mmr.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse -} - -// MockOffsetResponse is an `OffsetResponse` builder. -type MockOffsetResponse struct { - offsets map[string]map[int32]map[int64]int64 - t TestReporter -} - -func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { - return &MockOffsetResponse{ - offsets: make(map[string]map[int32]map[int64]int64), - t: t, - } -} - -func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { - partitions := mor.offsets[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]int64) - mor.offsets[topic] = partitions - } - times := partitions[partition] - if times == nil { - times = make(map[int64]int64) - partitions[partition] = times - } - times[time] = offset - return mor -} - -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { - offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{} - for topic, partitions := range offsetRequest.blocks { - for partition, block := range partitions { - offset := mor.getOffset(topic, partition, block.time) - offsetResponse.AddTopicPartition(topic, partition, offset) - } - } - return offsetResponse -} - -func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { - partitions := mor.offsets[topic] - if partitions == nil { - mor.t.Errorf("missing topic: %s", topic) - } - times := partitions[partition] - if times == nil { - mor.t.Errorf("missing partition: %d", partition) - } - offset, ok := times[time] - if !ok { - mor.t.Errorf("missing time: %d", time) - } - return offset -} - -// MockFetchResponse is a `FetchResponse` builder. 
-type MockFetchResponse struct { - messages map[string]map[int32]map[int64]Encoder - highWaterMarks map[string]map[int32]int64 - t TestReporter - batchSize int -} - -func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { - return &MockFetchResponse{ - messages: make(map[string]map[int32]map[int64]Encoder), - highWaterMarks: make(map[string]map[int32]int64), - t: t, - batchSize: batchSize, - } -} - -func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { - partitions := mfr.messages[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]Encoder) - mfr.messages[topic] = partitions - } - messages := partitions[partition] - if messages == nil { - messages = make(map[int64]Encoder) - partitions[partition] = messages - } - messages[offset] = msg - return mfr -} - -func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - partitions = make(map[int32]int64) - mfr.highWaterMarks[topic] = partitions - } - partitions[partition] = offset - return mfr -} - -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { - fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{} - for topic, partitions := range fetchRequest.blocks { - for partition, block := range partitions { - initialOffset := block.fetchOffset - offset := initialOffset - maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) - for i := 0; i < mfr.batchSize && offset < maxOffset; { - msg := mfr.getMessage(topic, partition, offset) - if msg != nil { - res.AddMessage(topic, partition, nil, msg, offset) - i++ - } - offset++ - } - fb := res.GetBlock(topic, partition) - if fb == nil { - res.AddError(topic, partition, ErrNoError) - fb = res.GetBlock(topic, partition) - } - fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) - } - } - return res -} - -func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { - partitions := mfr.messages[topic] - if partitions == nil { - return nil - } - messages := partitions[partition] - if messages == nil { - return nil - } - return messages[offset] -} - -func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { - partitions := mfr.messages[topic] - if partitions == nil { - return 0 - } - messages := partitions[partition] - if messages == nil { - return 0 - } - return len(messages) -} - -func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - return 0 - } - return partitions[partition] -} - -// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. 
-type MockConsumerMetadataResponse struct { - coordinators map[string]interface{} - t TestReporter -} - -func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { - return &MockConsumerMetadataResponse{ - coordinators: make(map[string]interface{}), - t: t, - } -} - -func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { - mr.coordinators[group] = broker - return mr -} - -func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { - mr.coordinators[group] = kerror - return mr -} - -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*ConsumerMetadataRequest) - group := req.ConsumerGroup - res := &ConsumerMetadataResponse{} - v := mr.coordinators[group] - switch v := v.(type) { - case *MockBroker: - res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} - case KError: - res.Err = v - } - return res -} - -// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. -type MockOffsetCommitResponse struct { - errors map[string]map[string]map[int32]KError - t TestReporter -} - -func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { - return &MockOffsetCommitResponse{t: t} -} - -func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[string]map[int32]KError) - } - topics := mr.errors[group] - if topics == nil { - topics = make(map[string]map[int32]KError) - mr.errors[group] = topics - } - partitions := topics[topic] - if partitions == nil { - partitions = make(map[int32]KError) - topics[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*OffsetCommitRequest) - group := req.ConsumerGroup - res := &OffsetCommitResponse{} - for topic, partitions := range req.blocks { - for partition := range partitions { - res.AddError(topic, partition, mr.getError(group, topic, partition)) - } - } - return res -} - -func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { - topics := mr.errors[group] - if topics == nil { - return ErrNoError - } - partitions := topics[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// MockProduceResponse is a `ProduceResponse` builder. 
-type MockProduceResponse struct { - errors map[string]map[int32]KError - t TestReporter -} - -func NewMockProduceResponse(t TestReporter) *MockProduceResponse { - return &MockProduceResponse{t: t} -} - -func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[int32]KError) - } - partitions := mr.errors[topic] - if partitions == nil { - partitions = make(map[int32]KError) - mr.errors[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*ProduceRequest) - res := &ProduceResponse{} - for topic, partitions := range req.msgSets { - for partition := range partitions { - res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) - } - } - return res -} - -func (mr *MockProduceResponse) getError(topic string, partition int32) KError { - partitions := mr.errors[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. -type MockOffsetFetchResponse struct { - offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock - t TestReporter -} - -func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { - return &MockOffsetFetchResponse{t: t} -} - -func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { - if mr.offsets == nil { - mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) - } - topics := mr.offsets[group] - if topics == nil { - topics = make(map[string]map[int32]*OffsetFetchResponseBlock) - mr.offsets[group] = topics - } - partitions := topics[topic] - if partitions == nil { - partitions = make(map[int32]*OffsetFetchResponseBlock) - topics[topic] = partitions - } - partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} - return mr -} - -func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { - req := reqBody.(*OffsetFetchRequest) - group := req.ConsumerGroup - res := &OffsetFetchResponse{} - for topic, partitions := range mr.offsets[group] { - for partition, block := range partitions { - res.AddBlock(topic, partition, block) - } - } - return res -} diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md deleted file mode 100644 index 55a6c2e61..000000000 --- a/vendor/github.com/Shopify/sarama/mocks/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# sarama/mocks - -The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. -You can use them to test your sarama applications using dependency injection. - -The following mock objects are available: - -- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks. -- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer) -- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer) - -The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, -and the results will be reported to the `*testing.T` object you provided when creating the mock. 
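As a minimal sketch of the expectation pattern the README above describes (the package name, test name, topic, and payload below are invented for illustration; the mock API calls themselves are the ones defined in this package), a test built on the `AsyncProducer` mock might look like:

```go
package example

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestGreetingProducer(t *testing.T) {
	// Producer.Return.Successes must be enabled for the mock to ack
	// produced messages on the Successes channel.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	mp := mocks.NewAsyncProducer(t, config)

	// One expectation per message we intend to send.
	mp.ExpectInputAndSucceed()

	mp.Input() <- &sarama.ProducerMessage{
		Topic: "greetings",
		Value: sarama.StringEncoder("hello"),
	}

	msg := <-mp.Successes()
	if msg.Topic != "greetings" {
		t.Errorf("unexpected topic: %s", msg.Topic)
	}

	// Closing the mock verifies that no expectations were left
	// unconsumed; violations are reported to t.
	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}
```

If `Producer.Return.Successes` were left at its default of `false`, the mock (like the real producer) would not forward acks on the Successes channel, and the receive above would block.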
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
deleted file mode 100644
index d1d9ba416..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/async_producer.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// AsyncProducer implements sarama's Producer interface for testing purposes.
-// Before you can send messages to its Input channel, you have to set expectations
-// so it knows how to handle the input; it returns an error if the number of messages
-// received is bigger than the number of expectations set. You can also set a
-// function in each expectation so that the message value is checked by this function
-// and an error is returned if the match fails.
-type AsyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	closed       chan struct{}
-	input        chan *sarama.ProducerMessage
-	successes    chan *sarama.ProducerMessage
-	errors       chan *sarama.ProducerError
-	lastOffset   int64
-}
-
-// NewAsyncProducer instantiates a new Producer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is used to determine whether it
-// should ack successes on the Successes channel.
-func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-	mp := &AsyncProducer{
-		t:            t,
-		closed:       make(chan struct{}, 0),
-		expectations: make([]*producerExpectation, 0),
-		input:        make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		successes:    make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		errors:       make(chan *sarama.ProducerError, config.ChannelBufferSize),
-	}
-
-	go func() {
-		defer func() {
-			close(mp.successes)
-			close(mp.errors)
-		}()
-
-		for msg := range mp.input {
-			mp.l.Lock()
-			if mp.expectations == nil || len(mp.expectations) == 0 {
-				mp.expectations = nil
-				mp.t.Errorf("No more expectations set on this mock producer to handle the input message.")
-			} else {
-				expectation := mp.expectations[0]
-				mp.expectations = mp.expectations[1:]
-				if expectation.CheckFunction != nil {
-					if val, err := msg.Value.Encode(); err != nil {
-						mp.t.Errorf("Input message encoding failed: %s", err.Error())
-						mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-					} else {
-						err = expectation.CheckFunction(val)
-						if err != nil {
-							mp.t.Errorf("Check function returned an error: %s", err.Error())
-							mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-						}
-					}
-				}
-				if expectation.Result == errProduceSuccess {
-					mp.lastOffset++
-					if config.Producer.Return.Successes {
-						msg.Offset = mp.lastOffset
-						mp.successes <- msg
-					}
-				} else {
-					if config.Producer.Return.Errors {
-						mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
-					}
-				}
-			}
-			mp.l.Unlock()
-		}
-
-		mp.l.Lock()
-		if len(mp.expectations) > 0 {
-			mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
-		}
-		mp.l.Unlock()
-
-		close(mp.closed)
-	}()
-
-	return mp
-}
-
-////////////////////////////////////////////////
-// Implement Producer interface
-////////////////////////////////////////////////
-
-// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there are any remaining expectations.
-func (mp *AsyncProducer) AsyncClose() {
-	close(mp.input)
-}
-
-// Close corresponds with the Close method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there are any remaining expectations.
-func (mp *AsyncProducer) Close() error {
-	mp.AsyncClose()
-	<-mp.closed
-	return nil
-}
-
-// Input corresponds with the Input method of sarama's Producer implementation.
-// You have to set expectations on the mock producer before writing messages to the Input
-// channel, so it knows how to handle them. If there are no remaining expectations and
-// a message is written to the Input channel, the mock producer will write an error to the test
-// state object.
-func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
-	return mp.input
-}
-
-// Successes corresponds with the Successes method of sarama's Producer implementation.
-func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
-	return mp.successes
-}
-
-// Errors corresponds with the Errors method of sarama's Producer implementation.
-func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
-	return mp.errors
-}
-
-////////////////////////////////////////////////
-// Setting expectations
-////////////////////////////////////////////////
-
-// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
-// will be provided on the input channel. The mock producer will call the given function to check
-// the message value. If an error is returned it will be made available on the Errors channel;
-// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
-// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
-func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
-	mp.l.Lock()
-	defer mp.l.Unlock()
-	mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
-}
-
-// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
-// will be provided on the input channel. The mock producer will first call the given function to
-// check the message value. If an error is returned it will be made available on the Errors channel;
-// otherwise the mock will handle the message as if it failed to produce successfully. This means
-// it will make a ProducerError available on the Errors channel.
-func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
-	mp.l.Lock()
-	defer mp.l.Unlock()
-	mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
-}
-
-// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
-// on the input channel. The mock producer will handle the message as if it is produced successfully,
-// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
-// is set to true.
-func (mp *AsyncProducer) ExpectInputAndSucceed() {
-	mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
-}
-
-// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
-// on the input channel. The mock producer will handle the message as if it failed to produce
-// successfully. This means it will make a ProducerError available on the Errors channel.
-func (mp *AsyncProducer) ExpectInputAndFail(err error) {
-	mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
deleted file mode 100644
index b5d92aad8..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package mocks
-
-import (
-	"errors"
-	"fmt"
-	"regexp"
-	"strings"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func generateRegexpChecker(re string) func([]byte) error {
-	return func(val []byte) error {
-		matched, err := regexp.MatchString(re, string(val))
-		if err != nil {
-			return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
-		}
-		if !matched {
-			return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
-		}
-		return nil
-	}
-}
-
-type testReporterMock struct {
-	errors []string
-}
-
-func newTestReporterMock() *testReporterMock {
-	return &testReporterMock{errors: make([]string, 0)}
-}
-
-func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
-	trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
-}
-
-func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
-	var mp interface{} = &AsyncProducer{}
-	if _, ok := mp.(sarama.AsyncProducer); !ok {
-		t.Error("The mock producer should implement the sarama.AsyncProducer interface.")
-	}
-}
-
-func TestProducerReturnsExpectationsToChannels(t *testing.T) {
-	config := sarama.NewConfig()
-	config.Producer.Return.Successes = true
-	mp := NewAsyncProducer(t, config)
-
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
-
-	msg1 := <-mp.Successes()
-	msg2 := <-mp.Successes()
-	err1 := <-mp.Errors()
-
-	if msg1.Topic != "test 1" {
-		t.Error("Expected message 1 to be returned first")
-	}
-
-	if msg2.Topic != "test 2" {
-		t.Error("Expected message 2 to be returned second")
-	}
-
-	if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected message 3 to be returned as error")
-	}
-
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestProducerWithTooFewExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputAndSucceed()
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestProducerWithTooManyExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestProducerWithCheckerFunction(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(mp.Errors()) != 1 {
-		t.Error("Expected to report an error")
-	}
-
-	err1 := <-mp.Errors()
-	if !strings.HasPrefix(err1.Err.Error(), "No match") {
-		t.Error("Expected to report a value check error, found: ", err1.Err)
-	}
}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go
deleted file mode 100644
index 09657c0ee..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/consumer.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package mocks
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"github.com/Shopify/sarama"
-)
-
-// Consumer implements sarama's Consumer interface for testing purposes.
-// Before you can start consuming from this consumer, you have to register
-// topic/partitions using ExpectConsumePartition, and set expectations on them.
-type Consumer struct {
-	l                  sync.Mutex
-	t                  ErrorReporter
-	config             *sarama.Config
-	partitionConsumers map[string]map[int32]*PartitionConsumer
-	metadata           map[string][]int32
-}
-
-// NewConsumer returns a new mock Consumer instance. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument may be set to nil, in which case
-// a default config is used.
-func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-
-	c := &Consumer{
-		t:                  t,
-		config:             config,
-		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
-	}
-	return c
-}
-
-///////////////////////////////////////////////////
-// Consumer interface implementation
-///////////////////////////////////////////////////
-
-// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
-// Before you can start consuming a partition, you have to set expectations on it using
-// ExpectConsumePartition. You can only consume a partition once per consumer.
-func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
-		c.t.Errorf("No expectations set for %s/%d", topic, partition)
-		return nil, errOutOfExpectations
-	}
-
-	pc := c.partitionConsumers[topic][partition]
-	if pc.consumed {
-		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
-	}
-
-	if pc.offset != AnyOffset && pc.offset != offset {
-		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
-	}
-
-	pc.consumed = true
-	return pc, nil
-}
-
-// Topics returns a list of topics, as registered with SetTopicMetadata.
-func (c *Consumer) Topics() ([]string, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-
-	var result []string
-	for topic := range c.metadata {
-		result = append(result, topic)
-	}
-	return result, nil
-}
-
-// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata.
-func (c *Consumer) Partitions(topic string) ([]int32, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-	if c.metadata[topic] == nil {
-		return nil, sarama.ErrUnknownTopicOrPartition
-	}
-
-	return c.metadata[topic], nil
-}
-
-// Close implements the Close method from the sarama.Consumer interface. It will close
-// all registered PartitionConsumer instances.
-func (c *Consumer) Close() error {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	for _, partitions := range c.partitionConsumers {
-		for _, partitionConsumer := range partitions {
-			_ = partitionConsumer.Close()
-		}
-	}
-
-	return nil
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// SetTopicMetadata sets the cluster's topic/partition metadata,
-// which will be returned by Topics() and Partitions().
-func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	c.metadata = metadata
-}
-
-// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
-// The registered PartitionConsumer will be returned, so you can set expectations
-// on it using method chaining. Once a topic/partition is registered, you are
-// expected to start consuming it using ConsumePartition. If that doesn't happen,
-// an error will be written to the error reporter once the mock consumer is closed.
-func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil {
-		c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
-	}
-
-	if c.partitionConsumers[topic][partition] == nil {
-		c.partitionConsumers[topic][partition] = &PartitionConsumer{
-			t:         c.t,
-			topic:     topic,
-			partition: partition,
-			offset:    offset,
-			messages:  make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
-			errors:    make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
-		}
-	}
-
-	return c.partitionConsumers[topic][partition]
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer mock type
-///////////////////////////////////////////////////
-
-// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
-// It is returned by the mock Consumer's ConsumePartition method, but only if it is
-// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
-// Errors and Messages channels, you should specify what values will be provided on these
-// channels using YieldMessage and YieldError.
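-//
-// A minimal sketch of the expectation flow, from a test's perspective
-// (topic name and payload are illustrative only):
-//
-//	consumer := mocks.NewConsumer(t, nil)
-//	pcmock := consumer.ExpectConsumePartition("my_topic", 0, sarama.OffsetOldest)
-//	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-//
-//	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	msg := <-pc.Messages()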
-type PartitionConsumer struct {
-	l                       sync.Mutex
-	t                       ErrorReporter
-	topic                   string
-	partition               int32
-	offset                  int64
-	messages                chan *sarama.ConsumerMessage
-	errors                  chan *sarama.ConsumerError
-	singleClose             sync.Once
-	consumed                bool
-	errorsShouldBeDrained   bool
-	messagesShouldBeDrained bool
-	highWaterMarkOffset     int64
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer interface implementation
-///////////////////////////////////////////////////
-
-// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) AsyncClose() {
-	pc.singleClose.Do(func() {
-		close(pc.messages)
-		close(pc.errors)
-	})
-}
-
-// Close implements the Close method from the sarama.PartitionConsumer interface. It will
-// verify whether the partition consumer was actually started.
-func (pc *PartitionConsumer) Close() error {
-	if !pc.consumed {
-		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
-		return errPartitionConsumerNotStarted
-	}
-
-	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
-		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
-	}
-
-	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
-		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
-	}
-
-	pc.AsyncClose()
-
-	var (
-		closeErr error
-		wg       sync.WaitGroup
-	)
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-
-		var errs = make(sarama.ConsumerErrors, 0)
-		for err := range pc.errors {
-			errs = append(errs, err)
-		}
-
-		if len(errs) > 0 {
-			closeErr = errs
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for range pc.messages {
-			// drain
-		}
-	}()
-
-	wg.Wait()
-	return closeErr
-}
-
-// Errors implements the Errors method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
-	return pc.errors
-}
-
-// Messages implements the Messages method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
-	return pc.messages
-}
-
-// HighWaterMarkOffset implements the HighWaterMarkOffset method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
-	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// YieldMessage will yield a message on the Messages channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this
-// message was consumed from the Messages channel, because there are legitimate
-// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
-// verify that the channel is empty on close.
-func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
-	pc.l.Lock()
-	defer pc.l.Unlock()
-
-	msg.Topic = pc.topic
-	msg.Partition = pc.partition
-	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
-
-	pc.messages <- msg
-}
-
-// YieldError will yield an error on the Errors channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this error was
-// consumed from the Errors channel, because there are legitimate reasons for this
-// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
-// the channel is empty on close.
-func (pc *PartitionConsumer) YieldError(err error) {
-	pc.errors <- &sarama.ConsumerError{
-		Topic:     pc.topic,
-		Partition: pc.partition,
-		Err:       err,
-	}
-}
-
-// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
-// that the messages channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
-	pc.messagesShouldBeDrained = true
-}
-
-// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
-// that the errors channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
-	pc.errorsShouldBeDrained = true
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go b/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
deleted file mode 100644
index 50dad3a69..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package mocks
-
-import (
-	"sort"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
-	var c interface{} = &Consumer{}
-	if _, ok := c.(sarama.Consumer); !ok {
-		t.Error("The mock consumer should implement the sarama.Consumer interface.")
-	}
-
-	var pc interface{} = &PartitionConsumer{}
-	if _, ok := pc.(sarama.PartitionConsumer); !ok {
-		t.Error("The mock PartitionConsumer should implement the sarama.PartitionConsumer interface.")
-	}
-}
-
-func TestConsumerHandlesExpectations(t *testing.T) {
-	consumer := NewConsumer(t, nil)
-	defer func() {
-		if err := consumer.Close(); err != nil {
-			t.Error(err)
-		}
-	}()
-
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-	consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
-	consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
-
-	pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	test0_msg := <-pc_test0.Messages()
-	if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
-		t.Error("Message was not as expected:", test0_msg)
-	}
-	test0_err := <-pc_test0.Errors()
-	if test0_err.Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
-	}
-
-	pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	test1_msg := <-pc_test1.Messages()
-	if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
-		t.Error("Message was not as expected:", test1_msg)
-	}
-
-	pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	other0_msg := <-pc_other0.Messages()
-	if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
-		t.Error("Message was not as expected:", other0_msg)
-	}
-}
-
-func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
-	consumer := NewConsumer(t, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	select {
-	case <-pc.Messages():
-		t.Error("Did not expect a message on the messages channel.")
-	case err := <-pc.Errors():
-		if err.Err != sarama.ErrOutOfBrokers {
-			t.Error("Expected sarama.ErrOutOfBrokers, found", err)
-		}
-	}
-
-	errs := pc.Close().(sarama.ConsumerErrors)
-	if len(errs) != 1 || errs[0].Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
-	}
-}
-
-func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
-	if err != errOutOfExpectations {
-		t.Error("Expected ConsumePartition to return errOutOfExpectations")
-	}
-
-	if err := consumer.Close(); err != nil {
-		t.Error("No error expected on close, but found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
-
-	if err := consumer.Close(); err != nil {
-		t.Error("No error expected on close, but found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-
-	_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
-	if err != nil {
-		t.Error("Did not expect error, found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-	pcmock.ExpectMessagesDrainedOnClose()
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// consume first message, not second one
-	<-pc.Messages()
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-	pcmock.YieldError(sarama.ErrInvalidMessage)
-	pcmock.YieldError(sarama.ErrInvalidMessage)
-	pcmock.ExpectErrorsDrainedOnClose()
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// consume first and second error.
-	<-pc.Errors()
-	<-pc.Errors()
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 0 {
-		t.Errorf("Expected no expectation failures to be set on the error reporter.")
-	}
-}
-
-func TestConsumerTopicMetadata(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	consumer.SetTopicMetadata(map[string][]int32{
-		"test1": []int32{0, 1, 2, 3},
-		"test2": []int32{0, 1, 2, 3, 4, 5, 6, 7},
-	})
-
-	topics, err := consumer.Topics()
-	if err != nil {
-		t.Error(err)
-	}
-
-	sortedTopics := sort.StringSlice(topics)
-	sortedTopics.Sort()
-	if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
-		t.Error("Unexpected topics returned:", sortedTopics)
-	}
-
-	partitions1, err := consumer.Partitions("test1")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if len(partitions1) != 4 {
-		t.Error("Unexpected partitions returned:", len(partitions1))
-	}
-
-	partitions2, err := consumer.Partitions("test2")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if len(partitions2) != 8 {
-		t.Error("Unexpected partitions returned:", len(partitions2))
-	}
-
-	if len(trm.errors) != 0 {
-		t.Errorf("Expected no expectation failures to be set on the error reporter.")
-	}
-}
-
-func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
-		t.Error("Expected sarama.ErrOutOfBrokers, found", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
deleted file mode 100644
index 4adb838d9..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/mocks.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Package mocks provides mocks that can be used for testing applications
-that use Sarama. The mock types provided by this package implement the
-interfaces Sarama exports, so you can use them for dependency injection
-in your tests.
-
-All mock instances require you to set expectations on them before you
-can use them. These expectations determine how the mock will behave. If an
-expectation is not met, it will make your test fail.
-
-NOTE: this package currently does not fall under the API stability
-guarantee of Sarama as it is still considered experimental.
-*/
-package mocks
-
-import (
-	"errors"
-
-	"github.com/Shopify/sarama"
-)
-
-// ErrorReporter is a simple interface that includes the testing.T methods we use to report
-// expectation violations when using the mock objects.
-type ErrorReporter interface {
-	Errorf(string, ...interface{})
-}
-
-// ValueChecker is a function type to be set in each expectation of the producer mocks
-// to check the value passed.
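-//
-// A minimal sketch of a ValueChecker, attached to an AsyncProducer mock
-// (the expected payload is illustrative only):
-//
-//	checker := func(val []byte) error {
-//		if string(val) != "expected payload" {
-//			return fmt.Errorf("unexpected payload: %q", val)
-//		}
-//		return nil
-//	}
-//	mp.ExpectInputWithCheckerFunctionAndSucceed(checker)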
-type ValueChecker func(val []byte) error
-
-var (
-	errProduceSuccess              error = nil
-	errOutOfExpectations           = errors.New("No more expectations set on mock")
-	errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
-)
-
-const AnyOffset int64 = -1000
-
-type producerExpectation struct {
-	Result        error
-	CheckFunction ValueChecker
-}
-
-type consumerExpectation struct {
-	Err error
-	Msg *sarama.ConsumerMessage
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
deleted file mode 100644
index 2ac7b5c32..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// SyncProducer implements sarama's SyncProducer interface for testing purposes.
-// Before you can use it, you have to set expectations on the mock SyncProducer
-// to tell it how to handle calls to SendMessage, so you can easily test success
-// and failure scenarios.
-type SyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	lastOffset   int64
-}
-
-// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is currently unused, but is
-// maintained to be compatible with the async Producer.
-func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
-	return &SyncProducer{
-		t:            t,
-		expectations: make([]*producerExpectation, 0),
-	}
-}
-
-////////////////////////////////////////////////
-// Implement SyncProducer interface
-////////////////////////////////////////////////
-
-// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessage, so it knows
-// how to handle them. You can set a function in each expectation so that the message value
-// is checked by this function and an error is returned if the match fails.
-// If there are no remaining expectations when SendMessage is called,
-// the mock producer will write an error to the test state object.
-func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		expectation := sp.expectations[0]
-		sp.expectations = sp.expectations[1:]
-		if expectation.CheckFunction != nil {
-			if val, err := msg.Value.Encode(); err != nil {
-				sp.t.Errorf("Input message encoding failed: %s", err.Error())
-				return -1, -1, err
-			} else {
-				err := expectation.CheckFunction(val)
-				if err != nil {
-					sp.t.Errorf("Check function returned an error: %s", err.Error())
-					return -1, -1, err
-				}
-			}
-		}
-		if expectation.Result == errProduceSuccess {
-			sp.lastOffset++
-			msg.Offset = sp.lastOffset
-			return 0, msg.Offset, nil
-		} else {
-			return -1, -1, expectation.Result
-		}
-	} else {
-		sp.t.Errorf("No more expectations set on this mock producer to handle the input message.")
-		return -1, -1, errOutOfExpectations
-	}
-}
-
-// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessages, so it knows
-// how to handle them. If there are no remaining expectations when SendMessages is called,
-// the mock producer will write an error to the test state object.
-func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) >= len(msgs) {
-		expectations := sp.expectations[0:len(msgs)]
-		sp.expectations = sp.expectations[len(msgs):]
-
-		for _, expectation := range expectations {
-			if expectation.Result != errProduceSuccess {
-				return expectation.Result
-			}
-
-		}
-		return nil
-	} else {
-		sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
-		return errOutOfExpectations
-	}
-}
-
-// Close corresponds with the Close method of sarama's SyncProducer implementation.
-// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
-// so it will write an error to the test state if there are any remaining expectations.
-func (sp *SyncProducer) Close() error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
-	}
-
-	return nil
-}
-
-////////////////////////////////////////////////
-// Setting expectations
-////////////////////////////////////////////////
-
-// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
-// will be called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it produced
-// successfully, i.e. by returning a valid partition and offset, and a nil error.
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
-}
-
-// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it failed
-// to produce successfully, i.e. by returning the provided error.
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-	sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
-}
-
-// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will handle the message as if it produced successfully, i.e. by
-// returning a valid partition and offset, and a nil error.
-func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
-}
-
-// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will handle the message as if it failed to produce
-// successfully, i.e. by returning the provided error.
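-//
-// A failure-path sketch (the error value, topic, and payload are
-// illustrative only):
-//
-//	sp := mocks.NewSyncProducer(t, nil)
-//	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-//	msg := &sarama.ProducerMessage{Topic: "t", Value: sarama.StringEncoder("v")}
-//	_, _, err := sp.SendMessage(msg) // err == sarama.ErrOutOfBrokers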
-func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
-	sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
deleted file mode 100644
index 0fdc99877..000000000
--- a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package mocks
-
-import (
-	"strings"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
-	var mp interface{} = &SyncProducer{}
-	if _, ok := mp.(sarama.SyncProducer); !ok {
-		t.Error("The mock sync producer should implement the sarama.SyncProducer interface.")
-	}
-}
-
-func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
-	sp := NewSyncProducer(t, nil)
-	defer func() {
-		if err := sp.Close(); err != nil {
-			t.Error(err)
-		}
-	}()
-
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-
-	_, offset, err := sp.SendMessage(msg)
-	if err != nil {
-		t.Errorf("The first message should have been produced successfully, but got %s", err)
-	}
-	if offset != 1 || offset != msg.Offset {
-		t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
-	}
-
-	_, offset, err = sp.SendMessage(msg)
-	if err != nil {
-		t.Errorf("The second message should have been produced successfully, but got %s", err)
-	}
-	if offset != 2 || offset != msg.Offset {
-		t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
-	}
-
-	_, _, err = sp.SendMessage(msg)
-	if err != sarama.ErrOutOfBrokers {
-		t.Errorf("The third message should not have been produced successfully")
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestSyncProducerWithTooManyExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithTooFewExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageAndSucceed()
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call", err)
-	}
-	if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
-		t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithCheckerFunction(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call, found: ", err)
-	}
-	msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
-		t.Error("Error during value check expected on second SendMessage call, found:", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
deleted file mode 100644
index b21ea634b..000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package sarama
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
-const ReceiveTime int64 = -1
-
-// GroupGenerationUndefined is a special value for the group generation field of
-// Offset Commit Requests that should be used when a consumer group does not rely
-// on Kafka for partition management.
-const GroupGenerationUndefined = -1
-
-type offsetCommitRequestBlock struct {
-	offset    int64
-	timestamp int64
-	metadata  string
-}
-
-func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(b.offset)
-	if version == 1 {
-		pe.putInt64(b.timestamp)
-	} else if b.timestamp != 0 {
-		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
-	}
-
-	return pe.putString(b.metadata)
-}
-
-func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	if b.offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if version == 1 {
-		if b.timestamp, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-	b.metadata, err = pd.getString()
-	return err
-}
-
-type OffsetCommitRequest struct {
-	ConsumerGroup           string
-	ConsumerGroupGeneration int32  // v1 or later
-	ConsumerID              string // v1 or later
-	RetentionTime           int64  // v2 or later
-
-	// Version can be:
-	// - 0 (kafka 0.8.1 and later)
-	// - 1 (kafka 0.8.2 and later)
-	// - 2 (kafka 0.9.0 and later)
-	Version int16
-	blocks  map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
-	if r.Version < 0 || r.Version > 2 {
-		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
-	}
-
-	if err := pe.putString(r.ConsumerGroup); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(r.ConsumerGroupGeneration)
-		if err := pe.putString(r.ConsumerID); err != nil {
-			return err
-		}
-	} else {
-		if r.ConsumerGroupGeneration != 0 {
-			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
-		}
-		if r.ConsumerID != "" {
-			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
-		}
-	}
-
-	if r.Version >= 2 {
-		pe.putInt64(r.RetentionTime)
-	} else if r.RetentionTime != 0 {
-		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
-	}
-
-	if err := pe.putArrayLength(len(r.blocks)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
-			return err
-		}
-		if r.ConsumerID, err = pd.getString(); err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 2 {
-		if r.RetentionTime, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			block := &offsetCommitRequestBlock{}
-			if err := block.decode(pd, r.Version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = block
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_9_0_0
-	default:
-		return minVersion
-	}
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-	}
-
-	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go
deleted file mode 100644
index afc25b7b3..000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	offsetCommitRequestNoBlocksV0 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetCommitRequestNoBlocksV1 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x11, 0x22,
-		0x00, 0x04, 'c', 'o', 'n', 's',
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetCommitRequestNoBlocksV2 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x11, 0x22,
-		0x00, 0x04, 'c', 'o', 'n', 's',
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetCommitRequestOneBlockV0 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x52, 0x21,
-		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
-		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
-
-	offsetCommitRequestOneBlockV1 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x11, 0x22,
-		0x00, 0x04, 'c', 'o', 'n', 's',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x52, 0x21,
-		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
-
-	offsetCommitRequestOneBlockV2 = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x11, 0x22,
-		0x00, 0x04, 'c', 'o', 'n', 's',
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x52, 0x21,
-		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
-		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
-)
-
-func TestOffsetCommitRequestV0(t *testing.T) {
-	request := new(OffsetCommitRequest)
-	request.Version = 0
-	request.ConsumerGroup = "foobar"
-	testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)
-
-	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
-	testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
-}
-
-func TestOffsetCommitRequestV1(t *testing.T) {
-	request := new(OffsetCommitRequest)
-	request.ConsumerGroup = "foobar"
-	request.ConsumerID = "cons"
-	request.ConsumerGroupGeneration = 0x1122
-	request.Version = 1
-	testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)
-
-	request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
-	testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
-}
-
-func TestOffsetCommitRequestV2(t *testing.T) {
-	request := new(OffsetCommitRequest)
-	request.ConsumerGroup = "foobar"
-	request.ConsumerID = "cons"
-	request.ConsumerGroupGeneration = 0x1122
-	request.RetentionTime = 0x4433
-	request.Version = 2
-	testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)
-
-	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
-	testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
deleted file mode 100644
index 7f277e775..000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
-	Errors map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
-	if r.Errors == nil {
-		r.Errors = make(map[string]map[int32]KError)
-	}
-	partitions := r.Errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		r.Errors[topic] = partitions
-	}
-	partitions[partition] = kerror
-}
-
-func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Errors)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.Errors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, kerror := range partitions {
-			pe.putInt32(partition)
-			pe.putInt16(int16(kerror))
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil || numTopics == 0 {
-		return err
-	}
-
-	r.Errors = make(map[string]map[int32]KError, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numErrors, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Errors[name] = make(map[int32]KError, numErrors)
-
-		for j := 0; j < numErrors; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			tmp, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Errors[name][id] = KError(tmp)
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetCommitResponse) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitResponse) version() int16 {
-	return 0
-}
-
-func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
-	return minVersion
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go
deleted file mode 100644
index 074ec9232..000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package sarama
-
-import (
-	"testing"
-)
-
-var (
-	emptyOffsetCommitResponse = []byte{
-		0x00, 0x00, 0x00, 0x00}
-)
-
-func TestEmptyOffsetCommitResponse(t *testing.T) {
-	response := OffsetCommitResponse{}
-	testResponse(t, "empty", &response, emptyOffsetCommitResponse)
-}
-
-func TestNormalOffsetCommitResponse(t *testing.T) {
-	response := OffsetCommitResponse{}
-	response.AddError("t", 0, ErrNotLeaderForPartition)
-	response.Errors["m"] = make(map[int32]KError)
-	// The response encoded form cannot be checked because it varies due to
-	// unpredictable map traversal order.
-	testResponse(t, "normal", &response, nil)
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
deleted file mode 100644
index b19fe79ba..000000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
-	ConsumerGroup string
-	Version       int16
-	partitions    map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 1 {
-		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
-	}
-
-	if err = pe.putString(r.ConsumerGroup); err != nil {
-		return err
-	}
-	if err = pe.putArrayLength(len(r.partitions)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.partitions {
-		if err = pe.putString(topic); err != nil {
-			return err
-		}
-		if err = pe.putInt32Array(partitions); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
-	}
-	partitionCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if partitionCount == 0 {
-		return nil
-	}
-	r.partitions = make(map[string][]int32)
-	for i := 0; i < partitionCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitions, err := pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-		r.partitions[topic] = partitions
-	}
-	return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	default:
-		return minVersion
-	}
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
-	if r.partitions == nil {
-		r.partitions = make(map[string][]int32)
-	}
-
-	r.partitions[topic] = append(r.partitions[topic], partitionID)
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go
deleted file mode 100644
index 025d725c9..000000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	offsetFetchRequestNoGroupNoPartitions = []byte{
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetFetchRequestNoPartitions = []byte{
-		0x00, 0x04, 'b', 'l', 'a', 'h',
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetFetchRequestOnePartition = []byte{
-		0x00, 0x04, 'b', 'l', 'a', 'h',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
-		0x00, 0x00, 0x00, 0x01,
-		0x4F, 0x4F, 0x4F, 0x4F}
-)
-
-func TestOffsetFetchRequest(t *testing.T) {
-	request := new(OffsetFetchRequest)
-	testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
-
-	request.ConsumerGroup = "blah"
-	testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)
-
-	request.AddPartition("topicTheFirst", 0x4F4F4F4F)
-	testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
deleted file mode 100644
index 323220eac..000000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
-	Offset   int64
-	Metadata string
-	Err      KError
-}
-
-func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	b.Metadata, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	return nil
-}
-
-func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
-	pe.putInt64(b.Offset)
-
-	err = pe.putString(b.Metadata)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt16(int16(b.Err))
-
-	return nil
-}
-
-type OffsetFetchResponse struct {
-	Blocks map[string]map[int32]*OffsetFetchResponseBlock
-}
-
-func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.Blocks {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil || numTopics == 0 {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		if numBlocks == 0 {
-			r.Blocks[name] = nil
-			continue
-		}
-		r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(OffsetFetchResponseBlock)
-			err = block.decode(pd)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchResponse) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchResponse) version() int16 {
-	return 0
-}
-
-func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
-	return minVersion
-}
-
-func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
*OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) - } - partitions := r.Blocks[topic] - if partitions == nil { - partitions = make(map[int32]*OffsetFetchResponseBlock) - r.Blocks[topic] = partitions - } - partitions[partition] = block -} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go deleted file mode 100644 index 7614ae424..000000000 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - testResponse(t, "empty", &response, emptyOffsetFetchResponse) -} - -func TestNormalOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut}) - response.Blocks["m"] = nil - // The response encoded form cannot be checked for it varies due to - // unpredictable map traversal order. - testResponse(t, "normal", &response, nil) -} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go deleted file mode 100644 index 5e15cdafe..000000000 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ /dev/null @@ -1,542 +0,0 @@ -package sarama - -import ( - "sync" - "time" -) - -// Offset Manager - -// OffsetManager uses Kafka to store and fetch consumed partition offsets. -type OffsetManager interface { - // ManagePartition creates a PartitionOffsetManager on the given topic/partition. - // It will return an error if this OffsetManager is already managing the given - // topic/partition. - ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) - - // Close stops the OffsetManager from managing offsets. It is required to call - // this function before an OffsetManager object passes out of scope, as it - // will otherwise leak memory. You must call this after all the - // PartitionOffsetManagers are closed. - Close() error -} - -type offsetManager struct { - client Client - conf *Config - group string - - lock sync.Mutex - poms map[string]map[int32]*partitionOffsetManager - boms map[*Broker]*brokerOffsetManager -} - -// NewOffsetManagerFromClient creates a new OffsetManager from the given client. -// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
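As an aside for reviewers of this removal — not part of the patch itself — here is a minimal sketch of how the offset-manager API deleted below was typically driven from client code. The broker address, group name, and topic are hypothetical, and error handling is abbreviated:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Hypothetical broker, group, and topic — illustration only.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // the client is closed last, per the docs above

	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close() // after all PartitionOffsetManagers are closed

	pom, err := om.ManagePartition("events", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	// Resume from the last committed position, or from
	// Config.Consumer.Offsets.Initial if nothing was committed yet.
	offset, metadata := pom.NextOffset()
	log.Printf("resuming at offset %d (metadata %q)", offset, metadata)

	// Per the MarkOffset contract documented below: commit the offset of the
	// *next* message to read, i.e. last consumed offset + 1.
	pom.MarkOffset(offset+1, "")
}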
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - om := &offsetManager{ - client: client, - conf: client.Config(), - group: group, - poms: make(map[string]map[int32]*partitionOffsetManager), - boms: make(map[*Broker]*brokerOffsetManager), - } - - return om, nil -} - -func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { - pom, err := om.newPartitionOffsetManager(topic, partition) - if err != nil { - return nil, err - } - - om.lock.Lock() - defer om.lock.Unlock() - - topicManagers := om.poms[topic] - if topicManagers == nil { - topicManagers = make(map[int32]*partitionOffsetManager) - om.poms[topic] = topicManagers - } - - if topicManagers[partition] != nil { - return nil, ConfigurationError("That topic/partition is already being managed") - } - - topicManagers[partition] = pom - return pom, nil -} - -func (om *offsetManager) Close() error { - return nil -} - -func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - om.lock.Lock() - defer om.lock.Unlock() - - bom := om.boms[broker] - if bom == nil { - bom = om.newBrokerOffsetManager(broker) - om.boms[broker] = bom - } - - bom.refs++ - - return bom -} - -func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - bom.refs-- - - if bom.refs == 0 { - close(bom.updateSubscriptions) - if om.boms[bom.broker] == bom { - delete(om.boms, bom.broker) - } - } -} - -func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - delete(om.boms, bom.broker) -} - -func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() - - delete(om.poms[pom.topic], pom.partition) - if len(om.poms[pom.topic]) == 0 { - delete(om.poms, pom.topic) - } -} - -// Partition Offset Manager - -// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() -// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes -// out of scope. -type PartitionOffsetManager interface { - // NextOffset returns the next offset that should be consumed for the managed - // partition, accompanied by metadata which can be used to reconstruct the state - // of the partition consumer when it resumes. NextOffset() will return - // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset - // was committed for this partition yet. - NextOffset() (int64, string) - - // MarkOffset marks the provided offset, alongside a metadata string - // that represents the state of the partition consumer at that point in time. The - // metadata string can be used by another consumer to restore that state, so it - // can resume consumption. - // - // To follow upstream conventions, you are expected to mark the offset of the - // next message to read, not the last message read. Thus, when calling `MarkOffset` - // you should typically add one to the offset of the last consumed message. - // - // Note: calling MarkOffset does not necessarily commit the offset to the backend - // store immediately for efficiency reasons, and it may never be committed if - // your application crashes. 
This means that you may end up processing the same - // message twice, and your processing should ideally be idempotent. - MarkOffset(offset int64, metadata string) - - // Errors returns a read channel of errors that occur during offset management, if - // enabled. By default, errors are logged and not returned over this channel. If - // you want to implement any custom error handling, set your config's - // Consumer.Return.Errors setting to true, and read from this channel. - Errors() <-chan *ConsumerError - - // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will - // return immediately, after which you should wait until the 'errors' channel has - // been drained and closed. It is required to call this function, or Close before - // a consumer object passes out of scope, as it will otherwise leak memory. You - // must call this before calling Close on the underlying client. - AsyncClose() - - // Close stops the PartitionOffsetManager from managing offsets. It is required to - // call this function (or AsyncClose) before a PartitionOffsetManager object - // passes out of scope, as it will otherwise leak memory. You must call this - // before calling Close on the underlying client. - Close() error -} - -type partitionOffsetManager struct { - parent *offsetManager - topic string - partition int32 - - lock sync.Mutex - offset int64 - metadata string - dirty bool - clean sync.Cond - broker *brokerOffsetManager - - errors chan *ConsumerError - rebalance chan none - dying chan none -} - -func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - pom := &partitionOffsetManager{ - parent: om, - topic: topic, - partition: partition, - errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - rebalance: make(chan none, 1), - dying: make(chan none), - } - pom.clean.L = &pom.lock - - if err := pom.selectBroker(); err != nil { - return nil, err - } - - if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { - return nil, err - } - - pom.broker.updateSubscriptions <- pom - - go withRecover(pom.mainLoop) - - return pom, nil -} - -func (pom *partitionOffsetManager) mainLoop() { - for { - select { - case <-pom.rebalance: - if err := pom.selectBroker(); err != nil { - pom.handleError(err) - pom.rebalance <- none{} - } else { - pom.broker.updateSubscriptions <- pom - } - case <-pom.dying: - if pom.broker != nil { - select { - case <-pom.rebalance: - case pom.broker.updateSubscriptions <- pom: - } - pom.parent.unrefBrokerOffsetManager(pom.broker) - } - pom.parent.abandonPartitionOffsetManager(pom) - close(pom.errors) - return - } - } -} - -func (pom *partitionOffsetManager) selectBroker() error { - if pom.broker != nil { - pom.parent.unrefBrokerOffsetManager(pom.broker) - pom.broker = nil - } - - var broker *Broker - var err error - - if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { - return err - } - - if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { - return err - } - - pom.broker = pom.parent.refBrokerOffsetManager(broker) - return nil -} - -func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { - request := new(OffsetFetchRequest) - request.Version = 1 - request.ConsumerGroup = pom.parent.group - request.AddPartition(pom.topic, pom.partition) - - response, err := pom.broker.broker.FetchOffset(request) - if err != nil { - return err - } - - block := response.GetBlock(pom.topic, pom.partition) - if block == nil { - 
return ErrIncompleteResponse - } - - switch block.Err { - case ErrNoError: - pom.offset = block.Offset - pom.metadata = block.Metadata - return nil - case ErrNotCoordinatorForConsumer: - if retries <= 0 { - return block.Err - } - if err := pom.selectBroker(); err != nil { - return err - } - return pom.fetchInitialOffset(retries - 1) - case ErrOffsetsLoadInProgress: - if retries <= 0 { - return block.Err - } - time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) - return pom.fetchInitialOffset(retries - 1) - default: - return block.Err - } -} - -func (pom *partitionOffsetManager) handleError(err error) { - cErr := &ConsumerError{ - Topic: pom.topic, - Partition: pom.partition, - Err: err, - } - - if pom.parent.conf.Consumer.Return.Errors { - pom.errors <- cErr - } else { - Logger.Println(cErr) - } -} - -func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { - return pom.errors -} - -func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if offset > pom.offset { - pom.offset = offset - pom.metadata = metadata - pom.dirty = true - } -} - -func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if pom.offset == offset && pom.metadata == metadata { - pom.dirty = false - pom.clean.Signal() - } -} - -func (pom *partitionOffsetManager) NextOffset() (int64, string) { - pom.lock.Lock() - defer pom.lock.Unlock() - - if pom.offset >= 0 { - return pom.offset, pom.metadata - } - - return pom.parent.conf.Consumer.Offsets.Initial, "" -} - -func (pom *partitionOffsetManager) AsyncClose() { - go func() { - pom.lock.Lock() - defer pom.lock.Unlock() - - for pom.dirty { - pom.clean.Wait() - } - - close(pom.dying) - }() -} - -func (pom *partitionOffsetManager) Close() error { - pom.AsyncClose() - - var errors ConsumerErrors - for err := range pom.errors { - errors = append(errors, err) - } - - if len(errors) > 0 { - return errors - } - return nil -} - -// Broker Offset Manager - -type brokerOffsetManager struct { - parent *offsetManager - broker *Broker - timer *time.Ticker - updateSubscriptions chan *partitionOffsetManager - subscriptions map[*partitionOffsetManager]none - refs int -} - -func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - bom := &brokerOffsetManager{ - parent: om, - broker: broker, - timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), - updateSubscriptions: make(chan *partitionOffsetManager), - subscriptions: make(map[*partitionOffsetManager]none), - } - - go withRecover(bom.mainLoop) - - return bom -} - -func (bom *brokerOffsetManager) mainLoop() { - for { - select { - case <-bom.timer.C: - if len(bom.subscriptions) > 0 { - bom.flushToBroker() - } - case s, ok := <-bom.updateSubscriptions: - if !ok { - bom.timer.Stop() - return - } - if _, ok := bom.subscriptions[s]; ok { - delete(bom.subscriptions, s) - } else { - bom.subscriptions[s] = none{} - } - } - } -} - -func (bom *brokerOffsetManager) flushToBroker() { - request := bom.constructRequest() - if request == nil { - return - } - - response, err := bom.broker.CommitOffset(request) - - if err != nil { - bom.abort(err) - return - } - - for s := range bom.subscriptions { - if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { - continue - } - - var err KError - var ok bool - - if response.Errors[s.topic] == nil { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - 
continue - } - if err, ok = response.Errors[s.topic][s.partition]; !ok { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - - switch err { - case ErrNoError: - block := request.blocks[s.topic][s.partition] - s.updateCommitted(block.offset, block.metadata) - case ErrNotLeaderForPartition, ErrLeaderNotAvailable, - ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: - // not a critical error, we just need to redispatch - delete(bom.subscriptions, s) - s.rebalance <- none{} - case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: - // nothing we can do about this, just tell the user and carry on - s.handleError(err) - case ErrOffsetsLoadInProgress: - // nothing wrong but we didn't commit, we'll get it next time round - break - case ErrUnknownTopicOrPartition: - // let the user know *and* try redispatching - if topic-auto-create is - // enabled, redispatching should trigger a metadata request and create the - // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) - fallthrough - default: - // dunno, tell the user and try redispatching - s.handleError(err) - delete(bom.subscriptions, s) - s.rebalance <- none{} - } - } -} - -func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if bom.parent.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - - } - - for s := range bom.subscriptions { - s.lock.Lock() - if s.dirty { - r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) - } - s.lock.Unlock() - } - - if len(r.blocks) > 0 { - return r - } - - return nil -} - -func (bom *brokerOffsetManager) abort(err error) { - _ = bom.broker.Close() // we don't care about the error this might return, we already have one - bom.parent.abandonBroker(bom) - - for pom := range bom.subscriptions { - pom.handleError(err) - pom.rebalance <- none{} - } - - for s := range bom.updateSubscriptions { - if _, ok := bom.subscriptions[s]; !ok { - s.handleError(err) - s.rebalance <- none{} - } - } - - bom.subscriptions = make(map[*partitionOffsetManager]none) -} diff --git a/vendor/github.com/Shopify/sarama/offset_manager_test.go b/vendor/github.com/Shopify/sarama/offset_manager_test.go deleted file mode 100644 index c111a5a63..000000000 --- a/vendor/github.com/Shopify/sarama/offset_manager_test.go +++ /dev/null @@ -1,369 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -func initOffsetManager(t *testing.T) (om OffsetManager, - testClient Client, broker, coordinator *MockBroker) { - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond - config.Version = V0_9_0_0 - - broker = NewMockBroker(t, 1) - coordinator = NewMockBroker(t, 2) - - seedMeta := new(MetadataResponse) - seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID()) - seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError) - seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, 
ErrNoError) - broker.Returns(seedMeta) - - var err error - testClient, err = NewClient([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: coordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: coordinator.Port(), - }) - - om, err = NewOffsetManagerFromClient("group", testClient) - if err != nil { - t.Fatal(err) - } - - return om, testClient, broker, coordinator -} - -func initPartitionOffsetManager(t *testing.T, om OffsetManager, - coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager { - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{ - Err: ErrNoError, - Offset: initialOffset, - Metadata: metadata, - }) - coordinator.Returns(fetchResponse) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - t.Fatal(err) - } - - return pom -} - -func TestNewOffsetManager(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - seedBroker.Returns(new(MetadataResponse)) - - testClient, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - _, err = NewOffsetManagerFromClient("group", testClient) - if err != nil { - t.Error(err) - } - - safeClose(t, testClient) - - _, err = NewOffsetManagerFromClient("group", testClient) - if err != ErrClosedClient { - t.Errorf("Error expected for closed client; actual value: %v", err) - } - - seedBroker.Close() -} - -// Test recovery from ErrNotCoordinatorForConsumer -// on first fetchInitialOffset call -func TestOffsetManagerFetchInitialFail(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - - // Error on first fetchInitialOffset call - responseBlock := OffsetFetchResponseBlock{ - Err: ErrNotCoordinatorForConsumer, - Offset: 5, - Metadata: "test_meta", - } - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &responseBlock) - coordinator.Returns(fetchResponse) - - // Refresh coordinator - newCoordinator := NewMockBroker(t, 3) - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Second fetchInitialOffset call is fine - fetchResponse2 := new(OffsetFetchResponse) - responseBlock2 := responseBlock - responseBlock2.Err = ErrNoError - fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) - newCoordinator.Returns(fetchResponse2) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - t.Error(err) - } - - broker.Close() - coordinator.Close() - newCoordinator.Close() - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) -} - -// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress -func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - - // Error on first fetchInitialOffset call - responseBlock := OffsetFetchResponseBlock{ - Err: ErrOffsetsLoadInProgress, - Offset: 5, - Metadata: "test_meta", - } - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &responseBlock) - coordinator.Returns(fetchResponse) - - // Second fetchInitialOffset call is fine - fetchResponse2 := new(OffsetFetchResponse) - responseBlock2 := responseBlock - responseBlock2.Err = ErrNoError - fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) - coordinator.Returns(fetchResponse2) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - 
t.Error(err) - } - - broker.Close() - coordinator.Close() - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerInitialOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - testClient.Config().Consumer.Offsets.Initial = OffsetOldest - - // Kafka returns -1 if no offset has been stored for this partition yet. - pom := initPartitionOffsetManager(t, om, coordinator, -1, "") - - offset, meta := pom.NextOffset() - if offset != OffsetOldest { - t.Errorf("Expected offset 5. Actual: %v", offset) - } - if meta != "" { - t.Errorf("Expected metadata to be empty. Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - coordinator.Close() - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerNextOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta") - - offset, meta := pom.NextOffset() - if offset != 5 { - t.Errorf("Expected offset 5. Actual: %v", offset) - } - if meta != "test_meta" { - t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - coordinator.Close() - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerMarkOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - coordinator.Returns(ocResponse) - - pom.MarkOffset(100, "modified_meta") - offset, meta := pom.NextOffset() - - if offset != 100 { - t.Errorf("Expected offset 100. Actual: %v", offset) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - testClient.Config().Consumer.Offsets.Retention = time.Hour - - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { - if req.body.version() != 2 { - t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) - } - offsetCommitRequest := req.body.(*OffsetCommitRequest) - if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { - t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) - } - return ocResponse - } - coordinator.setHandler(handler) - - pom.MarkOffset(100, "modified_meta") - offset, meta := pom.NextOffset() - - if offset != 100 { - t.Errorf("Expected offset 100. Actual: %v", offset) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". 
Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerCommitErr(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") - - // Error on one partition - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) - ocResponse.AddError("my_topic", 1, ErrNoError) - coordinator.Returns(ocResponse) - - newCoordinator := NewMockBroker(t, 3) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Nothing in response.Errors at all - ocResponse2 := new(OffsetCommitResponse) - newCoordinator.Returns(ocResponse2) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Error on the wrong partition for this pom - ocResponse3 := new(OffsetCommitResponse) - ocResponse3.AddError("my_topic", 1, ErrNoError) - newCoordinator.Returns(ocResponse3) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block - ocResponse4 := new(OffsetCommitResponse) - ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition) - newCoordinator.Returns(ocResponse4) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Normal error response - ocResponse5 := new(OffsetCommitResponse) - ocResponse5.AddError("my_topic", 0, ErrNoError) - newCoordinator.Returns(ocResponse5) - - pom.MarkOffset(100, "modified_meta") - - err := pom.Close() - if err != nil { - t.Error(err) - } - - broker.Close() - coordinator.Close() - newCoordinator.Close() - safeClose(t, om) - safeClose(t, testClient) -} - -// Test of recovery from abort -func TestAbortPartitionOffsetManager(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") - - // this triggers an error in the CommitOffset request, - // which leads to the abort call - coordinator.Close() - - // Response to refresh coordinator request - newCoordinator := NewMockBroker(t, 3) - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - newCoordinator.Returns(ocResponse) - - pom.MarkOffset(100, "modified_meta") - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - safeClose(t, testClient) -} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go deleted file mode 100644 index c66d8f709..000000000 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ /dev/null @@ -1,117 +0,0 @@ -package sarama - -type offsetRequestBlock struct { - time int64 - maxOffsets int32 -} - -func (b *offsetRequestBlock) encode(pe packetEncoder) error { - 
pe.putInt64(int64(b.time)) - pe.putInt32(b.maxOffsets) - return nil -} - -func (b *offsetRequestBlock) decode(pd packetDecoder) (err error) { - if b.time, err = pd.getInt64(); err != nil { - return err - } - if b.maxOffsets, err = pd.getInt32(); err != nil { - return err - } - return nil -} - -type OffsetRequest struct { - blocks map[string]map[int32]*offsetRequestBlock -} - -func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients - err := pe.putArrayLength(len(r.blocks)) - if err != nil { - return err - } - for topic, partitions := range r.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { - // Ignore replica ID - if _, err := pd.getInt32(); err != nil { - return err - } - blockCount, err := pd.getArrayLength() - if err != nil { - return err - } - if blockCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - for i := 0; i < blockCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - block := &offsetRequestBlock{} - if err := block.decode(pd); err != nil { - return err - } - r.blocks[topic][partition] = block - } - } - return nil -} - -func (r *OffsetRequest) key() int16 { - return 2 -} - -func (r *OffsetRequest) version() int16 { - return 0 -} - -func (r *OffsetRequest) requiredVersion() KafkaVersion { - return minVersion -} - -func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - } - - tmp := new(offsetRequestBlock) - tmp.time = time - tmp.maxOffsets = maxOffsets - - r.blocks[topic][partitionID] = tmp -} diff --git a/vendor/github.com/Shopify/sarama/offset_request_test.go b/vendor/github.com/Shopify/sarama/offset_request_test.go deleted file mode 100644 index f3b3046bb..000000000 --- a/vendor/github.com/Shopify/sarama/offset_request_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00} - - offsetRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02} -) - -func TestOffsetRequest(t *testing.T) { - request := new(OffsetRequest) - testRequest(t, "no blocks", request, offsetRequestNoBlocks) - - request.AddBlock("foo", 4, 1, 2) - testRequest(t, "one block", request, offsetRequestOneBlock) -} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go deleted file mode 100644 index ad1a66974..000000000 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ /dev/null @@ -1,142 +0,0 @@ -package sarama - -type OffsetResponseBlock struct { - Err 
KError - Offsets []int64 -} - -func (b *OffsetResponseBlock) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - b.Offsets, err = pd.getInt64Array() - - return err -} - -func (b *OffsetResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(b.Err)) - - return pe.putInt64Array(b.Offsets) -} - -type OffsetResponse struct { - Blocks map[string]map[int32]*OffsetResponseBlock -} - -func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(OffsetResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -/* -// [0 0 0 1 ntopics -0 8 109 121 95 116 111 112 105 99 topic -0 0 0 1 npartitions -0 0 0 0 id -0 0 - -0 0 0 1 0 0 0 0 -0 1 1 1 0 0 0 1 -0 8 109 121 95 116 111 112 -105 99 0 0 0 1 0 0 -0 0 0 0 0 0 0 1 -0 0 0 0 0 1 1 1] - -*/ -func (r *OffsetResponse) encode(pe packetEncoder) (err error) { - if err = pe.putArrayLength(len(r.Blocks)); err != nil { - return err - } - - for topic, partitions := range r.Blocks { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe); err != nil { - return err - } - } - } - - return nil -} - -func (r *OffsetResponse) key() int16 { - return 2 -} - -func (r *OffsetResponse) version() int16 { - return 0 -} - -func (r *OffsetResponse) requiredVersion() KafkaVersion { - return minVersion -} - -// testing API - -func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) - } - byTopic, ok := r.Blocks[topic] - if !ok { - byTopic = make(map[int32]*OffsetResponseBlock) - r.Blocks[topic] = byTopic - } - byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} -} diff --git a/vendor/github.com/Shopify/sarama/offset_response_test.go b/vendor/github.com/Shopify/sarama/offset_response_test.go deleted file mode 100644 index fc00f4b60..000000000 --- a/vendor/github.com/Shopify/sarama/offset_response_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - normalOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x01, 'a', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x01, 'z', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} -) - -func TestEmptyOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testVersionDecodable(t, "empty", 
&response, emptyOffsetResponse, 0) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") - } -} - -func TestNormalOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0) - - if len(response.Blocks) != 2 { - t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") - } - - if len(response.Blocks["a"]) != 0 { - t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") - } - - if len(response.Blocks["z"]) != 1 { - t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") - } - - if response.Blocks["z"][2].Err != ErrNoError { - t.Fatal("Decoding produced invalid error for topic z partition 2.") - } - - if len(response.Blocks["z"][2].Offsets) != 2 { - t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") - } - - if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { - t.Fatal("Decoding produced invalid offsets for topic z partition 2.") - } - -} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go deleted file mode 100644 index 28670c0e6..000000000 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ /dev/null @@ -1,45 +0,0 @@ -package sarama - -// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. -// Types implementing Decoder only need to worry about calling methods like GetString, -// not about how a string is represented in Kafka. -type packetDecoder interface { - // Primitives - getInt8() (int8, error) - getInt16() (int16, error) - getInt32() (int32, error) - getInt64() (int64, error) - getArrayLength() (int, error) - - // Collections - getBytes() ([]byte, error) - getString() (string, error) - getInt32Array() ([]int32, error) - getInt64Array() ([]int64, error) - getStringArray() ([]string, error) - - // Subsets - remaining() int - getSubset(length int) (packetDecoder, error) - - // Stacks, see PushDecoder - push(in pushDecoder) error - pop() error -} - -// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity -// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where -// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they -// depend upon have been decoded. -type pushDecoder interface { - // Saves the offset into the input buffer as the location to actually read the calculated value when able. - saveOffset(in int) - - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and check the field. - // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes - // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. 
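	// (Illustrative aside, not part of the vendored file: the length prefix on
	// a message set is the canonical push/pop pair. Schematically, using the
	// lengthField type this package defines elsewhere:
	//
	//	pe.push(&lengthField{})  // reserve 4 bytes at the current offset
	//	// ... encode the variable-length payload ...
	//	pe.pop()                 // back-fill the reserved bytes with the length
	//
	// The decoder pushes the matching pushDecoder before reading the payload
	// and pops it afterwards, at which point check() runs against the bytes
	// between the saved offset and curOffset.)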
- check(curOffset int, buf []byte) error -} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go deleted file mode 100644 index 0df6e24aa..000000000 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ /dev/null @@ -1,42 +0,0 @@ -package sarama - -// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. -// Types implementing Encoder only need to worry about calling methods like PutString, -// not about how a string is represented in Kafka. -type packetEncoder interface { - // Primitives - putInt8(in int8) - putInt16(in int16) - putInt32(in int32) - putInt64(in int64) - putArrayLength(in int) error - - // Collections - putBytes(in []byte) error - putRawBytes(in []byte) error - putString(in string) error - putStringArray(in []string) error - putInt32Array(in []int32) error - putInt64Array(in []int64) error - - // Stacks, see PushEncoder - push(in pushEncoder) - pop() error -} - -// PushEncoder is the interface for encoding fields like CRCs and lengths where the value -// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where -// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they -// depend upon have been written. -type pushEncoder interface { - // Saves the offset into the input buffer as the location to actually write the calculated value when able. - saveOffset(in int) - - // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and write the field. - // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes - // of data to the saved offset, based on the data between the saved offset and curOffset. - run(curOffset int, buf []byte) error -} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go deleted file mode 100644 index d24199da9..000000000 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ /dev/null @@ -1,123 +0,0 @@ -package sarama - -import ( - "hash" - "hash/fnv" - "math/rand" - "time" -) - -// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], -// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided -// as simple default implementations. -type Partitioner interface { - // Partition takes a message and partition count and chooses a partition - Partition(message *ProducerMessage, numPartitions int32) (int32, error) - - // RequiresConsistency indicates to the user of the partitioner whether the - // mapping of key->partition is consistent or not. Specifically, if a - // partitioner requires consistency then it must be allowed to choose from all - // partitions (even ones known to be unavailable), and its choice must be - // respected by the caller. The obvious example is the HashPartitioner. - RequiresConsistency() bool -} - -// PartitionerConstructor is the type for a function capable of constructing new Partitioners. -type PartitionerConstructor func(topic string) Partitioner - -type manualPartitioner struct{} - -// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided -// ProducerMessage's Partition field as the partition to produce to. 
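A hypothetical illustration, not in the vendored code: the Partitioner interface above can be satisfied in a few lines. This sketch pins every message to partition 0 and, since the mapping never varies, reports that it requires consistency:

// constantPartitioner is illustrative only; it is not part of this file.
type constantPartitioner struct{}

// NewConstantPartitioner matches the PartitionerConstructor signature above,
// so it could be assigned to Config.Producer.Partitioner.
func NewConstantPartitioner(topic string) Partitioner {
	return &constantPartitioner{}
}

func (p *constantPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	return 0, nil // always the first partition
}

func (p *constantPartitioner) RequiresConsistency() bool {
	return true // the key->partition mapping never changes
}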
-func NewManualPartitioner(topic string) Partitioner { - return new(manualPartitioner) -} - -func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return message.Partition, nil -} - -func (p *manualPartitioner) RequiresConsistency() bool { - return true -} - -type randomPartitioner struct { - generator *rand.Rand -} - -// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. -func NewRandomPartitioner(topic string) Partitioner { - p := new(randomPartitioner) - p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) - return p -} - -func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return int32(p.generator.Intn(int(numPartitions))), nil -} - -func (p *randomPartitioner) RequiresConsistency() bool { - return false -} - -type roundRobinPartitioner struct { - partition int32 -} - -// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. -func NewRoundRobinPartitioner(topic string) Partitioner { - return &roundRobinPartitioner{} -} - -func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if p.partition >= numPartitions { - p.partition = 0 - } - ret := p.partition - p.partition++ - return ret, nil -} - -func (p *roundRobinPartitioner) RequiresConsistency() bool { - return false -} - -type hashPartitioner struct { - random Partitioner - hasher hash.Hash32 -} - -// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a -// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, -// modulus the number of partitions. This ensures that messages with the same key always end up on the -// same partition. 
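For intuition, the key-to-partition mapping described above can be reproduced with the standard library's hash/fnv, which this file also uses. A standalone sketch, in which the key "user-42" and the partition count of 8 are made up:

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	h := fnv.New32a()
	h.Write([]byte("user-42")) // the encoded message key (hypothetical)

	// Mirror the modulo-and-negate logic used by hashPartitioner.Partition:
	// Sum32 can exceed MaxInt32, turning negative on conversion to int32.
	partition := int32(h.Sum32()) % 8 // 8 partitions, also hypothetical
	if partition < 0 {
		partition = -partition
	}
	fmt.Println("user-42 consistently maps to partition", partition)
}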
-func NewHashPartitioner(topic string) Partitioner { - p := new(hashPartitioner) - p.random = NewRandomPartitioner(topic) - p.hasher = fnv.New32a() - return p -} - -func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if message.Key == nil { - return p.random.Partition(message, numPartitions) - } - bytes, err := message.Key.Encode() - if err != nil { - return -1, err - } - p.hasher.Reset() - _, err = p.hasher.Write(bytes) - if err != nil { - return -1, err - } - partition := int32(p.hasher.Sum32()) % numPartitions - if partition < 0 { - partition = -partition - } - return partition, nil -} - -func (p *hashPartitioner) RequiresConsistency() bool { - return true -} diff --git a/vendor/github.com/Shopify/sarama/partitioner_test.go b/vendor/github.com/Shopify/sarama/partitioner_test.go deleted file mode 100644 index 3d391c59c..000000000 --- a/vendor/github.com/Shopify/sarama/partitioner_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package sarama - -import ( - "crypto/rand" - "log" - "testing" -) - -func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { - choice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= numPartitions { - t.Error(partitioner, "returned partition", choice, "outside of range for", message) - } - for i := 1; i < 50; i++ { - newChoice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if newChoice != choice { - t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") - } - } -} - -func TestRandomPartitioner(t *testing.T) { - partitioner := NewRandomPartitioner("mytopic") - - choice, err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range.") - } - } -} - -func TestRoundRobinPartitioner(t *testing.T) { - partitioner := NewRoundRobinPartitioner("mytopic") - - choice, err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - var i int32 - for i = 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 7) - if err != nil { - t.Error(partitioner, err) - } - if choice != i%7 { - t.Error("Returned partition", choice, "expecting", i%7) - } - } -} - -func TestHashPartitioner(t *testing.T) { - partitioner := NewHashPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } - } - - buf := make([]byte, 256) - for i := 1; i < 50; i++ { - if _, err := rand.Read(buf); err != nil { - t.Error(err) - } - assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) - } -} - -func TestHashPartitionerMinInt32(t 
*testing.T) { - partitioner := NewHashPartitioner("mytopic") - - msg := ProducerMessage{} - // "1468509572224" generates 2147483648 (uint32) result from Sum32 function - // which is -2147483648 or int32's min value - msg.Key = StringEncoder("1468509572224") - - choice, err := partitioner.Partition(&msg, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } -} - -func TestManualPartitioner(t *testing.T) { - partitioner := NewManualPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := int32(1); i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice != i { - t.Error("Returned partition not the same as the input partition") - } - } -} - -// By default, Sarama uses the message's key to consistently assign a partition to -// a message using hashing. If no key is set, a random partition will be chosen. -// This example shows how you can partition messages randomly, even when a key is set, -// by overriding Config.Producer.Partitioner. -func ExamplePartitioner_random() { - config := NewConfig() - config.Producer.Partitioner = NewRandomPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to kafka cluster.") - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to assign partitions to your messages manually. -func ExamplePartitioner_manual() { - config := NewConfig() - - // First, we tell the producer that we are going to partition ourselves. - config.Producer.Partitioner = NewManualPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - // Now, we set the Partition field of the ProducerMessage struct. - msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} - - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to kafka cluster.") - } - - if partition != 6 { - log.Fatal("Message should have been produced to partition 6!") - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to set a different partitioner depending on the topic. -func ExamplePartitioner_per_topic() { - config := NewConfig() - config.Producer.Partitioner = func(topic string) Partitioner { - switch topic { - case "access_log", "error_log": - return NewRandomPartitioner(topic) - - default: - return NewHashPartitioner(topic) - } - } - - // ... 
-} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go deleted file mode 100644 index 8c6ba8502..000000000 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ /dev/null @@ -1,110 +0,0 @@ -package sarama - -import ( - "fmt" - "math" -) - -type prepEncoder struct { - length int -} - -// primitives - -func (pe *prepEncoder) putInt8(in int8) { - pe.length++ -} - -func (pe *prepEncoder) putInt16(in int16) { - pe.length += 2 -} - -func (pe *prepEncoder) putInt32(in int32) { - pe.length += 4 -} - -func (pe *prepEncoder) putInt64(in int64) { - pe.length += 8 -} - -func (pe *prepEncoder) putArrayLength(in int) error { - if in > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} - } - pe.length += 4 - return nil -} - -// arrays - -func (pe *prepEncoder) putBytes(in []byte) error { - pe.length += 4 - if in == nil { - return nil - } - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putRawBytes(in []byte) error { - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putString(in string) error { - pe.length += 2 - if len(in) > math.MaxInt16 { - return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putStringArray(in []string) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - - for _, str := range in { - if err := pe.putString(str); err != nil { - return err - } - } - - return nil -} - -func (pe *prepEncoder) putInt32Array(in []int32) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 4 * len(in) - return nil -} - -func (pe *prepEncoder) putInt64Array(in []int64) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 8 * len(in) - return nil -} - -// stackable - -func (pe *prepEncoder) push(in pushEncoder) { - pe.length += in.reserveLength() -} - -func (pe *prepEncoder) pop() error { - return nil -} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go deleted file mode 100644 index f8a250946..000000000 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ /dev/null @@ -1,157 +0,0 @@ -package sarama - -// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements -// it must see before responding. Any of the constants defined here are valid. On broker versions -// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many -// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced -// by setting the `min.isr` value in the brokers configuration). -type RequiredAcks int16 - -const ( - // NoResponse doesn't send any response, the TCP ACK is all you get. - NoResponse RequiredAcks = 0 - // WaitForLocal waits for only the local commit to succeed before responding. - WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all replicas to commit before responding. 
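	// (Usage sketch, not part of the vendored source: these constants are
	// applied through the producer configuration before building a producer,
	// e.g.
	//
	//	config := NewConfig()
	//	config.Producer.RequiredAcks = WaitForAll
	//
	// after which every produce request waits for a full ISR commit.)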
- WaitForAll RequiredAcks = -1 -) - -type ProduceRequest struct { - RequiredAcks RequiredAcks - Timeout int32 - Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10 - msgSets map[string]map[int32]*MessageSet -} - -func (r *ProduceRequest) encode(pe packetEncoder) error { - pe.putInt16(int16(r.RequiredAcks)) - pe.putInt32(r.Timeout) - err := pe.putArrayLength(len(r.msgSets)) - if err != nil { - return err - } - for topic, partitions := range r.msgSets { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for id, msgSet := range partitions { - pe.putInt32(id) - pe.push(&lengthField{}) - err = msgSet.encode(pe) - if err != nil { - return err - } - err = pe.pop() - if err != nil { - return err - } - } - } - return nil -} - -func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { - requiredAcks, err := pd.getInt16() - if err != nil { - return err - } - r.RequiredAcks = RequiredAcks(requiredAcks) - if r.Timeout, err = pd.getInt32(); err != nil { - return err - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - r.msgSets = make(map[string]map[int32]*MessageSet) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.msgSets[topic] = make(map[int32]*MessageSet) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - messageSetSize, err := pd.getInt32() - if err != nil { - return err - } - msgSetDecoder, err := pd.getSubset(int(messageSetSize)) - if err != nil { - return err - } - msgSet := &MessageSet{} - err = msgSet.decode(msgSetDecoder) - if err != nil { - return err - } - r.msgSets[topic][partition] = msgSet - } - } - return nil -} - -func (r *ProduceRequest) key() int16 { - return 0 -} - -func (r *ProduceRequest) version() int16 { - return r.Version -} - -func (r *ProduceRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - default: - return minVersion - } -} - -func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) - } - - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) - } - - set := r.msgSets[topic][partition] - - if set == nil { - set = new(MessageSet) - r.msgSets[topic][partition] = set - } - - set.addMessage(msg) -} - -func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) - } - - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) - } - - r.msgSets[topic][partition] = set -} diff --git a/vendor/github.com/Shopify/sarama/produce_request_test.go b/vendor/github.com/Shopify/sarama/produce_request_test.go deleted file mode 100644 index 21f4ba5b1..000000000 --- a/vendor/github.com/Shopify/sarama/produce_request_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - produceRequestEmpty = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - produceRequestHeader = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x00} - - produceRequestOneMessage = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x01, 
- 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0xAD, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} -) - -func TestProduceRequest(t *testing.T) { - request := new(ProduceRequest) - testRequest(t, "empty", request, produceRequestEmpty) - - request.RequiredAcks = 0x123 - request.Timeout = 0x444 - testRequest(t, "header", request, produceRequestHeader) - - request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) - testRequest(t, "one message", request, produceRequestOneMessage) -} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go deleted file mode 100644 index 195abcb81..000000000 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ /dev/null @@ -1,158 +0,0 @@ -package sarama - -import "time" - -type ProduceResponseBlock struct { - Err KError - Offset int64 - // only provided if Version >= 2 and the broker is configured with `LogAppendTime` - Timestamp time.Time -} - -func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - b.Err = KError(tmp) - - b.Offset, err = pd.getInt64() - if err != nil { - return err - } - - if version >= 2 { - if millis, err := pd.getInt64(); err != nil { - return err - } else if millis != -1 { - b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) - } - } - - return nil -} - -type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock - Version int16 - ThrottleTime time.Duration // only provided if Version >= 1 -} - -func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(ProduceResponseBlock) - err = block.decode(pd, version) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - if r.Version >= 1 { - if millis, err := pd.getInt32(); err != nil { - return err - } else { - r.ThrottleTime = time.Duration(millis) * time.Millisecond - } - } - - return nil -} - -func (r *ProduceResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Blocks)) - if err != nil { - return err - } - for topic, partitions := range r.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for id, prb := range partitions { - pe.putInt32(id) - pe.putInt16(int16(prb.Err)) - pe.putInt64(prb.Offset) - } - } - if r.Version >= 1 { - pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) - } - return nil -} - -func (r *ProduceResponse) key() int16 { - return 0 -} - -func (r *ProduceResponse) version() int16 { - return r.Version -} - -func (r *ProduceResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - 
return V0_9_0_0 - case 2: - return V0_10_0_0 - default: - return minVersion - } -} - -func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -// Testing API - -func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) - } - byTopic, ok := r.Blocks[topic] - if !ok { - byTopic = make(map[int32]*ProduceResponseBlock) - r.Blocks[topic] = byTopic - } - byTopic[partition] = &ProduceResponseBlock{Err: err} -} diff --git a/vendor/github.com/Shopify/sarama/produce_response_test.go b/vendor/github.com/Shopify/sarama/produce_response_test.go deleted file mode 100644 index f71709fe8..000000000 --- a/vendor/github.com/Shopify/sarama/produce_response_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package sarama - -import "testing" - -var ( - produceResponseNoBlocks = []byte{ - 0x00, 0x00, 0x00, 0x00} - - produceResponseManyBlocks = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, - - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} -) - -func TestProduceResponse(t *testing.T) { - response := ProduceResponse{} - - testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none") - } - - testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0) - if len(response.Blocks) != 2 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were 2") - } - if len(response.Blocks["foo"]) != 0 { - t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none") - } - if len(response.Blocks["bar"]) != 2 { - t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two") - } - block := response.GetBlock("bar", 1) - if block == nil { - t.Error("Decoding did not produce a block for bar/1") - } else { - if block.Err != ErrNoError { - t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err)) - } - if block.Offset != 0xFF { - t.Error("Decoding failed for bar/1/Offset, got:", block.Offset) - } - } - block = response.GetBlock("bar", 2) - if block == nil { - t.Error("Decoding did not produce a block for bar/2") - } else { - if block.Err != ErrInvalidMessage { - t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err)) - } - if block.Offset != 0 { - t.Error("Decoding failed for bar/2/Offset, got:", block.Offset) - } - } -} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go deleted file mode 100644 index 992f1f141..000000000 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ /dev/null @@ -1,166 +0,0 @@ -package sarama - -import "time" - -type partitionSet struct { - msgs []*ProducerMessage - setToSend *MessageSet - bufferBytes int -} - -type produceSet struct { - parent *asyncProducer - msgs map[string]map[int32]*partitionSet - - bufferBytes int - bufferCount int -} - -func newProduceSet(parent *asyncProducer) *produceSet { - return &produceSet{ - msgs: make(map[string]map[int32]*partitionSet), - parent: parent, 
- } -} - -func (ps *produceSet) add(msg *ProducerMessage) error { - var err error - var key, val []byte - - if msg.Key != nil { - if key, err = msg.Key.Encode(); err != nil { - return err - } - } - - if msg.Value != nil { - if val, err = msg.Value.Encode(); err != nil { - return err - } - } - - partitions := ps.msgs[msg.Topic] - if partitions == nil { - partitions = make(map[int32]*partitionSet) - ps.msgs[msg.Topic] = partitions - } - - set := partitions[msg.Partition] - if set == nil { - set = &partitionSet{setToSend: new(MessageSet)} - partitions[msg.Partition] = set - } - - set.msgs = append(set.msgs, msg) - msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) && !msg.Timestamp.IsZero() { - msgToSend.Timestamp = msg.Timestamp - msgToSend.Version = 1 - } - set.setToSend.addMessage(msgToSend) - - size := producerMessageOverhead + len(key) + len(val) - set.bufferBytes += size - ps.bufferBytes += size - ps.bufferCount++ - - return nil -} - -func (ps *produceSet) buildRequest() *ProduceRequest { - req := &ProduceRequest{ - RequiredAcks: ps.parent.conf.Producer.RequiredAcks, - Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), - } - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - req.Version = 2 - } - - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { - if ps.parent.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, set.setToSend) - } else { - // When compression is enabled, the entire set for each partition is compressed - // and sent as the payload of a single fake "message" with the appropriate codec - // set and no key. When the server sees a message with a compression codec, it - // decompresses the payload and treats the result as its message set. - payload, err := encode(set.setToSend) - if err != nil { - Logger.Println(err) // if this happens, it's basically our fault. - panic(err) - } - req.AddMessage(topic, partition, &Message{ - Codec: ps.parent.conf.Producer.Compression, - Key: nil, - Value: payload, - }) - } - } - } - - return req -} - -func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { - cb(topic, partition, set.msgs) - } - } -} - -func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { - if ps.msgs[topic] == nil { - return nil - } - set := ps.msgs[topic][partition] - if set == nil { - return nil - } - ps.bufferBytes -= set.bufferBytes - ps.bufferCount -= len(set.msgs) - delete(ps.msgs[topic], partition) - return set.msgs -} - -func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { - switch { - // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): - return true - // Would we overflow the size-limit of a compressed message-batch for this partition? - case ps.parent.conf.Producer.Compression != CompressionNone && - ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: - return true - // Would we overflow simply in number of messages? 
- case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: - return true - default: - return false - } -} - -func (ps *produceSet) readyToFlush() bool { - switch { - // If we don't have any messages, nothing else matters - case ps.empty(): - return false - // If all three config values are 0, we always flush as-fast-as-possible - case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: - return true - // If we've passed the message trigger-point - case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: - return true - // If we've passed the byte trigger-point - case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: - return true - default: - return false - } -} - -func (ps *produceSet) empty() bool { - return ps.bufferCount == 0 -} diff --git a/vendor/github.com/Shopify/sarama/produce_set_test.go b/vendor/github.com/Shopify/sarama/produce_set_test.go deleted file mode 100644 index da62da914..000000000 --- a/vendor/github.com/Shopify/sarama/produce_set_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -func makeProduceSet() (*asyncProducer, *produceSet) { - parent := &asyncProducer{ - conf: NewConfig(), - } - return parent, newProduceSet(parent) -} - -func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) { - if err := ps.add(msg); err != nil { - t.Error(err) - } -} - -func TestProduceSetInitial(t *testing.T) { - _, ps := makeProduceSet() - - if !ps.empty() { - t.Error("New produceSet should be empty") - } - - if ps.readyToFlush() { - t.Error("Empty produceSet must never be ready to flush") - } -} - -func TestProduceSetAddingMessages(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.Flush.MaxMessages = 1000 - - msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} - safeAddMessage(t, ps, msg) - - if ps.empty() { - t.Error("set shouldn't be empty when a message is added") - } - - if !ps.readyToFlush() { - t.Error("by default set should be ready to flush when any message is in place") - } - - for i := 0; i < 999; i++ { - if ps.wouldOverflow(msg) { - t.Error("set shouldn't fill up after only", i+1, "messages") - } - safeAddMessage(t, ps, msg) - } - - if !ps.wouldOverflow(msg) { - t.Error("set should be full after 1000 messages") - } -} - -func TestProduceSetPartitionTracking(t *testing.T) { - _, ps := makeProduceSet() - - m1 := &ProducerMessage{Topic: "t1", Partition: 0} - m2 := &ProducerMessage{Topic: "t1", Partition: 1} - m3 := &ProducerMessage{Topic: "t2", Partition: 0} - safeAddMessage(t, ps, m1) - safeAddMessage(t, ps, m2) - safeAddMessage(t, ps, m3) - - seenT1P0 := false - seenT1P1 := false - seenT2P0 := false - - ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - if len(msgs) != 1 { - t.Error("Wrong message count") - } - - if topic == "t1" && partition == 0 { - seenT1P0 = true - } else if topic == "t1" && partition == 1 { - seenT1P1 = true - } else if topic == "t2" && partition == 0 { - seenT2P0 = true - } - }) - - if !seenT1P0 { - t.Error("Didn't see t1p0") - } - if !seenT1P1 { - t.Error("Didn't see t1p1") - } - if !seenT2P0 { - t.Error("Didn't see t2p0") - } - - if len(ps.dropPartition("t1", 1)) != 1 { - t.Error("Got wrong messages back from dropping partition") - } - - if ps.bufferCount != 2 { - 
t.Error("Incorrect buffer count after dropping partition") - } -} - -func TestProduceSetRequestBuilding(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.RequiredAcks = WaitForAll - parent.conf.Producer.Timeout = 10 * time.Second - - msg := &ProducerMessage{ - Topic: "t1", - Partition: 0, - Key: StringEncoder(TestMessage), - Value: StringEncoder(TestMessage), - } - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - msg.Partition = 1 - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - msg.Topic = "t2" - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - - req := ps.buildRequest() - - if req.RequiredAcks != WaitForAll { - t.Error("RequiredAcks not set properly") - } - - if req.Timeout != 10000 { - t.Error("Timeout not set properly") - } - - if len(req.msgSets) != 2 { - t.Error("Wrong number of topics in request") - } -} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go deleted file mode 100644 index a0141af07..000000000 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ /dev/null @@ -1,259 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "math" -) - -var errInvalidArrayLength = PacketDecodingError{"invalid array length"} -var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} -var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} - -type realDecoder struct { - raw []byte - off int - stack []pushDecoder -} - -// primitives - -func (rd *realDecoder) getInt8() (int8, error) { - if rd.remaining() < 1 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int8(rd.raw[rd.off]) - rd.off++ - return tmp, nil -} - -func (rd *realDecoder) getInt16() (int16, error) { - if rd.remaining() < 2 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) - rd.off += 2 - return tmp, nil -} - -func (rd *realDecoder) getInt32() (int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - return tmp, nil -} - -func (rd *realDecoder) getInt64() (int64, error) { - if rd.remaining() < 8 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - return tmp, nil -} - -func (rd *realDecoder) getArrayLength() (int, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - if tmp > rd.remaining() { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } else if tmp > 2*math.MaxUint16 { - return -1, errInvalidArrayLength - } - return tmp, nil -} - -// collections - -func (rd *realDecoder) getBytes() ([]byte, error) { - tmp, err := rd.getInt32() - - if err != nil { - return nil, err - } - - n := int(tmp) - - switch { - case n < -1: - return nil, errInvalidByteSliceLength - case n == -1: - return nil, nil - case n == 0: - return make([]byte, 0), nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - tmpStr := rd.raw[rd.off : rd.off+n] - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getString() (string, error) { - tmp, err := rd.getInt16() - - if err != nil { - return "", err - } - - n := int(tmp) - - switch { - case n < -1: - return "", 
errInvalidStringLength - case n == -1: - return "", nil - case n == 0: - return "", nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return "", ErrInsufficientData - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getInt32Array() ([]int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 4*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int32, n) - for i := range ret { - ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - } - return ret, nil -} - -func (rd *realDecoder) getInt64Array() ([]int64, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 8*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int64, n) - for i := range ret { - ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - } - return ret, nil -} - -func (rd *realDecoder) getStringArray() ([]string, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]string, n) - for i := range ret { - if str, err := rd.getString(); err != nil { - return nil, err - } else { - ret[i] = str - } - } - return ret, nil -} - -// subsets - -func (rd *realDecoder) remaining() int { - return len(rd.raw) - rd.off -} - -func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { - if length < 0 { - return nil, errInvalidSubsetSize - } else if length > rd.remaining() { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - start := rd.off - rd.off += length - return &realDecoder{raw: rd.raw[start:rd.off]}, nil -} - -// stacks - -func (rd *realDecoder) push(in pushDecoder) error { - in.saveOffset(rd.off) - - reserve := in.reserveLength() - if rd.remaining() < reserve { - rd.off = len(rd.raw) - return ErrInsufficientData - } - - rd.stack = append(rd.stack, in) - - rd.off += reserve - - return nil -} - -func (rd *realDecoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := rd.stack[len(rd.stack)-1] - rd.stack = rd.stack[:len(rd.stack)-1] - - return in.check(rd.off, rd.raw) -} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go deleted file mode 100644 index 076fdd0ca..000000000 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ /dev/null @@ -1,115 +0,0 @@ -package sarama - -import "encoding/binary" - -type realEncoder struct { - raw []byte - off int - stack []pushEncoder -} - -// primitives - -func (re *realEncoder) putInt8(in int8) { - re.raw[re.off] = byte(in) - re.off++ -} - -func (re *realEncoder) putInt16(in int16) { - binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) - re.off += 2 -} - -func (re *realEncoder) putInt32(in int32) { - binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) - re.off += 4 -} - -func (re *realEncoder) putInt64(in int64) { - 
binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) - re.off += 8 -} - -func (re *realEncoder) putArrayLength(in int) error { - re.putInt32(int32(in)) - return nil -} - -// collection - -func (re *realEncoder) putRawBytes(in []byte) error { - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putBytes(in []byte) error { - if in == nil { - re.putInt32(-1) - return nil - } - re.putInt32(int32(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putString(in string) error { - re.putInt16(int16(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putStringArray(in []string) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - - for _, val := range in { - if err := re.putString(val); err != nil { - return err - } - } - - return nil -} - -func (re *realEncoder) putInt32Array(in []int32) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt32(val) - } - return nil -} - -func (re *realEncoder) putInt64Array(in []int64) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt64(val) - } - return nil -} - -// stacks - -func (re *realEncoder) push(in pushEncoder) { - in.saveOffset(re.off) - re.off += in.reserveLength() - re.stack = append(re.stack, in) -} - -func (re *realEncoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := re.stack[len(re.stack)-1] - re.stack = re.stack[:len(re.stack)-1] - - return in.run(re.off, re.raw) -} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go deleted file mode 100644 index 5dd337b0d..000000000 --- a/vendor/github.com/Shopify/sarama/request.go +++ /dev/null @@ -1,117 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "io" -) - -type protocolBody interface { - encoder - versionedDecoder - key() int16 - version() int16 - requiredVersion() KafkaVersion -} - -type request struct { - correlationID int32 - clientID string - body protocolBody -} - -func (r *request) encode(pe packetEncoder) (err error) { - pe.push(&lengthField{}) - pe.putInt16(r.body.key()) - pe.putInt16(r.body.version()) - pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) - if err != nil { - return err - } - err = r.body.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { - return err - } - var version int16 - if version, err = pd.getInt16(); err != nil { - return err - } - if r.correlationID, err = pd.getInt32(); err != nil { - return err - } - r.clientID, err = pd.getString() - - r.body = allocateBody(key, version) - if r.body == nil { - return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} - } - return r.body.decode(pd, version) -} - -func decodeRequest(r io.Reader) (req *request, err error) { - lengthBytes := make([]byte, 4) - if _, err := io.ReadFull(r, lengthBytes); err != nil { - return nil, err - } - - length := int32(binary.BigEndian.Uint32(lengthBytes)) - if length <= 4 || length > MaxRequestSize { - return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} - } - - encodedReq := make([]byte, length) - if _, err := io.ReadFull(r, encodedReq); err != nil { - return nil, err - } - - req = &request{} - if err := 
decode(encodedReq, req); err != nil { - return nil, err - } - return req, nil -} - -func allocateBody(key, version int16) protocolBody { - switch key { - case 0: - return &ProduceRequest{} - case 1: - return &FetchRequest{} - case 2: - return &OffsetRequest{} - case 3: - return &MetadataRequest{} - case 8: - return &OffsetCommitRequest{Version: version} - case 9: - return &OffsetFetchRequest{} - case 10: - return &ConsumerMetadataRequest{} - case 11: - return &JoinGroupRequest{} - case 12: - return &HeartbeatRequest{} - case 13: - return &LeaveGroupRequest{} - case 14: - return &SyncGroupRequest{} - case 15: - return &DescribeGroupsRequest{} - case 16: - return &ListGroupsRequest{} - case 17: - return &SaslHandshakeRequest{} - case 18: - return &ApiVersionsRequest{} - } - return nil -} diff --git a/vendor/github.com/Shopify/sarama/request_test.go b/vendor/github.com/Shopify/sarama/request_test.go deleted file mode 100644 index e431e23d1..000000000 --- a/vendor/github.com/Shopify/sarama/request_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package sarama - -import ( - "bytes" - "reflect" - "testing" -) - -type testRequestBody struct { -} - -func (s *testRequestBody) key() int16 { - return 0x666 -} - -func (s *testRequestBody) version() int16 { - return 0xD2 -} - -func (s *testRequestBody) encode(pe packetEncoder) error { - return pe.putString("abc") -} - -// not specific to request tests, just helper functions for testing structures that -// implement the encoder or decoder interfaces that needed somewhere to live - -func testEncodable(t *testing.T, name string, in encoder, expect []byte) { - packet, err := encode(in) - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet, expect) { - t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) - } -} - -func testDecodable(t *testing.T, name string, out decoder, in []byte) { - err := decode(in, out) - if err != nil { - t.Error("Decoding", name, "failed:", err) - } -} - -func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) { - err := versionedDecode(in, out, version) - if err != nil { - t.Error("Decoding", name, "version", version, "failed:", err) - } -} - -func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { - // Encoder request - req := &request{correlationID: 123, clientID: "foo", body: rb} - packet, err := encode(req) - headerSize := 14 + len("foo") - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet[headerSize:], expected) { - t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) - } - // Decoder request - decoded, err := decodeRequest(bytes.NewReader(packet)) - if err != nil { - t.Error("Failed to decode request", err) - } else if decoded.correlationID != 123 || decoded.clientID != "foo" { - t.Errorf("Decoded header is not valid: %v", decoded) - } else if !reflect.DeepEqual(rb, decoded.body) { - t.Errorf("Decoded request does not match the encoded one\nencoded: %v\ndecoded: %v", rb, decoded.body) - } -} - -func testResponse(t *testing.T, name string, res protocolBody, expected []byte) { - encoded, err := encode(res) - if err != nil { - t.Error(err) - } else if expected != nil && !bytes.Equal(encoded, expected) { - t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) - } - - decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder) - if err := versionedDecode(encoded, decoded, res.version()); err != nil { - t.Error("Decoding", name, "failed:", err) - } - - if 
!reflect.DeepEqual(decoded, res) { - t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) - } -} diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go deleted file mode 100644 index f3f4d27d6..000000000 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "fmt" - -type responseHeader struct { - length int32 - correlationID int32 -} - -func (r *responseHeader) decode(pd packetDecoder) (err error) { - r.length, err = pd.getInt32() - if err != nil { - return err - } - if r.length <= 4 || r.length > MaxResponseSize { - return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} - } - - r.correlationID, err = pd.getInt32() - return err -} diff --git a/vendor/github.com/Shopify/sarama/response_header_test.go b/vendor/github.com/Shopify/sarama/response_header_test.go deleted file mode 100644 index 8f9fdb80c..000000000 --- a/vendor/github.com/Shopify/sarama/response_header_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -var ( - responseHeaderBytes = []byte{ - 0x00, 0x00, 0x0f, 0x00, - 0x0a, 0xbb, 0xcc, 0xff} -) - -func TestResponseHeader(t *testing.T) { - header := responseHeader{} - - testDecodable(t, "response header", &header, responseHeaderBytes) - if header.length != 0xf00 { - t.Error("Decoding header length failed, got", header.length) - } - if header.correlationID != 0x0abbccff { - t.Error("Decoding header correlation id failed, got", header.correlationID) - } -} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go deleted file mode 100644 index 8faa74a91..000000000 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level -API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level -API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. - -To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel -and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. -The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be -useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees -depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the -SyncProducer can still sometimes be lost. - -To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic -consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the -https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 -and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. - -For lower-level needs, the Broker and Request/Response objects permit precise control over each connection -and message sent on the wire; the Client provides higher-level metadata management that is shared between -the producers and the consumer. 
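// A minimal sketch (not part of the vendored sources) of the shared-Client arrangement
// described above: one Client carries the cluster metadata, and both a producer and a
// consumer are built from it with the *FromClient constructors. The broker address and
// topic are placeholders; the Client is closed last, after everything built on top of it.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close() // deferred first, so it runs last

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		log.Fatalln(err)
	}
	defer consumer.Close()

	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	}); err != nil {
		log.Println("send failed:", err)
	}
}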
The Request/Response objects and properties are mostly undocumented, as they line up
-exactly with the protocol fields documented by Kafka at
-https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-*/
-package sarama
-
-import (
-	"io/ioutil"
-	"log"
-)
-
-// Logger is the instance of a StdLogger interface that Sarama writes connection
-// management events to. By default it is set to discard all log messages via ioutil.Discard,
-// but you can set it to redirect wherever you want.
-var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
-// StdLogger is used to log error messages.
-type StdLogger interface {
-	Print(v ...interface{})
-	Printf(format string, v ...interface{})
-	Println(v ...interface{})
-}
-
-// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-var PanicHandler func(interface{})
-
-// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
-// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
-// to process.
-var MaxRequestSize int32 = 100 * 1024 * 1024
-
-// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
-// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
-// protect the client from running out of memory. Please note that brokers do not have any natural limit on
-// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
-// (see https://issues.apache.org/jira/browse/KAFKA-2063).
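// A hedged sketch (not part of the vendored sources) of wiring up the package-level
// knobs documented here. The 32 MiB caps are illustrative, not recommendations; these
// are plain package variables, so set them before any clients, producers, or consumers
// are constructed.
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Surface connection-management events instead of discarding them.
	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)

	// Tighten the request/response size limits below the 100 MiB defaults.
	sarama.MaxRequestSize = 32 * 1024 * 1024
	sarama.MaxResponseSize = 32 * 1024 * 1024

	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()
}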
-var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go deleted file mode 100644 index fbbc8947b..000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ /dev/null @@ -1,33 +0,0 @@ -package sarama - -type SaslHandshakeRequest struct { - Mechanism string -} - -func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.Mechanism); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { - if r.Mechanism, err = pd.getString(); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeRequest) key() int16 { - return 17 -} - -func (r *SaslHandshakeRequest) version() int16 { - return 0 -} - -func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go deleted file mode 100644 index 806e628fd..000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package sarama - -import "testing" - -var ( - baseSaslRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Mechanism - } -) - -func TestSaslHandshakeRequest(t *testing.T) { - var request *SaslHandshakeRequest - - request = new(SaslHandshakeRequest) - request.Mechanism = "foo" - testRequest(t, "basic", request, baseSaslRequest) -} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go deleted file mode 100644 index 8379bbb26..000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ /dev/null @@ -1,38 +0,0 @@ -package sarama - -type SaslHandshakeResponse struct { - Err KError - EnabledMechanisms []string -} - -func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return pe.putStringArray(r.EnabledMechanisms) -} - -func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - var err error - if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { - return err - } - - return nil -} - -func (r *SaslHandshakeResponse) key() int16 { - return 17 -} - -func (r *SaslHandshakeResponse) version() int16 { - return 0 -} - -func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go deleted file mode 100644 index 1fd4c79e0..000000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import "testing" - -var ( - saslHandshakeResponse = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'f', 'o', 'o', - } -) - -func TestSaslHandshakeResponse(t *testing.T) { - var response *SaslHandshakeResponse - - response = new(SaslHandshakeResponse) - testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - if response.EnabledMechanisms[0] != "foo" { - t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms) - } -} diff --git 
a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go deleted file mode 100644 index 7fbe47b20..000000000 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ /dev/null @@ -1,100 +0,0 @@ -package sarama - -type SyncGroupRequest struct { - GroupId string - GenerationId int32 - MemberId string - GroupAssignments map[string][]byte -} - -func (r *SyncGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { - return err - } - - pe.putInt32(r.GenerationId) - - if err := pe.putString(r.MemberId); err != nil { - return err - } - - if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { - return err - } - for memberId, memberAssignment := range r.GroupAssignments { - if err := pe.putString(memberId); err != nil { - return err - } - if err := pe.putBytes(memberAssignment); err != nil { - return err - } - } - - return nil -} - -func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.GroupAssignments = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - memberAssignment, err := pd.getBytes() - if err != nil { - return err - } - - r.GroupAssignments[memberId] = memberAssignment - } - - return nil -} - -func (r *SyncGroupRequest) key() int16 { - return 14 -} - -func (r *SyncGroupRequest) version() int16 { - return 0 -} - -func (r *SyncGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} - -func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { - if r.GroupAssignments == nil { - r.GroupAssignments = make(map[string][]byte) - } - - r.GroupAssignments[memberId] = memberAssignment -} - -func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { - bin, err := encode(memberAssignment) - if err != nil { - return err - } - - r.AddGroupAssignment(memberId, bin) - return nil -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request_test.go b/vendor/github.com/Shopify/sarama/sync_group_request_test.go deleted file mode 100644 index 3f537ef9f..000000000 --- a/vendor/github.com/Shopify/sarama/sync_group_request_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package sarama - -import "testing" - -var ( - emptySyncGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 0, 0, // no assignments - } - - populatedSyncGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 0, 1, // one assignment - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment - } -) - -func TestSyncGroupRequest(t *testing.T) { - var request *SyncGroupRequest - - request = new(SyncGroupRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - testRequest(t, "empty", request, emptySyncGroupRequest) - - request = new(SyncGroupRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - request.AddGroupAssignment("baz", []byte("foo")) - 
testRequest(t, "populated", request, populatedSyncGroupRequest) -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go deleted file mode 100644 index 12aef6730..000000000 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -type SyncGroupResponse struct { - Err KError - MemberAssignment []byte -} - -func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { - assignment := new(ConsumerGroupMemberAssignment) - err := decode(r.MemberAssignment, assignment) - return assignment, err -} - -func (r *SyncGroupResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - return pe.putBytes(r.MemberAssignment) -} - -func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { - return err - } else { - r.Err = KError(kerr) - } - - r.MemberAssignment, err = pd.getBytes() - return -} - -func (r *SyncGroupResponse) key() int16 { - return 14 -} - -func (r *SyncGroupResponse) version() int16 { - return 0 -} - -func (r *SyncGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response_test.go b/vendor/github.com/Shopify/sarama/sync_group_response_test.go deleted file mode 100644 index 6fb708858..000000000 --- a/vendor/github.com/Shopify/sarama/sync_group_response_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - syncGroupResponseNoError = []byte{ - 0x00, 0x00, // No error - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data - } - - syncGroupResponseWithError = []byte{ - 0, 27, // ErrRebalanceInProgress - 0, 0, 0, 0, // No member assignment data - } -) - -func TestSyncGroupResponse(t *testing.T) { - var response *SyncGroupResponse - - response = new(SyncGroupResponse) - testVersionDecodable(t, "no error", response, syncGroupResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if !reflect.DeepEqual(response.MemberAssignment, []byte{0x01, 0x02, 0x03}) { - t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) - } - - response = new(SyncGroupResponse) - testVersionDecodable(t, "no error", response, syncGroupResponseWithError, 0) - if response.Err != ErrRebalanceInProgress { - t.Error("Decoding Err failed: ErrRebalanceInProgress expected but found", response.Err) - } - if !reflect.DeepEqual(response.MemberAssignment, []byte{}) { - t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) - } -} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go deleted file mode 100644 index b181527f0..000000000 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ /dev/null @@ -1,140 +0,0 @@ -package sarama - -import "sync" - -// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct -// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer -// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. 
-//
-// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
-// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
-// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
-type SyncProducer interface {
-
-	// SendMessage produces a given message, and returns only when it either has
-	// succeeded or failed to produce. It will return the partition and the offset
-	// of the produced message, or an error if the message failed to produce.
-	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
-
-	// SendMessages produces a given set of messages, and returns only when all
-	// messages in the set have either succeeded or failed. Note that messages
-	// can succeed and fail individually; if some succeed and some fail,
-	// SendMessages will return an error.
-	SendMessages(msgs []*ProducerMessage) error
-
-	// Close shuts down the producer and flushes any messages it may have buffered.
-	// You must call this function before a producer object passes out of scope, as
-	// it may otherwise leak memory. You must call this before calling Close on the
-	// underlying client.
-	Close() error
-}
-
-type syncProducer struct {
-	producer *asyncProducer
-	wg       sync.WaitGroup
-}
-
-// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
-func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
-	p, err := NewAsyncProducer(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
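// SendMessages, as documented above, folds per-message failures into a single error
// value; in this library that value is a sarama.ProducerErrors slice. A sketch (not part
// of the vendored sources) of unpacking it; broker address and topic are placeholders.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	batch := []*sarama.ProducerMessage{
		{Topic: "my_topic", Value: sarama.StringEncoder("one")},
		{Topic: "my_topic", Value: sarama.StringEncoder("two")},
	}

	if err := producer.SendMessages(batch); err != nil {
		// Some messages may have succeeded; the error carries one entry per failure.
		if errs, ok := err.(sarama.ProducerErrors); ok {
			for _, pe := range errs {
				log.Printf("message to %s failed: %v", pe.Msg.Topic, pe.Err)
			}
		} else {
			log.Println(err)
		}
	}
}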
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) { - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil -} - -func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { - p.conf.Producer.Return.Successes = true - p.conf.Producer.Return.Errors = true - sp := &syncProducer{producer: p} - - sp.wg.Add(2) - go withRecover(sp.handleSuccesses) - go withRecover(sp.handleErrors) - - return sp -} - -func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { - oldMetadata := msg.Metadata - defer func() { - msg.Metadata = oldMetadata - }() - - expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation - sp.producer.Input() <- msg - - if err := <-expectation; err != nil { - return -1, -1, err.Err - } - - return msg.Partition, msg.Offset, nil -} - -func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { - savedMetadata := make([]interface{}, len(msgs)) - for i := range msgs { - savedMetadata[i] = msgs[i].Metadata - } - defer func() { - for i := range msgs { - msgs[i].Metadata = savedMetadata[i] - } - }() - - expectations := make(chan chan *ProducerError, len(msgs)) - go func() { - for _, msg := range msgs { - expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation - sp.producer.Input() <- msg - expectations <- expectation - } - close(expectations) - }() - - var errors ProducerErrors - for expectation := range expectations { - if err := <-expectation; err != nil { - errors = append(errors, err) - } - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (sp *syncProducer) handleSuccesses() { - defer sp.wg.Done() - for msg := range sp.producer.Successes() { - expectation := msg.Metadata.(chan *ProducerError) - expectation <- nil - } -} - -func (sp *syncProducer) handleErrors() { - defer sp.wg.Done() - for err := range sp.producer.Errors() { - expectation := err.Msg.Metadata.(chan *ProducerError) - expectation <- err - } -} - -func (sp *syncProducer) Close() error { - sp.producer.AsyncClose() - sp.wg.Wait() - return nil -} diff --git a/vendor/github.com/Shopify/sarama/sync_producer_test.go b/vendor/github.com/Shopify/sarama/sync_producer_test.go deleted file mode 100644 index 12ed20e1f..000000000 --- a/vendor/github.com/Shopify/sarama/sync_producer_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package sarama - -import ( - "log" - "sync" - "testing" -) - -func TestSyncProducer(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - for i := 0; i < 10; i++ { - leader.Returns(prodSuccess) - } - - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - msg := &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - } - - partition, offset, err := producer.SendMessage(msg) - - if partition != 0 || msg.Partition != partition { - t.Error("Unexpected partition") - } - if offset != 0 || msg.Offset != offset { - t.Error("Unexpected offset") - } - if str, ok := msg.Metadata.(string); !ok || str != "test" 
{ - t.Error("Unexpected metadata") - } - if err != nil { - t.Error(err) - } - } - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestSyncProducerBatch(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 3 - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = producer.SendMessages([]*ProducerMessage{ - &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - }) - - if err != nil { - t.Error(err) - } - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestConcurrentSyncProducer(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 100 - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - wg := sync.WaitGroup{} - - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} - partition, _, err := producer.SendMessage(msg) - if partition != 0 { - t.Error("Unexpected partition") - } - if err != nil { - t.Error(err) - } - wg.Done() - }() - } - wg.Wait() - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestSyncProducerToNonExistingTopic(t *testing.T) { - broker := NewMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError) - broker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - config.Producer.Retry.Max = 0 - - producer, err := NewSyncProducer([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - broker.Returns(metadataResponse) - - _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) - if err != ErrUnknownTopicOrPartition { - t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err) - } - - safeClose(t, producer) - broker.Close() -} - -// This example shows the basic usage pattern of the SyncProducer. 
-func ExampleSyncProducer() {
-	producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer func() {
-		if err := producer.Close(); err != nil {
-			log.Fatalln(err)
-		}
-	}()
-
-	msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
-	partition, offset, err := producer.SendMessage(msg)
-	if err != nil {
-		log.Printf("FAILED to send message: %s\n", err)
-	} else {
-		log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/tools/README.md b/vendor/github.com/Shopify/sarama/tools/README.md
deleted file mode 100644
index 3464c4ad8..000000000
--- a/vendor/github.com/Shopify/sarama/tools/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Sarama tools
-
-This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation.
-Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function.
-
-- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
-- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
-- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
-
-To install all tools, run `go get github.com/Shopify/sarama/tools/...`
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
deleted file mode 100644
index 67da9dfa9..000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-kafka-console-consumer
-kafka-console-consumer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
deleted file mode 100644
index 4e77f0b70..000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# kafka-console-consumer
-
-A simple command line tool to consume partitions of a topic and print the
-messages on the standard output.
-
-### Installation
-
-    go get github.com/Shopify/sarama/tools/kafka-console-consumer
-
-### Usage
-
-    # Minimum invocation
-    kafka-console-consumer -topic=test -brokers=kafka1:9092
-
-    # It will pick up a KAFKA_PEERS environment variable
-    export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
-    kafka-console-consumer -topic=test
-
-    # You can specify the offset you want to start at. It can be either
-    # `oldest`, `newest`. The default is `newest`.
-    kafka-console-consumer -topic=test -offset=oldest
-    kafka-console-consumer -topic=test -offset=newest
-
-    # You can specify the partition(s) you want to consume as a comma-separated
-    # list. The default is `all`.
- kafka-console-consumer -topic=test -partitions=1,2,3 - - # Display all command line options - kafka-console-consumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go deleted file mode 100644 index 0f1eb89a9..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go +++ /dev/null @@ -1,145 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - "sync" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") - offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *verbose { - sarama.Logger = logger - } - - var initialOffset int64 - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - printUsageErrorAndExit("-offset should be `oldest` or `newest`") - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - partitionList, err := getPartitions(c) - if err != nil { - printErrorAndExit(69, "Failed to get the list of partitions: %s", err) - } - - var ( - messages = make(chan *sarama.ConsumerMessage, *bufferSize) - closing = make(chan struct{}) - wg sync.WaitGroup - ) - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - logger.Println("Initiating shutdown of consumer...") - close(closing) - }() - - for _, partition := range partitionList { - pc, err := c.ConsumePartition(*topic, partition, initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) - } - - go func(pc sarama.PartitionConsumer) { - <-closing - pc.AsyncClose() - }(pc) - - wg.Add(1) - go func(pc sarama.PartitionConsumer) { - defer wg.Done() - for message := range pc.Messages() { - messages <- message - } - }(pc) - } - - go func() { - for msg := range messages { - fmt.Printf("Partition:\t%d\n", msg.Partition) - fmt.Printf("Offset:\t%d\n", msg.Offset) - fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - }() - - wg.Wait() - logger.Println("Done consuming topic", *topic) - close(messages) - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func getPartitions(c sarama.Consumer) ([]int32, error) { - if *partitions == "all" { - return c.Partitions(*topic) - } - - tmp := strings.Split(*partitions, ",") - var pList []int32 - for i := range tmp { - val, err := 
strconv.ParseInt(tmp[i], 10, 32) - if err != nil { - return nil, err - } - pList = append(pList, int32(val)) - } - - return pList, nil -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore deleted file mode 100644 index 5837fe8ca..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-partitionconsumer -kafka-console-partitionconsumer.test diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md deleted file mode 100644 index 646dd5f5c..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# kafka-console-partitionconsumer - -NOTE: this tool is deprecated in favour of the more general and more powerful -`kafka-console-consumer`. - -A simple command line tool to consume a partition of a topic and print the messages -on the standard output. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer - -### Usage - - # Minimum invocation - kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-partitionconsumer -topic=test -partition=4 - - # You can specify the offset you want to start at. It can be either - # `oldest`, `newest`, or a specific offset number - kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest - kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 - - # Display all command line options - kafka-console-partitionconsumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go deleted file mode 100644 index d5e4464de..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go +++ /dev/null @@ -1,102 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") - offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *partition == -1 { - printUsageErrorAndExit("-partition is required") - } - - if *verbose { - sarama.Logger = logger - } - - var ( - initialOffset int64 - offsetError error - ) - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) - } - - if offsetError != nil { - printUsageErrorAndExit("Invalid initial offset: %s", *offset) - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start partition consumer: %s", err) - } - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - pc.AsyncClose() - }() - - for msg := range pc.Messages() { - fmt.Printf("Offset:\t%d\n", msg.Offset) - fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore deleted file mode 100644 index 2b9e563a1..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-producer -kafka-console-producer.test diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md deleted file mode 100644 index 6b3a65f21..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# kafka-console-producer - -A simple command line tool to produce a single message to Kafka. 
- -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-producer - - -### Usage - - # Minimum invocation - kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-producer -topic=test -value=value - - # It will read the value from stdin by using pipes - echo "hello world" | kafka-console-producer -topic=test - - # Specify a key: - echo "hello world" | kafka-console-producer -topic=test -key=key - - # Partitioning: by default, kafka-console-producer will partition as follows: - # - manual partitioning if a -partition is provided - # - hash partitioning by key if a -key is provided - # - random partitioning otherwise. - # - # You can override this using the -partitioner argument: - echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random - - # Display all command line options - kafka-console-producer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go deleted file mode 100644 index 6a1765d7c..000000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") - topic = flag.String("topic", "", "REQUIRED: the topic to produce to") - key = flag.String("key", "", "The key of the message to produce. Can be empty.") - value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") - partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") - partition = flag.Int("partition", -1, "The partition to produce to.") - verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") - silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("no -brokers specified. 
Alternatively, set the KAFKA_PEERS environment variable") - } - - if *topic == "" { - printUsageErrorAndExit("no -topic specified") - } - - if *verbose { - sarama.Logger = logger - } - - config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll - - switch *partitioner { - case "": - if *partition >= 0 { - config.Producer.Partitioner = sarama.NewManualPartitioner - } else { - config.Producer.Partitioner = sarama.NewHashPartitioner - } - case "hash": - config.Producer.Partitioner = sarama.NewHashPartitioner - case "random": - config.Producer.Partitioner = sarama.NewRandomPartitioner - case "manual": - config.Producer.Partitioner = sarama.NewManualPartitioner - if *partition == -1 { - printUsageErrorAndExit("-partition is required when partitioning manually") - } - default: - printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) - } - - message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} - - if *key != "" { - message.Key = sarama.StringEncoder(*key) - } - - if *value != "" { - message.Value = sarama.StringEncoder(*value) - } else if stdinAvailable() { - bytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - printErrorAndExit(66, "Failed to read data from the standard input: %s", err) - } - message.Value = sarama.ByteEncoder(bytes) - } else { - printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") - } - - producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) - if err != nil { - printErrorAndExit(69, "Failed to open Kafka producer: %s", err) - } - defer func() { - if err := producer.Close(); err != nil { - logger.Println("Failed to close Kafka producer cleanly:", err) - } - }() - - partition, offset, err := producer.SendMessage(message) - if err != nil { - printErrorAndExit(69, "Failed to produce message: %s", err) - } else if !*silent { - fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(message string) { - fmt.Fprintln(os.Stderr, "ERROR:", message) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} - -func stdinAvailable() bool { - stat, _ := os.Stdin.Stat() - return (stat.Mode() & os.ModeCharDevice) == 0 -} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go deleted file mode 100644 index b60e53a07..000000000 --- a/vendor/github.com/Shopify/sarama/utils.go +++ /dev/null @@ -1,150 +0,0 @@ -package sarama - -import ( - "bufio" - "net" - "sort" -) - -type none struct{} - -// make []int32 sortable so we can sort partition numbers -type int32Slice []int32 - -func (slice int32Slice) Len() int { - return len(slice) -} - -func (slice int32Slice) Less(i, j int) bool { - return slice[i] < slice[j] -} - -func (slice int32Slice) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func dupeAndSort(input []int32) []int32 { - ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } - - sort.Sort(int32Slice(ret)) - return ret -} - -func withRecover(fn func()) { - defer func() { - handler := PanicHandler - if handler != nil { - if err := recover(); err != nil { - handler(err) - } - } - }() - - fn() -} - -func safeAsyncClose(b *Broker) { - tmp 
:= b // local var prevents clobbering in goroutine - go withRecover(func() { - if connected, _ := tmp.Connected(); connected { - if err := tmp.Close(); err != nil { - Logger.Println("Error closing broker", tmp.ID(), ":", err) - } - } - }) -} - -// Encoder is a simple interface for any type that can be encoded as an array of bytes -// in order to be sent as the key or value of a Kafka message. Length() is provided as an -// optimization, and must return the same as len() on the result of Encode(). -type Encoder interface { - Encode() ([]byte, error) - Length() int -} - -// make strings and byte slices encodable for convenience so they can be used as keys -// and/or values in kafka messages - -// StringEncoder implements the Encoder interface for Go strings so that they can be used -// as the Key or Value in a ProducerMessage. -type StringEncoder string - -func (s StringEncoder) Encode() ([]byte, error) { - return []byte(s), nil -} - -func (s StringEncoder) Length() int { - return len(s) -} - -// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used -// as the Key or Value in a ProducerMessage. -type ByteEncoder []byte - -func (b ByteEncoder) Encode() ([]byte, error) { - return b, nil -} - -func (b ByteEncoder) Length() int { - return len(b) -} - -// bufConn wraps a net.Conn with a buffer for reads to reduce the number of -// reads that trigger syscalls. -type bufConn struct { - net.Conn - buf *bufio.Reader -} - -func newBufConn(conn net.Conn) *bufConn { - return &bufConn{ - Conn: conn, - buf: bufio.NewReader(conn), - } -} - -func (bc *bufConn) Read(b []byte) (n int, err error) { - return bc.buf.Read(b) -} - -// KafkaVersion instances represent versions of the upstream Kafka broker. -type KafkaVersion struct { - // it's a struct rather than just typing the array directly to make it opaque and stop people - // generating their own arbitrary versions - version [4]uint -} - -func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { - return KafkaVersion{ - version: [4]uint{major, minor, veryMinor, patch}, - } -} - -// IsAtLeast returns true if and only if the version it is called on is -// greater than or equal to the version passed in: -// V1.IsAtLeast(V2) // false -// V2.IsAtLeast(V1) // true -func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { - for i := range v.version { - if v.version[i] > other.version[i] { - return true - } else if v.version[i] < other.version[i] { - return false - } - } - return true -} - -// Effective constants defining the supported kafka versions. -var ( - V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) - V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) - V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) - V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) - V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) - V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) - minVersion = V0_8_2_0 -) diff --git a/vendor/github.com/Shopify/sarama/utils_test.go b/vendor/github.com/Shopify/sarama/utils_test.go deleted file mode 100644 index a9e09502c..000000000 --- a/vendor/github.com/Shopify/sarama/utils_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -func TestVersionCompare(t *testing.T) { - if V0_8_2_0.IsAtLeast(V0_8_2_1) { - t.Error("0.8.2.0 >= 0.8.2.1") - } - if !V0_8_2_1.IsAtLeast(V0_8_2_0) { - t.Error("! 0.8.2.1 >= 0.8.2.0") - } - if !V0_8_2_0.IsAtLeast(V0_8_2_0) { - t.Error("! 0.8.2.0 >= 0.8.2.0") - } - if !V0_9_0_0.IsAtLeast(V0_8_2_1) { - t.Error("! 
0.9.0.0 >= 0.8.2.1") - } - if V0_8_2_1.IsAtLeast(V0_10_0_0) { - t.Error("0.8.2.1 >= 0.10.0.0") - } -} diff --git a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh deleted file mode 100755 index 95e47dde4..000000000 --- a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -# Launch and wait for toxiproxy -${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & -while ! nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid -done diff --git a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf deleted file mode 100644 index d975de438..000000000 --- a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf +++ /dev/null @@ -1,5 +0,0 @@ -start on started zookeeper-ZK_PORT -stop on stopping zookeeper-ZK_PORT - -pre-start exec sleep 2 -exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties diff --git a/vendor/github.com/Shopify/sarama/vagrant/provision.sh b/vendor/github.com/Shopify/sarama/vagrant/provision.sh deleted file mode 100755 index ace768f40..000000000 --- a/vendor/github.com/Shopify/sarama/vagrant/provision.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -ex - -apt-get update -yes | apt-get install default-jre - -export KAFKA_INSTALL_ROOT=/opt -export KAFKA_HOSTNAME=192.168.100.67 -export KAFKA_VERSION=0.9.0.1 -export REPOSITORY_ROOT=/vagrant - -sh /vagrant/vagrant/install_cluster.sh -sh /vagrant/vagrant/setup_services.sh -sh /vagrant/vagrant/create_topics.sh diff --git a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh deleted file mode 100755 index e52c00e7b..000000000 --- a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & -PID=$! - -while ! nc -q 1 localhost 8474 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma seperated list of directories under which to store log files -log.dirs=KAFKA_DATADIR - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=2 - -# Create new topics with a replication factor of 2 so failover can be tested -# more easily. -default.replication.factor=2 - -auto.create.topics.enable=false -delete.topic.enable=true - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. 
Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -log.retention.bytes=268435456 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=268435456 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=60000 - -# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires. -# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction. -log.cleaner.enable=false - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). -# This is a comma-separated list of host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. 
-zookeeper.connect=localhost:ZK_PORT - -# Timeout in ms for connecting to zookeeper -zookeeper.session.timeout.ms=3000 -zookeeper.connection.timeout.ms=3000 diff --git a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh deleted file mode 100755 index 81d8ea05d..000000000 --- a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -set -ex - -stop toxiproxy || true -cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf -cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/ -start toxiproxy - -for i in 1 2 3 4 5; do - ZK_PORT=`expr $i + 2180` - KAFKA_PORT=`expr $i + 9090` - - stop zookeeper-${ZK_PORT} || true - - # set up zk service - cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf - - # set up kafka service - cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - - start zookeeper-${ZK_PORT} -done - -# Wait for the last kafka node to finish booting -while ! nc -q 1 localhost 29095 0 { - expiry := b.lastError.Add(b.timeout) - if time.Now().After(expiry) { - b.errors = 0 - } - } - - switch b.state { - case closed: - b.errors++ - if b.errors == b.errorThreshold { - b.openBreaker() - } else { - b.lastError = time.Now() - } - case halfOpen: - b.openBreaker() - } - } -} - -func (b *Breaker) openBreaker() { - b.changeState(open) - go b.timer() -} - -func (b *Breaker) closeBreaker() { - b.changeState(closed) -} - -func (b *Breaker) timer() { - time.Sleep(b.timeout) - - b.lock.Lock() - defer b.lock.Unlock() - - b.changeState(halfOpen) -} - -func (b *Breaker) changeState(newState uint32) { - b.errors = 0 - b.successes = 0 - atomic.StoreUint32(&b.state, newState) -} diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go deleted file mode 100644 index b41308db6..000000000 --- a/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package breaker - -import ( - "errors" - "testing" - "time" -) - -var errSomeError = errors.New("errSomeError") - -func alwaysPanics() error { - panic("foo") -} - -func returnsError() error { - return errSomeError -} - -func returnsSuccess() error { - return nil -} - -func TestBreakerErrorExpiry(t *testing.T) { - breaker := New(2, 1, 1*time.Second) - - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - time.Sleep(1 * time.Second) - } - - for i := 0; i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - time.Sleep(1 * time.Second) - } -} - -func TestBreakerPanicsCountAsErrors(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - func() { - defer func() { - val := recover() - if val.(string) != "foo" { - t.Error("incorrect panic") - } - }() - if err := breaker.Run(alwaysPanics); err != nil { - t.Error(err) - } - t.Error("shouldn't get here") - }() - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } -} - -func TestBreakerStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three 
errors opens the breaker - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is open - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - } - // error works - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is still closed - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } -} - -func TestBreakerAsyncStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - } - - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is open - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // error works - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is still closed - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } -} - -func ExampleBreaker() { - breaker := New(3, 1, 5*time.Second) - - for { - result := breaker.Run(func() error { - // communicate with some external service and - // return an error if the communication failed - return nil - }) - - switch result { - case nil: - // success! 
- case ErrBreakerOpen: - // our function wasn't run because the breaker was open - default: - // some other error - } - } -} diff --git a/vendor/github.com/eapache/go-resiliency/deadline/README.md b/vendor/github.com/eapache/go-resiliency/deadline/README.md deleted file mode 100644 index ac97b460f..000000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/README.md +++ /dev/null @@ -1,27 +0,0 @@ -deadline -======== - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/deadline?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/deadline) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The deadline/timeout resiliency pattern for golang. - -Creating a deadline takes one parameter: how long to wait. - -```go -dl := deadline.New(1 * time.Second) - -err := dl.Run(func(stopper <-chan struct{}) error { - // do something potentially slow - // give up when the `stopper` channel is closed (indicating a time-out) - return nil -}) - -switch err { -case deadline.ErrTimedOut: - // execution took too long, oops -default: - // some other error -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/deadline/deadline.go b/vendor/github.com/eapache/go-resiliency/deadline/deadline.go deleted file mode 100644 index 3a6dfb0ee..000000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/deadline.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package deadline implements the deadline (also known as "timeout") resiliency pattern for Go. -package deadline - -import ( - "errors" - "time" -) - -// ErrTimedOut is the error returned from Run when the deadline expires. -var ErrTimedOut = errors.New("timed out waiting for function to finish") - -// Deadline implements the deadline/timeout resiliency pattern. -type Deadline struct { - timeout time.Duration -} - -// New constructs a new Deadline with the given timeout. -func New(timeout time.Duration) *Deadline { - return &Deadline{ - timeout: timeout, - } -} - -// Run runs the given function, passing it a stopper channel. If the deadline passes before -// the function finishes executing, Run returns ErrTimedOut to the caller and closes the stopper -// channel so that the work function can attempt to exit gracefully. It does not (and cannot) -// simply kill the running function, so if it doesn't respect the stopper channel then it may -// keep running after the deadline passes. If the function finishes before the deadline, then -// the return value of the function is returned from Run. 
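The doc comment above describes the stopper contract but not how a work function might honour it. A minimal sketch, assuming a hypothetical `processChunk` helper standing in for the slow part, is to poll the stopper between units of work:

```go
package main

import (
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/deadline"
)

// processChunk is a hypothetical stand-in for one unit of slow work.
func processChunk(i int) error {
	time.Sleep(300 * time.Millisecond)
	return nil
}

func main() {
	dl := deadline.New(1 * time.Second)

	err := dl.Run(func(stopper <-chan struct{}) error {
		for i := 0; i < 10; i++ {
			select {
			case <-stopper:
				// The deadline has passed and Run has already returned
				// ErrTimedOut; abandon the remaining chunks and exit.
				return nil
			default:
			}
			if err := processChunk(i); err != nil {
				return err
			}
		}
		return nil
	})

	fmt.Println(err) // "timed out waiting for function to finish"
}
```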
-func (d *Deadline) Run(work func(<-chan struct{}) error) error { - result := make(chan error) - stopper := make(chan struct{}) - - go func() { - result <- work(stopper) - }() - - select { - case ret := <-result: - return ret - case <-time.After(d.timeout): - close(stopper) - return ErrTimedOut - } -} diff --git a/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go b/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go deleted file mode 100644 index 6939f52e8..000000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package deadline - -import ( - "errors" - "testing" - "time" -) - -func takesFiveMillis(stopper <-chan struct{}) error { - time.Sleep(5 * time.Millisecond) - return nil -} - -func takesTwentyMillis(stopper <-chan struct{}) error { - time.Sleep(20 * time.Millisecond) - return nil -} - -func returnsError(stopper <-chan struct{}) error { - return errors.New("foo") -} - -func TestDeadline(t *testing.T) { - dl := New(10 * time.Millisecond) - - if err := dl.Run(takesFiveMillis); err != nil { - t.Error(err) - } - - if err := dl.Run(takesTwentyMillis); err != ErrTimedOut { - t.Error(err) - } - - if err := dl.Run(returnsError); err.Error() != "foo" { - t.Error(err) - } - - done := make(chan struct{}) - err := dl.Run(func(stopper <-chan struct{}) error { - <-stopper - close(done) - return nil - }) - if err != ErrTimedOut { - t.Error(err) - } - <-done -} - -func ExampleDeadline() { - dl := New(1 * time.Second) - - err := dl.Run(func(stopper <-chan struct{}) error { - // do something possibly slow - // check stopper function and give up if timed out - return nil - }) - - switch err { - case ErrTimedOut: - // execution took too long, oops - default: - // some other error - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/README.md b/vendor/github.com/eapache/go-resiliency/retrier/README.md deleted file mode 100644 index dd30af7a0..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/README.md +++ /dev/null @@ -1,26 +0,0 @@ -retrier -======= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/retrier?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/retrier) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The retriable resiliency pattern for golang. - -Creating a retrier takes two parameters: -- the times to back-off between retries (and implicitly the number of times to - retry) -- the classifier that determines which errors to retry - -```go -r := retrier.New(retrier.ConstantBackoff(3, 100*time.Millisecond), nil) - -err := r.Run(func() error { - // do some work - return nil -}) - -if err != nil { - // handle the case where the work failed three times -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go b/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go deleted file mode 100644 index faf6f8cf9..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go +++ /dev/null @@ -1,24 +0,0 @@ -package retrier - -import "time" - -// ConstantBackoff generates a simple back-off strategy of retrying 'n' times, and waiting 'amount' time after each one. 
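To make the back-off shapes concrete, a small worked example of the two generators in this file (outputs shown as comments):

```go
package main

import (
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/retrier"
)

func main() {
	// Three retries, pausing a flat 100ms before each one.
	fmt.Println(retrier.ConstantBackoff(3, 100*time.Millisecond))
	// [100ms 100ms 100ms]

	// Four retries, doubling the pause each time.
	fmt.Println(retrier.ExponentialBackoff(4, 1*time.Minute))
	// [1m0s 2m0s 4m0s 8m0s]
}
```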
-func ConstantBackoff(n int, amount time.Duration) []time.Duration { - ret := make([]time.Duration, n) - for i := range ret { - ret[i] = amount - } - return ret -} - -// ExponentialBackoff generates a simple back-off strategy of retrying 'n' times, and doubling the amount of -// time waited after each one. -func ExponentialBackoff(n int, initialAmount time.Duration) []time.Duration { - ret := make([]time.Duration, n) - next := initialAmount - for i := range ret { - ret[i] = next - next *= 2 - } - return ret -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go b/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go deleted file mode 100644 index 1168adfeb..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package retrier - -import ( - "testing" - "time" -) - -func TestConstantBackoff(t *testing.T) { - b := ConstantBackoff(1, 10*time.Millisecond) - if len(b) != 1 { - t.Error("incorrect length") - } - for i := range b { - if b[i] != 10*time.Millisecond { - t.Error("incorrect value at", i) - } - } - - b = ConstantBackoff(10, 250*time.Hour) - if len(b) != 10 { - t.Error("incorrect length") - } - for i := range b { - if b[i] != 250*time.Hour { - t.Error("incorrect value at", i) - } - } -} - -func TestExponentialBackoff(t *testing.T) { - b := ExponentialBackoff(1, 10*time.Millisecond) - if len(b) != 1 { - t.Error("incorrect length") - } - if b[0] != 10*time.Millisecond { - t.Error("incorrect value") - } - - b = ExponentialBackoff(4, 1*time.Minute) - if len(b) != 4 { - t.Error("incorrect length") - } - if b[0] != 1*time.Minute { - t.Error("incorrect value") - } - if b[1] != 2*time.Minute { - t.Error("incorrect value") - } - if b[2] != 4*time.Minute { - t.Error("incorrect value") - } - if b[3] != 8*time.Minute { - t.Error("incorrect value") - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/classifier.go b/vendor/github.com/eapache/go-resiliency/retrier/classifier.go deleted file mode 100644 index 7dd71c798..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/classifier.go +++ /dev/null @@ -1,66 +0,0 @@ -package retrier - -// Action is the type returned by a Classifier to indicate how the Retrier should proceed. -type Action int - -const ( - Succeed Action = iota // Succeed indicates the Retrier should treat this value as a success. - Fail // Fail indicates the Retrier should treat this value as a hard failure and not retry. - Retry // Retry indicates the Retrier should treat this value as a soft failure and retry. -) - -// Classifier is the interface implemented by anything that can classify Errors for a Retrier. -type Classifier interface { - Classify(error) Action -} - -// DefaultClassifier classifies errors in the simplest way possible. If -// the error is nil, it returns Succeed, otherwise it returns Retry. -type DefaultClassifier struct{} - -// Classify implements the Classifier interface. -func (c DefaultClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - return Retry -} - -// WhitelistClassifier classifies errors based on a whitelist. If the error is nil, it -// returns Succeed; if the error is in the whitelist, it returns Retry; otherwise, it returns Fail. -type WhitelistClassifier []error - -// Classify implements the Classifier interface. 
-func (list WhitelistClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - for _, pass := range list { - if err == pass { - return Retry - } - } - - return Fail -} - -// BlacklistClassifier classifies errors based on a blacklist. If the error is nil, it -// returns Succeed; if the error is in the blacklist, it returns Fail; otherwise, it returns Retry. -type BlacklistClassifier []error - -// Classify implements the Classifier interface. -func (list BlacklistClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - for _, pass := range list { - if err == pass { - return Fail - } - } - - return Retry -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go b/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go deleted file mode 100644 index 953102fbb..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package retrier - -import ( - "errors" - "testing" -) - -var ( - errFoo = errors.New("FOO") - errBar = errors.New("BAR") - errBaz = errors.New("BAZ") -) - -func TestDefaultClassifier(t *testing.T) { - c := DefaultClassifier{} - - if c.Classify(nil) != Succeed { - t.Error("default misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("default misclassified foo") - } - if c.Classify(errBar) != Retry { - t.Error("default misclassified bar") - } - if c.Classify(errBaz) != Retry { - t.Error("default misclassified baz") - } -} - -func TestWhitelistClassifier(t *testing.T) { - c := WhitelistClassifier{errFoo, errBar} - - if c.Classify(nil) != Succeed { - t.Error("whitelist misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("whitelist misclassified foo") - } - if c.Classify(errBar) != Retry { - t.Error("whitelist misclassified bar") - } - if c.Classify(errBaz) != Fail { - t.Error("whitelist misclassified baz") - } -} - -func TestBlacklistClassifier(t *testing.T) { - c := BlacklistClassifier{errBar} - - if c.Classify(nil) != Succeed { - t.Error("blacklist misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("blacklist misclassified foo") - } - if c.Classify(errBar) != Fail { - t.Error("blacklist misclassified bar") - } - if c.Classify(errBaz) != Retry { - t.Error("blacklist misclassified baz") - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/retrier.go b/vendor/github.com/eapache/go-resiliency/retrier/retrier.go deleted file mode 100644 index ff328742b..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/retrier.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package retrier implements the "retriable" resiliency pattern for Go. -package retrier - -import ( - "math/rand" - "time" -) - -// Retrier implements the "retriable" resiliency pattern, abstracting out the process of retrying a failed action -// a certain number of times with an optional back-off between each retry. -type Retrier struct { - backoff []time.Duration - class Classifier - jitter float64 - rand *rand.Rand -} - -// New constructs a Retrier with the given backoff pattern and classifier. The length of the backoff pattern -// indicates how many times an action will be retried, and the value at each index indicates the amount of time -// waited before each subsequent retry. The classifier is used to determine which errors should be retried and -// which should cause the retrier to fail fast. The DefaultClassifier is used if nil is passed. 
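A brief sketch of how the classifier and back-off compose, assuming a hypothetical transient error `errTemporary` that is the only failure worth retrying:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/retrier"
)

// errTemporary is a hypothetical transient failure worth retrying.
var errTemporary = errors.New("temporary failure")

func main() {
	// Retry errTemporary up to three times with exponential back-off;
	// any other error fails fast (the whitelist classifies it as Fail).
	r := retrier.New(
		retrier.ExponentialBackoff(3, 100*time.Millisecond),
		retrier.WhitelistClassifier{errTemporary},
	)

	attempts := 0
	err := r.Run(func() error {
		attempts++
		if attempts < 3 {
			return errTemporary // classified Retry, so Run tries again
		}
		return nil // succeeds on the third attempt
	})
	fmt.Println(err, attempts) // <nil> 3
}
```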
-func New(backoff []time.Duration, class Classifier) *Retrier { - if class == nil { - class = DefaultClassifier{} - } - - return &Retrier{ - backoff: backoff, - class: class, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - } -} - -// Run executes the given work function, then classifies its return value based on the classifier used -// to construct the Retrier. If the result is Succeed or Fail, the return value of the work function is -// returned to the caller. If the result is Retry, then Run sleeps according to its backoff policy -// before retrying. If the total number of retries is exceeded then the return value of the work function -// is returned to the caller regardless. -func (r *Retrier) Run(work func() error) error { - retries := 0 - for { - ret := work() - - switch r.class.Classify(ret) { - case Succeed, Fail: - return ret - case Retry: - if retries >= len(r.backoff) { - return ret - } - time.Sleep(r.calcSleep(retries)) - retries++ - } - } -} - -func (r *Retrier) calcSleep(i int) time.Duration { - // take a random float in the range (-r.jitter, +r.jitter) and multiply it by the base amount - return r.backoff[i] + time.Duration(((r.rand.Float64()*2)-1)*r.jitter*float64(r.backoff[i])) -} - -// SetJitter sets the amount of jitter on each back-off to a factor between 0.0 and 1.0 (values outside this range -// are silently ignored). When a retry occurs, the back-off is adjusted by a random amount up to this value. -func (r *Retrier) SetJitter(jit float64) { - if jit < 0 || jit > 1 { - return - } - r.jitter = jit -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go b/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go deleted file mode 100644 index 2d061d9b2..000000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package retrier - -import ( - "testing" - "time" -) - -var i int - -func genWork(returns []error) func() error { - i = 0 - return func() error { - i++ - if i > len(returns) { - return nil - } - return returns[i-1] - } -} - -func TestRetrier(t *testing.T) { - r := New([]time.Duration{0, 10 * time.Millisecond}, WhitelistClassifier{errFoo}) - - err := r.Run(genWork([]error{errFoo, errFoo})) - if err != nil { - t.Error(err) - } - if i != 3 { - t.Error("run wrong number of times") - } - - err = r.Run(genWork([]error{errFoo, errBar})) - if err != errBar { - t.Error(err) - } - if i != 2 { - t.Error("run wrong number of times") - } - - err = r.Run(genWork([]error{errBar, errBaz})) - if err != errBar { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } -} - -func TestRetrierNone(t *testing.T) { - r := New(nil, nil) - - i = 0 - err := r.Run(func() error { - i++ - return errFoo - }) - if err != errFoo { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } - - i = 0 - err = r.Run(func() error { - i++ - return nil - }) - if err != nil { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } -} - -func TestRetrierJitter(t *testing.T) { - r := New([]time.Duration{0, 10 * time.Millisecond, 4 * time.Hour}, nil) - - if r.calcSleep(0) != 0 { - t.Error("Incorrect sleep calculated") - } - if r.calcSleep(1) != 10*time.Millisecond { - t.Error("Incorrect sleep calculated") - } - if r.calcSleep(2) != 4*time.Hour { - t.Error("Incorrect sleep calculated") - } - - r.SetJitter(0.25) - for i := 0; i < 20; i++ { - if r.calcSleep(0) != 0 { - t.Error("Incorrect sleep calculated") - } - - slp := r.calcSleep(1) - if slp < 
7500*time.Microsecond || slp > 12500*time.Microsecond { - t.Error("Incorrect sleep calculated") - } - - slp = r.calcSleep(2) - if slp < 3*time.Hour || slp > 5*time.Hour { - t.Error("Incorrect sleep calculated") - } - } - - r.SetJitter(-1) - if r.jitter != 0.25 { - t.Error("Invalid jitter value accepted") - } - - r.SetJitter(2) - if r.jitter != 0.25 { - t.Error("Invalid jitter value accepted") - } -} - -func ExampleRetrier() { - r := New(ConstantBackoff(3, 100*time.Millisecond), nil) - - err := r.Run(func() error { - // do some work - return nil - }) - - if err != nil { - // handle the case where the work failed three times - } -} diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/README.md b/vendor/github.com/eapache/go-resiliency/semaphore/README.md deleted file mode 100644 index a4a73ea07..000000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/README.md +++ /dev/null @@ -1,22 +0,0 @@ -semaphore -========= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/semaphore?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/semaphore) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The semaphore resiliency pattern for golang. - -Creating a semaphore takes two parameters: -- ticket count (how many tickets to give out at once) -- timeout (how long to wait for a ticket if none are currently available) - -```go -sem := semaphore.New(3, 1*time.Second) - -if err := sem.Acquire(); err != nil { - // could not acquire semaphore - return err -} -defer sem.Release() -``` diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go b/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go deleted file mode 100644 index 90cb1fd15..000000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package semaphore implements the semaphore resiliency pattern for Go. -package semaphore - -import ( - "errors" - "time" -) - -// ErrNoTickets is the error returned by Acquire when it could not acquire -// a ticket from the semaphore within the configured timeout. -var ErrNoTickets = errors.New("could not aquire semaphore ticket") - -// Semaphore implements the semaphore resiliency pattern -type Semaphore struct { - sem chan struct{} - timeout time.Duration -} - -// New constructs a new Semaphore with the given ticket-count -// and timeout. -func New(tickets int, timeout time.Duration) *Semaphore { - return &Semaphore{ - sem: make(chan struct{}, tickets), - timeout: timeout, - } -} - -// Acquire tries to acquire a ticket from the semaphore. If it can, it returns nil. -// If it cannot after "timeout" amount of time, it returns ErrNoTickets. It is -// safe to call Acquire concurrently on a single Semaphore. -func (s *Semaphore) Acquire() error { - select { - case s.sem <- struct{}{}: - return nil - case <-time.After(s.timeout): - return ErrNoTickets - } -} - -// Release releases an acquired ticket back to the semaphore. It is safe to call -// Release concurrently on a single Semaphore. It is an error to call Release on -// a Semaphore from which you have not first acquired a ticket. 
-func (s *Semaphore) Release() { - <-s.sem -} diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go b/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go deleted file mode 100644 index 3eb85f1a8..000000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package semaphore - -import ( - "testing" - "time" -) - -func TestSemaphoreAcquireRelease(t *testing.T) { - sem := New(3, 1*time.Second) - - for i := 0; i < 10; i++ { - if err := sem.Acquire(); err != nil { - t.Error(err) - } - if err := sem.Acquire(); err != nil { - t.Error(err) - } - if err := sem.Acquire(); err != nil { - t.Error(err) - } - sem.Release() - sem.Release() - sem.Release() - } -} - -func TestSemaphoreBlockTimeout(t *testing.T) { - sem := New(1, 200*time.Millisecond) - - if err := sem.Acquire(); err != nil { - t.Error(err) - } - - start := time.Now() - if err := sem.Acquire(); err != ErrNoTickets { - t.Error(err) - } - if start.Add(200 * time.Millisecond).After(time.Now()) { - t.Error("semaphore did not wait long enough") - } - - sem.Release() - if err := sem.Acquire(); err != nil { - t.Error(err) - } -} - -func ExampleSemaphore() { - sem := New(3, 1*time.Second) - - for i := 0; i < 10; i++ { - go func() { - if err := sem.Acquire(); err != nil { - return //could not acquire semaphore - } - defer sem.Release() - - // do something semaphore-guarded - }() - } -} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml deleted file mode 100644 index d6cf4f1fa..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: -- 1.5.4 -- 1.6.1 - -sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE deleted file mode 100644 index 5bf3688d9..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md deleted file mode 100644 index 3f2695c72..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# go-xerial-snappy - -[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) - -Xerial-compatible Snappy framing support for golang. - -Packages using Xerial for snappy encoding use a framing format incompatible with -basically everything else in existence. This package wraps Go's built-in snappy -package to support it. - -Apps that use this format include Apache Kafka (see -https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for -details). diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go deleted file mode 100644 index b8f8b51fc..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go +++ /dev/null @@ -1,43 +0,0 @@ -package snappy - -import ( - "bytes" - "encoding/binary" - - master "github.com/golang/snappy" -) - -var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} - -// Encode encodes data as snappy with no framing header. -func Encode(src []byte) []byte { - return master.Encode(nil, src) -} - -// Decode decodes snappy data whether it is traditional unframed -// or includes the xerial framing format. -func Decode(src []byte) ([]byte, error) { - if !bytes.Equal(src[:8], xerialHeader) { - return master.Decode(nil, src) - } - - var ( - pos = uint32(16) - max = uint32(len(src)) - dst = make([]byte, 0, len(src)) - chunk []byte - err error - ) - for pos < max { - size := binary.BigEndian.Uint32(src[pos : pos+4]) - pos += 4 - - chunk, err = master.Decode(chunk, src[pos:pos+size]) - if err != nil { - return nil, err - } - pos += size - dst = append(dst, chunk...) - } - return dst, nil -} diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go b/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go deleted file mode 100644 index e94f635df..000000000 --- a/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package snappy - -import ( - "bytes" - "testing" -) - -var snappyTestCases = map[string][]byte{ - "REPEATREPEATREPEATREPEATREPEATREPEAT": {36, 20, 82, 69, 80, 69, 65, 84, 118, 6, 0}, - "REALLY SHORT": {12, 44, 82, 69, 65, 76, 76, 89, 32, 83, 72, 79, 82, 84}, - "AXBXCXDXEXFX": {12, 44, 65, 88, 66, 88, 67, 88, 68, 88, 69, 88, 70, 88}, -} - -var snappyStreamTestCases = map[string][]byte{ - "PLAINDATA": {130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 11, 9, 32, 80, 76, 65, 73, 78, 68, 65, 84, 65}, - `{"a":"UtaitILHMDAAAAfU","b":"日本"}`: {130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 39, 37, 144, 123, 34, 97, 34, 58, 34, 85, 116, 97, 105, 116, 73, 76, 72, 77, 68, 65, 65, 65, 65, 102, 85, 34, 44, 34, 98, 34, 58, 34, 230, 151, 165, 230, 156, 172, 34, 125}, - `Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. 
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias except`: {130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 3, 89, 128, 8, 240, 90, 83, 101, 100, 32, 117, 116, 32, 112, 101, 114, 115, 112, 105, 99, 105, 97, 116, 105, 115, 32, 117, 110, 100, 101, 32, 111, 109, 110, 105, 115, 32, 105, 115, 116, 101, 32, 110, 97, 116, 117, 115, 32, 101, 114, 114, 111, 114, 32, 115, 105, 116, 32, 118, 111, 108, 117, 112, 116, 97, 116, 101, 109, 32, 97, 99, 99, 117, 115, 97, 110, 116, 105, 117, 109, 32, 100, 111, 108, 111, 114, 101, 109, 113, 117, 101, 32, 108, 97, 117, 100, 97, 5, 22, 240, 60, 44, 32, 116, 111, 116, 97, 109, 32, 114, 101, 109, 32, 97, 112, 101, 114, 105, 97, 109, 44, 32, 101, 97, 113, 117, 101, 32, 105, 112, 115, 97, 32, 113, 117, 97, 101, 32, 97, 98, 32, 105, 108, 108, 111, 32, 105, 110, 118, 101, 110, 116, 111, 114, 101, 32, 118, 101, 114, 105, 116, 97, 1, 141, 4, 101, 116, 1, 36, 88, 115, 105, 32, 97, 114, 99, 104, 105, 116, 101, 99, 116, 111, 32, 98, 101, 97, 116, 97, 101, 32, 118, 105, 1, 6, 120, 100, 105, 99, 116, 97, 32, 115, 117, 110, 116, 32, 101, 120, 112, 108, 105, 99, 97, 98, 111, 46, 32, 78, 101, 109, 111, 32, 101, 110, 105, 109, 5, 103, 0, 109, 46, 180, 0, 12, 113, 117, 105, 97, 17, 16, 0, 115, 5, 209, 72, 97, 115, 112, 101, 114, 110, 97, 116, 117, 114, 32, 97, 117, 116, 32, 111, 100, 105, 116, 5, 9, 36, 102, 117, 103, 105, 116, 44, 32, 115, 101, 100, 9, 53, 32, 99, 111, 110, 115, 101, 113, 117, 117, 110, 1, 42, 20, 109, 97, 103, 110, 105, 32, 9, 245, 16, 115, 32, 101, 111, 115, 1, 36, 28, 32, 114, 97, 116, 105, 111, 110, 101, 17, 96, 33, 36, 1, 51, 36, 105, 32, 110, 101, 115, 99, 105, 117, 110, 116, 1, 155, 1, 254, 16, 112, 111, 114, 114, 111, 1, 51, 36, 115, 113, 117, 97, 109, 32, 101, 115, 116, 44, 1, 14, 13, 81, 5, 183, 4, 117, 109, 1, 18, 0, 97, 9, 19, 4, 32, 115, 1, 149, 12, 109, 101, 116, 44, 9, 135, 76, 99, 116, 101, 116, 117, 114, 44, 32, 97, 100, 105, 112, 105, 115, 99, 105, 32, 118, 101, 108, 50, 173, 0, 24, 110, 111, 110, 32, 110, 117, 109, 9, 94, 84, 105, 117, 115, 32, 109, 111, 100, 105, 32, 116, 101, 109, 112, 111, 114, 97, 32, 105, 110, 99, 105, 100, 33, 52, 20, 117, 116, 32, 108, 97, 98, 33, 116, 4, 101, 116, 9, 106, 0, 101, 5, 219, 20, 97, 109, 32, 97, 108, 105, 5, 62, 33, 164, 8, 114, 97, 116, 29, 212, 12, 46, 32, 85, 116, 41, 94, 52, 97, 100, 32, 109, 105, 110, 105, 109, 97, 32, 118, 101, 110, 105, 33, 221, 72, 113, 117, 105, 115, 32, 110, 111, 115, 116, 114, 117, 109, 32, 101, 120, 101, 114, 99, 105, 33, 202, 104, 111, 110, 101, 109, 32, 117, 108, 108, 97, 109, 32, 99, 111, 114, 112, 111, 114, 105, 115, 32, 115, 117, 115, 99, 105, 112, 105, 13, 130, 8, 105, 111, 115, 1, 64, 12, 110, 105, 115, 105, 1, 150, 5, 126, 44, 105, 100, 32, 101, 120, 32, 101, 97, 32, 99, 111, 109, 5, 192, 0, 99, 41, 131, 33, 172, 8, 
63, 32, 81, 1, 107, 4, 97, 117, 33, 101, 96, 118, 101, 108, 32, 101, 117, 109, 32, 105, 117, 114, 101, 32, 114, 101, 112, 114, 101, 104, 101, 110, 100, 101, 114, 105, 65, 63, 12, 105, 32, 105, 110, 1, 69, 16, 118, 111, 108, 117, 112, 65, 185, 1, 47, 24, 105, 116, 32, 101, 115, 115, 101, 1, 222, 64, 109, 32, 110, 105, 104, 105, 108, 32, 109, 111, 108, 101, 115, 116, 105, 97, 101, 46, 103, 0, 0, 44, 1, 45, 16, 32, 105, 108, 108, 117, 37, 143, 45, 36, 0, 109, 5, 110, 65, 33, 20, 97, 116, 32, 113, 117, 111, 17, 92, 44, 115, 32, 110, 117, 108, 108, 97, 32, 112, 97, 114, 105, 9, 165, 24, 65, 116, 32, 118, 101, 114, 111, 69, 34, 44, 101, 116, 32, 97, 99, 99, 117, 115, 97, 109, 117, 115, 1, 13, 104, 105, 117, 115, 116, 111, 32, 111, 100, 105, 111, 32, 100, 105, 103, 110, 105, 115, 115, 105, 109, 111, 115, 32, 100, 117, 99, 105, 1, 34, 80, 113, 117, 105, 32, 98, 108, 97, 110, 100, 105, 116, 105, 105, 115, 32, 112, 114, 97, 101, 115, 101, 101, 87, 17, 111, 56, 116, 117, 109, 32, 100, 101, 108, 101, 110, 105, 116, 105, 32, 97, 116, 65, 89, 28, 99, 111, 114, 114, 117, 112, 116, 105, 1, 150, 0, 115, 13, 174, 5, 109, 8, 113, 117, 97, 65, 5, 52, 108, 101, 115, 116, 105, 97, 115, 32, 101, 120, 99, 101, 112, 116, 0, 0, 0, 1, 0}, -} - -func TestSnappyEncode(t *testing.T) { - for src, exp := range snappyTestCases { - dst := Encode([]byte(src)) - if !bytes.Equal(dst, exp) { - t.Errorf("Expected %s to generate %v, but was %v", src, exp, dst) - } - } -} - -func TestSnappyDecode(t *testing.T) { - for exp, src := range snappyTestCases { - dst, err := Decode(src) - if err != nil { - t.Error("Encoding error: ", err) - } else if !bytes.Equal(dst, []byte(exp)) { - t.Errorf("Expected %s to be generated from %v, but was %s", exp, src, string(dst)) - } - } -} - -func TestSnappyDecodeStreams(t *testing.T) { - for exp, src := range snappyStreamTestCases { - dst, err := Decode(src) - if err != nil { - t.Error("Encoding error: ", err) - } else if !bytes.Equal(dst, []byte(exp)) { - t.Errorf("Expected %s to be generated from [%d]byte, but was %s", exp, len(src), string(dst)) - } - } -} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/vendor/github.com/eapache/queue/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml deleted file mode 100644 index 235a40a49..000000000 --- a/vendor/github.com/eapache/queue/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -sudo: false - -go: - - 1.2 - - 1.3 - - 1.4 diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE deleted file mode 100644 index d5f36dbca..000000000 --- a/vendor/github.com/eapache/queue/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software 
is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md deleted file mode 100644 index 8e782335c..000000000 --- a/vendor/github.com/eapache/queue/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Queue -===== - -[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) -[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is in part because it is *not* thread-safe. - -Follows semantic versioning using https://gopkg.in/ - import from -[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) -for guaranteed API stability. diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go deleted file mode 100644 index 71d1acdf2..000000000 --- a/vendor/github.com/eapache/queue/queue.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. -*/ -package queue - -// minQueueLen is smallest capacity that queue may have. -// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). -const minQueueLen = 16 - -// Queue represents a single instance of the queue data structure. -type Queue struct { - buf []interface{} - head, tail, count int -} - -// New constructs and returns a new Queue. -func New() *Queue { - return &Queue{ - buf: make([]interface{}, minQueueLen), - } -} - -// Length returns the number of elements currently stored in the queue. -func (q *Queue) Length() int { - return q.count -} - -// resizes the queue to fit exactly twice its current contents -// this can result in shrinking if the queue is less than half-full -func (q *Queue) resize() { - newBuf := make([]interface{}, q.count<<1) - - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} - -// Add puts an element on the end of the queue. 
-func (q *Queue) Add(elem interface{}) { - if q.count == len(q.buf) { - q.resize() - } - - q.buf[q.tail] = elem - // bitwise modulus - q.tail = (q.tail + 1) & (len(q.buf) - 1) - q.count++ -} - -// Peek returns the element at the head of the queue. This call panics -// if the queue is empty. -func (q *Queue) Peek() interface{} { - if q.count <= 0 { - panic("queue: Peek() called on empty queue") - } - return q.buf[q.head] -} - -// Get returns the element at index i in the queue. If the index is -// invalid, the call will panic. This method accepts both positive and -// negative index values. Index 0 refers to the first element, and -// index -1 refers to the last. -func (q *Queue) Get(i int) interface{} { - // If indexing backwards, convert to positive index. - if i < 0 { - i += q.count - } - if i < 0 || i >= q.count { - panic("queue: Get() called with index out of range") - } - // bitwise modulus - return q.buf[(q.head+i)&(len(q.buf)-1)] -} - -// Remove removes and returns the element from the front of the queue. If the -// queue is empty, the call will panic. -func (q *Queue) Remove() interface{} { - if q.count <= 0 { - panic("queue: Remove() called on empty queue") - } - ret := q.buf[q.head] - q.buf[q.head] = nil - // bitwise modulus - q.head = (q.head + 1) & (len(q.buf) - 1) - q.count-- - // Resize down if buffer 1/4 full. - if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { - q.resize() - } - return ret -} diff --git a/vendor/github.com/eapache/queue/queue_test.go b/vendor/github.com/eapache/queue/queue_test.go deleted file mode 100644 index a87584883..000000000 --- a/vendor/github.com/eapache/queue/queue_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package queue - -import "testing" - -func TestQueueSimple(t *testing.T) { - q := New() - - for i := 0; i < minQueueLen; i++ { - q.Add(i) - } - for i := 0; i < minQueueLen; i++ { - if q.Peek().(int) != i { - t.Error("peek", i, "had value", q.Peek()) - } - x := q.Remove() - if x != i { - t.Error("remove", i, "had value", x) - } - } -} - -func TestQueueWrapping(t *testing.T) { - q := New() - - for i := 0; i < minQueueLen; i++ { - q.Add(i) - } - for i := 0; i < 3; i++ { - q.Remove() - q.Add(minQueueLen + i) - } - - for i := 0; i < minQueueLen; i++ { - if q.Peek().(int) != i+3 { - t.Error("peek", i, "had value", q.Peek()) - } - q.Remove() - } -} - -func TestQueueLength(t *testing.T) { - q := New() - - if q.Length() != 0 { - t.Error("empty queue length not 0") - } - - for i := 0; i < 1000; i++ { - q.Add(i) - if q.Length() != i+1 { - t.Error("adding: queue with", i, "elements has length", q.Length()) - } - } - for i := 0; i < 1000; i++ { - q.Remove() - if q.Length() != 1000-i-1 { - t.Error("removing: queue with", 1000-i-i, "elements has length", q.Length()) - } - } -} - -func TestQueueGet(t *testing.T) { - q := New() - - for i := 0; i < 1000; i++ { - q.Add(i) - for j := 0; j < q.Length(); j++ { - if q.Get(j).(int) != j { - t.Errorf("index %d doesn't contain %d", j, j) - } - } - } -} - -func TestQueueGetNegative(t *testing.T) { - q := New() - - for i := 0; i < 1000; i++ { - q.Add(i) - for j := 1; j <= q.Length(); j++ { - if q.Get(-j).(int) != q.Length()-j { - t.Errorf("index %d doesn't contain %d", -j, q.Length()-j) - } - } - } -} - -func TestQueueGetOutOfRangePanics(t *testing.T) { - q := New() - - q.Add(1) - q.Add(2) - q.Add(3) - - assertPanics(t, "should panic when negative index", func() { - q.Get(-4) - }) - - assertPanics(t, "should panic when index greater than length", func() { - q.Get(4) - }) -} - -func TestQueuePeekOutOfRangePanics(t 
*testing.T) { - q := New() - - assertPanics(t, "should panic when peeking empty queue", func() { - q.Peek() - }) - - q.Add(1) - q.Remove() - - assertPanics(t, "should panic when peeking emptied queue", func() { - q.Peek() - }) -} - -func TestQueueRemoveOutOfRangePanics(t *testing.T) { - q := New() - - assertPanics(t, "should panic when removing empty queue", func() { - q.Remove() - }) - - q.Add(1) - q.Remove() - - assertPanics(t, "should panic when removing emptied queue", func() { - q.Remove() - }) -} - -func assertPanics(t *testing.T, name string, f func()) { - defer func() { - if r := recover(); r == nil { - t.Errorf("%s: didn't panic as expected", name) - } - }() - - f() -} - -// General warning: Go's benchmark utility (go test -bench .) increases the number of -// iterations until the benchmarks take a reasonable amount of time to run; memory usage -// is *NOT* considered. On my machine, these benchmarks hit around ~1GB before they've had -// enough, but if you have less than that available and start swapping, then all bets are off. - -func BenchmarkQueueSerial(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(nil) - } - for i := 0; i < b.N; i++ { - q.Peek() - q.Remove() - } -} - -func BenchmarkQueueGet(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - q.Get(i) - } -} - -func BenchmarkQueueTickTock(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(nil) - q.Peek() - q.Remove() - } -} diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore index 8f5b596b1..2f3373923 100644 --- a/vendor/github.com/golang/protobuf/.gitignore +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -13,4 +13,3 @@ _obj _test _testmain.go protoc-gen-go/testdata/multi/*.pb.go -_conformance/_conformance diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile index a1421d8b7..80b6a17d0 100644 --- a/vendor/github.com/golang/protobuf/Makefile +++ b/vendor/github.com/golang/protobuf/Makefile @@ -52,4 +52,3 @@ regenerate: make -C protoc-gen-go/testdata regenerate make -C proto/testdata regenerate make -C jsonpb/jsonpb_test_proto regenerate - make -C _conformance regenerate diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md index 037fc7c8e..8fdc89b4d 100644 --- a/vendor/github.com/golang/protobuf/README.md +++ b/vendor/github.com/golang/protobuf/README.md @@ -192,49 +192,7 @@ the --go_out argument to protoc: protoc --go_out=plugins=grpc:. *.proto -## Compatibility ## - -The library and the generated code are expected to be stable over time. -However, we reserve the right to make breaking changes without notice for the -following reasons: - -- Security. A security issue in the specification or implementation may come to - light whose resolution requires breaking compatibility. We reserve the right - to address such security issues. -- Unspecified behavior. There are some aspects of the Protocol Buffers - specification that are undefined. Programs that depend on such unspecified - behavior may break in future releases. -- Specification errors or changes. If it becomes necessary to address an - inconsistency, incompleteness, or change in the Protocol Buffers - specification, resolving the issue could affect the meaning or legality of - existing programs. We reserve the right to address such issues, including - updating the implementations. -- Bugs. 
If the library has a bug that violates the specification, a program - that depends on the buggy behavior may break if the bug is fixed. We reserve - the right to fix such bugs. -- Adding methods or fields to generated structs. These may conflict with field - names that already exist in a schema, causing applications to break. When the - code generator encounters a field in the schema that would collide with a - generated field or method name, the code generator will append an underscore - to the generated field or method name. -- Adding, removing, or changing methods or fields in generated structs that - start with `XXX`. These parts of the generated code are exported out of - necessity, but should not be considered part of the public API. -- Adding, removing, or changing unexported symbols in generated code. - -Any breaking changes outside of these will be announced 6 months in advance to -protobuf@googlegroups.com. - -You should, whenever possible, use generated code created by the `protoc-gen-go` -tool built at the same commit as the `proto` package. The `proto` package -declares package-level constants in the form `ProtoPackageIsVersionX`. -Application code and generated code may depend on one of these constants to -ensure that compilation will fail if the available version of the proto library -is too old. Whenever we make a change to the generated code that requires newer -library support, in the same commit we will increment the version number of the -generated code and declare a new package-level constant whose name incorporates -the latest version number. Removing a compatibility constant is considered a -breaking change and would be subject to the announcement policy stated above. +## Plugins ## The `protoc-gen-go/generator` package exposes a plugin interface, which is used by the gRPC code generation. This interface is not diff --git a/vendor/github.com/golang/protobuf/_conformance/Makefile b/vendor/github.com/golang/protobuf/_conformance/Makefile deleted file mode 100644 index 89800e2d9..000000000 --- a/vendor/github.com/golang/protobuf/_conformance/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2016 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance.go b/vendor/github.com/golang/protobuf/_conformance/conformance.go deleted file mode 100644 index c54212c80..000000000 --- a/vendor/github.com/golang/protobuf/_conformance/conformance.go +++ /dev/null @@ -1,161 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// conformance implements the conformance test subprocess protocol as -// documented in conformance.proto. 
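The deleted harness below speaks that subprocess protocol over stdin/stdout: every request and response is framed as a 4-byte little-endian length prefix followed by a serialized ConformanceRequest or ConformanceResponse, which is what its main loop does with io.ReadFull and binary.LittleEndian. A minimal standalone sketch of reading one such frame, assuming only the Go standard library; the readFramed helper is illustrative and not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// readFramed reads one length-prefixed message from r: a 4-byte
// little-endian size, then exactly that many payload bytes.
// (Illustrative helper; not part of the deleted conformance.go.)
func readFramed(r io.Reader) ([]byte, error) {
	var sizeBuf [4]byte
	if _, err := io.ReadFull(r, sizeBuf[:]); err != nil {
		return nil, err // io.EOF here marks a clean end of the stream
	}
	buf := make([]byte, binary.LittleEndian.Uint32(sizeBuf[:]))
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	msg, err := readFramed(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "read frame:", err)
		os.Exit(1)
	}
	fmt.Fprintf(os.Stderr, "got %d payload bytes\n", len(msg))
}

Writing a response mirrors this framing: marshal the message, emit its length with binary.LittleEndian.PutUint32, then write the payload bytes, exactly as the deleted main loop does.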
-package main - -import ( - "encoding/binary" - "fmt" - "io" - "os" - - pb "github.com/golang/protobuf/_conformance/conformance_proto" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -func main() { - var sizeBuf [4]byte - inbuf := make([]byte, 0, 4096) - outbuf := proto.NewBuffer(nil) - for { - if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF { - break - } else if err != nil { - fmt.Fprintln(os.Stderr, "go conformance: read request:", err) - os.Exit(1) - } - size := binary.LittleEndian.Uint32(sizeBuf[:]) - if int(size) > cap(inbuf) { - inbuf = make([]byte, size) - } - inbuf = inbuf[:size] - if _, err := io.ReadFull(os.Stdin, inbuf); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: read request:", err) - os.Exit(1) - } - - req := new(pb.ConformanceRequest) - if err := proto.Unmarshal(inbuf, req); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: parse request:", err) - os.Exit(1) - } - res := handle(req) - - if err := outbuf.Marshal(res); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err) - os.Exit(1) - } - binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes()))) - if _, err := os.Stdout.Write(sizeBuf[:]); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: write response:", err) - os.Exit(1) - } - if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: write response:", err) - os.Exit(1) - } - outbuf.Reset() - } -} - -var jsonMarshaler = jsonpb.Marshaler{ - OrigName: true, -} - -func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse { - var err error - var msg pb.TestAllTypes - switch p := req.Payload.(type) { - case *pb.ConformanceRequest_ProtobufPayload: - err = proto.Unmarshal(p.ProtobufPayload, &msg) - case *pb.ConformanceRequest_JsonPayload: - err = jsonpb.UnmarshalString(p.JsonPayload, &msg) - if err != nil && err.Error() == "unmarshaling Any not supported yet" { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_Skipped{ - Skipped: err.Error(), - }, - } - } - default: - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_RuntimeError{ - RuntimeError: "unknown request payload type", - }, - } - } - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_ParseError{ - ParseError: err.Error(), - }, - } - } - switch req.RequestedOutputFormat { - case pb.WireFormat_PROTOBUF: - p, err := proto.Marshal(&msg) - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_SerializeError{ - SerializeError: err.Error(), - }, - } - } - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_ProtobufPayload{ - ProtobufPayload: p, - }, - } - case pb.WireFormat_JSON: - p, err := jsonMarshaler.MarshalToString(&msg) - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_SerializeError{ - SerializeError: err.Error(), - }, - } - } - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_JsonPayload{ - JsonPayload: p, - }, - } - default: - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_RuntimeError{ - RuntimeError: "unknown output format", - }, - } - } -} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go deleted file mode 100644 index 2caf992c7..000000000 --- a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go +++ /dev/null @@ -1,1472 
+0,0 @@ -// Code generated by protoc-gen-go. -// source: conformance_proto/conformance.proto -// DO NOT EDIT! - -/* -Package conformance is a generated protocol buffer package. - -It is generated from these files: - conformance_proto/conformance.proto - -It has these top-level messages: - ConformanceRequest - ConformanceResponse - TestAllTypes - ForeignMessage -*/ -package conformance - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" -import google_protobuf2 "google.golang.org/genproto/protobuf" -import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" -import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" -import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type WireFormat int32 - -const ( - WireFormat_UNSPECIFIED WireFormat = 0 - WireFormat_PROTOBUF WireFormat = 1 - WireFormat_JSON WireFormat = 2 -) - -var WireFormat_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "PROTOBUF", - 2: "JSON", -} -var WireFormat_value = map[string]int32{ - "UNSPECIFIED": 0, - "PROTOBUF": 1, - "JSON": 2, -} - -func (x WireFormat) String() string { - return proto.EnumName(WireFormat_name, int32(x)) -} -func (WireFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type ForeignEnum int32 - -const ( - ForeignEnum_FOREIGN_FOO ForeignEnum = 0 - ForeignEnum_FOREIGN_BAR ForeignEnum = 1 - ForeignEnum_FOREIGN_BAZ ForeignEnum = 2 -) - -var ForeignEnum_name = map[int32]string{ - 0: "FOREIGN_FOO", - 1: "FOREIGN_BAR", - 2: "FOREIGN_BAZ", -} -var ForeignEnum_value = map[string]int32{ - "FOREIGN_FOO": 0, - "FOREIGN_BAR": 1, - "FOREIGN_BAZ": 2, -} - -func (x ForeignEnum) String() string { - return proto.EnumName(ForeignEnum_name, int32(x)) -} -func (ForeignEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type TestAllTypes_NestedEnum int32 - -const ( - TestAllTypes_FOO TestAllTypes_NestedEnum = 0 - TestAllTypes_BAR TestAllTypes_NestedEnum = 1 - TestAllTypes_BAZ TestAllTypes_NestedEnum = 2 - TestAllTypes_NEG TestAllTypes_NestedEnum = -1 -) - -var TestAllTypes_NestedEnum_name = map[int32]string{ - 0: "FOO", - 1: "BAR", - 2: "BAZ", - -1: "NEG", -} -var TestAllTypes_NestedEnum_value = map[string]int32{ - "FOO": 0, - "BAR": 1, - "BAZ": 2, - "NEG": -1, -} - -func (x TestAllTypes_NestedEnum) String() string { - return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x)) -} -func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -// Represents a single test case's input. The testee should: -// -// 1. parse this proto (which should always succeed) -// 2. parse the protobuf or JSON payload in "payload" (which may fail) -// 3. if the parse succeeded, serialize the message in the requested format. -type ConformanceRequest struct { - // The payload (whether protobuf of JSON) is always for a TestAllTypes proto - // (see below). 
- // - // Types that are valid to be assigned to Payload: - // *ConformanceRequest_ProtobufPayload - // *ConformanceRequest_JsonPayload - Payload isConformanceRequest_Payload `protobuf_oneof:"payload"` - // Which format should the testee serialize its message to? - RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,enum=conformance.WireFormat" json:"requested_output_format,omitempty"` -} - -func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } -func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } -func (*ConformanceRequest) ProtoMessage() {} -func (*ConformanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isConformanceRequest_Payload interface { - isConformanceRequest_Payload() -} - -type ConformanceRequest_ProtobufPayload struct { - ProtobufPayload []byte `protobuf:"bytes,1,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` -} -type ConformanceRequest_JsonPayload struct { - JsonPayload string `protobuf:"bytes,2,opt,name=json_payload,json=jsonPayload,oneof"` -} - -func (*ConformanceRequest_ProtobufPayload) isConformanceRequest_Payload() {} -func (*ConformanceRequest_JsonPayload) isConformanceRequest_Payload() {} - -func (m *ConformanceRequest) GetPayload() isConformanceRequest_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *ConformanceRequest) GetProtobufPayload() []byte { - if x, ok := m.GetPayload().(*ConformanceRequest_ProtobufPayload); ok { - return x.ProtobufPayload - } - return nil -} - -func (m *ConformanceRequest) GetJsonPayload() string { - if x, ok := m.GetPayload().(*ConformanceRequest_JsonPayload); ok { - return x.JsonPayload - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ConformanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ConformanceRequest_OneofMarshaler, _ConformanceRequest_OneofUnmarshaler, _ConformanceRequest_OneofSizer, []interface{}{ - (*ConformanceRequest_ProtobufPayload)(nil), - (*ConformanceRequest_JsonPayload)(nil), - } -} - -func _ConformanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ConformanceRequest) - // payload - switch x := m.Payload.(type) { - case *ConformanceRequest_ProtobufPayload: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ProtobufPayload) - case *ConformanceRequest_JsonPayload: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.JsonPayload) - case nil: - default: - return fmt.Errorf("ConformanceRequest.Payload has unexpected type %T", x) - } - return nil -} - -func _ConformanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ConformanceRequest) - switch tag { - case 1: // payload.protobuf_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Payload = &ConformanceRequest_ProtobufPayload{x} - return true, err - case 2: // payload.json_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Payload = &ConformanceRequest_JsonPayload{x} - return true, err - default: - return false, nil - } -} - -func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ConformanceRequest) - // payload - switch x := m.Payload.(type) { - case *ConformanceRequest_ProtobufPayload: - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) - n += len(x.ProtobufPayload) - case *ConformanceRequest_JsonPayload: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.JsonPayload))) - n += len(x.JsonPayload) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Represents a single test case's output. 
-type ConformanceResponse struct { - // Types that are valid to be assigned to Result: - // *ConformanceResponse_ParseError - // *ConformanceResponse_SerializeError - // *ConformanceResponse_RuntimeError - // *ConformanceResponse_ProtobufPayload - // *ConformanceResponse_JsonPayload - // *ConformanceResponse_Skipped - Result isConformanceResponse_Result `protobuf_oneof:"result"` -} - -func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } -func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } -func (*ConformanceResponse) ProtoMessage() {} -func (*ConformanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type isConformanceResponse_Result interface { - isConformanceResponse_Result() -} - -type ConformanceResponse_ParseError struct { - ParseError string `protobuf:"bytes,1,opt,name=parse_error,json=parseError,oneof"` -} -type ConformanceResponse_SerializeError struct { - SerializeError string `protobuf:"bytes,6,opt,name=serialize_error,json=serializeError,oneof"` -} -type ConformanceResponse_RuntimeError struct { - RuntimeError string `protobuf:"bytes,2,opt,name=runtime_error,json=runtimeError,oneof"` -} -type ConformanceResponse_ProtobufPayload struct { - ProtobufPayload []byte `protobuf:"bytes,3,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` -} -type ConformanceResponse_JsonPayload struct { - JsonPayload string `protobuf:"bytes,4,opt,name=json_payload,json=jsonPayload,oneof"` -} -type ConformanceResponse_Skipped struct { - Skipped string `protobuf:"bytes,5,opt,name=skipped,oneof"` -} - -func (*ConformanceResponse_ParseError) isConformanceResponse_Result() {} -func (*ConformanceResponse_SerializeError) isConformanceResponse_Result() {} -func (*ConformanceResponse_RuntimeError) isConformanceResponse_Result() {} -func (*ConformanceResponse_ProtobufPayload) isConformanceResponse_Result() {} -func (*ConformanceResponse_JsonPayload) isConformanceResponse_Result() {} -func (*ConformanceResponse_Skipped) isConformanceResponse_Result() {} - -func (m *ConformanceResponse) GetResult() isConformanceResponse_Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *ConformanceResponse) GetParseError() string { - if x, ok := m.GetResult().(*ConformanceResponse_ParseError); ok { - return x.ParseError - } - return "" -} - -func (m *ConformanceResponse) GetSerializeError() string { - if x, ok := m.GetResult().(*ConformanceResponse_SerializeError); ok { - return x.SerializeError - } - return "" -} - -func (m *ConformanceResponse) GetRuntimeError() string { - if x, ok := m.GetResult().(*ConformanceResponse_RuntimeError); ok { - return x.RuntimeError - } - return "" -} - -func (m *ConformanceResponse) GetProtobufPayload() []byte { - if x, ok := m.GetResult().(*ConformanceResponse_ProtobufPayload); ok { - return x.ProtobufPayload - } - return nil -} - -func (m *ConformanceResponse) GetJsonPayload() string { - if x, ok := m.GetResult().(*ConformanceResponse_JsonPayload); ok { - return x.JsonPayload - } - return "" -} - -func (m *ConformanceResponse) GetSkipped() string { - if x, ok := m.GetResult().(*ConformanceResponse_Skipped); ok { - return x.Skipped - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ConformanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ConformanceResponse_OneofMarshaler, _ConformanceResponse_OneofUnmarshaler, _ConformanceResponse_OneofSizer, []interface{}{ - (*ConformanceResponse_ParseError)(nil), - (*ConformanceResponse_SerializeError)(nil), - (*ConformanceResponse_RuntimeError)(nil), - (*ConformanceResponse_ProtobufPayload)(nil), - (*ConformanceResponse_JsonPayload)(nil), - (*ConformanceResponse_Skipped)(nil), - } -} - -func _ConformanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ConformanceResponse) - // result - switch x := m.Result.(type) { - case *ConformanceResponse_ParseError: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeStringBytes(x.ParseError) - case *ConformanceResponse_SerializeError: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.SerializeError) - case *ConformanceResponse_RuntimeError: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.RuntimeError) - case *ConformanceResponse_ProtobufPayload: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ProtobufPayload) - case *ConformanceResponse_JsonPayload: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.JsonPayload) - case *ConformanceResponse_Skipped: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Skipped) - case nil: - default: - return fmt.Errorf("ConformanceResponse.Result has unexpected type %T", x) - } - return nil -} - -func _ConformanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ConformanceResponse) - switch tag { - case 1: // result.parse_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_ParseError{x} - return true, err - case 6: // result.serialize_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_SerializeError{x} - return true, err - case 2: // result.runtime_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_RuntimeError{x} - return true, err - case 3: // result.protobuf_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Result = &ConformanceResponse_ProtobufPayload{x} - return true, err - case 4: // result.json_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_JsonPayload{x} - return true, err - case 5: // result.skipped - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_Skipped{x} - return true, err - default: - return false, nil - } -} - -func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ConformanceResponse) - // result - switch x := m.Result.(type) { - case *ConformanceResponse_ParseError: - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.ParseError))) - n += len(x.ParseError) - case *ConformanceResponse_SerializeError: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(len(x.SerializeError))) - n += len(x.SerializeError) - case *ConformanceResponse_RuntimeError: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.RuntimeError))) - n += len(x.RuntimeError) - case *ConformanceResponse_ProtobufPayload: - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) - n += len(x.ProtobufPayload) - case *ConformanceResponse_JsonPayload: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.JsonPayload))) - n += len(x.JsonPayload) - case *ConformanceResponse_Skipped: - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Skipped))) - n += len(x.Skipped) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// This proto includes every type of field in both singular and repeated -// forms. -type TestAllTypes struct { - // Singular - OptionalInt32 int32 `protobuf:"varint,1,opt,name=optional_int32,json=optionalInt32" json:"optional_int32,omitempty"` - OptionalInt64 int64 `protobuf:"varint,2,opt,name=optional_int64,json=optionalInt64" json:"optional_int64,omitempty"` - OptionalUint32 uint32 `protobuf:"varint,3,opt,name=optional_uint32,json=optionalUint32" json:"optional_uint32,omitempty"` - OptionalUint64 uint64 `protobuf:"varint,4,opt,name=optional_uint64,json=optionalUint64" json:"optional_uint64,omitempty"` - OptionalSint32 int32 `protobuf:"zigzag32,5,opt,name=optional_sint32,json=optionalSint32" json:"optional_sint32,omitempty"` - OptionalSint64 int64 `protobuf:"zigzag64,6,opt,name=optional_sint64,json=optionalSint64" json:"optional_sint64,omitempty"` - OptionalFixed32 uint32 `protobuf:"fixed32,7,opt,name=optional_fixed32,json=optionalFixed32" json:"optional_fixed32,omitempty"` - OptionalFixed64 uint64 `protobuf:"fixed64,8,opt,name=optional_fixed64,json=optionalFixed64" json:"optional_fixed64,omitempty"` - OptionalSfixed32 int32 `protobuf:"fixed32,9,opt,name=optional_sfixed32,json=optionalSfixed32" json:"optional_sfixed32,omitempty"` - OptionalSfixed64 int64 `protobuf:"fixed64,10,opt,name=optional_sfixed64,json=optionalSfixed64" json:"optional_sfixed64,omitempty"` - OptionalFloat float32 `protobuf:"fixed32,11,opt,name=optional_float,json=optionalFloat" json:"optional_float,omitempty"` - OptionalDouble float64 `protobuf:"fixed64,12,opt,name=optional_double,json=optionalDouble" json:"optional_double,omitempty"` - OptionalBool bool `protobuf:"varint,13,opt,name=optional_bool,json=optionalBool" json:"optional_bool,omitempty"` - OptionalString string `protobuf:"bytes,14,opt,name=optional_string,json=optionalString" json:"optional_string,omitempty"` - OptionalBytes []byte `protobuf:"bytes,15,opt,name=optional_bytes,json=optionalBytes,proto3" json:"optional_bytes,omitempty"` - OptionalNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,18,opt,name=optional_nested_message,json=optionalNestedMessage" json:"optional_nested_message,omitempty"` - OptionalForeignMessage *ForeignMessage `protobuf:"bytes,19,opt,name=optional_foreign_message,json=optionalForeignMessage" json:"optional_foreign_message,omitempty"` - OptionalNestedEnum TestAllTypes_NestedEnum `protobuf:"varint,21,opt,name=optional_nested_enum,json=optionalNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"optional_nested_enum,omitempty"` - OptionalForeignEnum ForeignEnum `protobuf:"varint,22,opt,name=optional_foreign_enum,json=optionalForeignEnum,enum=conformance.ForeignEnum" 
json:"optional_foreign_enum,omitempty"` - OptionalStringPiece string `protobuf:"bytes,24,opt,name=optional_string_piece,json=optionalStringPiece" json:"optional_string_piece,omitempty"` - OptionalCord string `protobuf:"bytes,25,opt,name=optional_cord,json=optionalCord" json:"optional_cord,omitempty"` - RecursiveMessage *TestAllTypes `protobuf:"bytes,27,opt,name=recursive_message,json=recursiveMessage" json:"recursive_message,omitempty"` - // Repeated - RepeatedInt32 []int32 `protobuf:"varint,31,rep,packed,name=repeated_int32,json=repeatedInt32" json:"repeated_int32,omitempty"` - RepeatedInt64 []int64 `protobuf:"varint,32,rep,packed,name=repeated_int64,json=repeatedInt64" json:"repeated_int64,omitempty"` - RepeatedUint32 []uint32 `protobuf:"varint,33,rep,packed,name=repeated_uint32,json=repeatedUint32" json:"repeated_uint32,omitempty"` - RepeatedUint64 []uint64 `protobuf:"varint,34,rep,packed,name=repeated_uint64,json=repeatedUint64" json:"repeated_uint64,omitempty"` - RepeatedSint32 []int32 `protobuf:"zigzag32,35,rep,packed,name=repeated_sint32,json=repeatedSint32" json:"repeated_sint32,omitempty"` - RepeatedSint64 []int64 `protobuf:"zigzag64,36,rep,packed,name=repeated_sint64,json=repeatedSint64" json:"repeated_sint64,omitempty"` - RepeatedFixed32 []uint32 `protobuf:"fixed32,37,rep,packed,name=repeated_fixed32,json=repeatedFixed32" json:"repeated_fixed32,omitempty"` - RepeatedFixed64 []uint64 `protobuf:"fixed64,38,rep,packed,name=repeated_fixed64,json=repeatedFixed64" json:"repeated_fixed64,omitempty"` - RepeatedSfixed32 []int32 `protobuf:"fixed32,39,rep,packed,name=repeated_sfixed32,json=repeatedSfixed32" json:"repeated_sfixed32,omitempty"` - RepeatedSfixed64 []int64 `protobuf:"fixed64,40,rep,packed,name=repeated_sfixed64,json=repeatedSfixed64" json:"repeated_sfixed64,omitempty"` - RepeatedFloat []float32 `protobuf:"fixed32,41,rep,packed,name=repeated_float,json=repeatedFloat" json:"repeated_float,omitempty"` - RepeatedDouble []float64 `protobuf:"fixed64,42,rep,packed,name=repeated_double,json=repeatedDouble" json:"repeated_double,omitempty"` - RepeatedBool []bool `protobuf:"varint,43,rep,packed,name=repeated_bool,json=repeatedBool" json:"repeated_bool,omitempty"` - RepeatedString []string `protobuf:"bytes,44,rep,name=repeated_string,json=repeatedString" json:"repeated_string,omitempty"` - RepeatedBytes [][]byte `protobuf:"bytes,45,rep,name=repeated_bytes,json=repeatedBytes,proto3" json:"repeated_bytes,omitempty"` - RepeatedNestedMessage []*TestAllTypes_NestedMessage `protobuf:"bytes,48,rep,name=repeated_nested_message,json=repeatedNestedMessage" json:"repeated_nested_message,omitempty"` - RepeatedForeignMessage []*ForeignMessage `protobuf:"bytes,49,rep,name=repeated_foreign_message,json=repeatedForeignMessage" json:"repeated_foreign_message,omitempty"` - RepeatedNestedEnum []TestAllTypes_NestedEnum `protobuf:"varint,51,rep,packed,name=repeated_nested_enum,json=repeatedNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"repeated_nested_enum,omitempty"` - RepeatedForeignEnum []ForeignEnum `protobuf:"varint,52,rep,packed,name=repeated_foreign_enum,json=repeatedForeignEnum,enum=conformance.ForeignEnum" json:"repeated_foreign_enum,omitempty"` - RepeatedStringPiece []string `protobuf:"bytes,54,rep,name=repeated_string_piece,json=repeatedStringPiece" json:"repeated_string_piece,omitempty"` - RepeatedCord []string `protobuf:"bytes,55,rep,name=repeated_cord,json=repeatedCord" json:"repeated_cord,omitempty"` - // Map - MapInt32Int32 map[int32]int32 
`protobuf:"bytes,56,rep,name=map_int32_int32,json=mapInt32Int32" json:"map_int32_int32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapInt64Int64 map[int64]int64 `protobuf:"bytes,57,rep,name=map_int64_int64,json=mapInt64Int64" json:"map_int64_int64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapUint32Uint32 map[uint32]uint32 `protobuf:"bytes,58,rep,name=map_uint32_uint32,json=mapUint32Uint32" json:"map_uint32_uint32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapUint64Uint64 map[uint64]uint64 `protobuf:"bytes,59,rep,name=map_uint64_uint64,json=mapUint64Uint64" json:"map_uint64_uint64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapSint32Sint32 map[int32]int32 `protobuf:"bytes,60,rep,name=map_sint32_sint32,json=mapSint32Sint32" json:"map_sint32_sint32,omitempty" protobuf_key:"zigzag32,1,opt,name=key" protobuf_val:"zigzag32,2,opt,name=value"` - MapSint64Sint64 map[int64]int64 `protobuf:"bytes,61,rep,name=map_sint64_sint64,json=mapSint64Sint64" json:"map_sint64_sint64,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"zigzag64,2,opt,name=value"` - MapFixed32Fixed32 map[uint32]uint32 `protobuf:"bytes,62,rep,name=map_fixed32_fixed32,json=mapFixed32Fixed32" json:"map_fixed32_fixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` - MapFixed64Fixed64 map[uint64]uint64 `protobuf:"bytes,63,rep,name=map_fixed64_fixed64,json=mapFixed64Fixed64" json:"map_fixed64_fixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` - MapSfixed32Sfixed32 map[int32]int32 `protobuf:"bytes,64,rep,name=map_sfixed32_sfixed32,json=mapSfixed32Sfixed32" json:"map_sfixed32_sfixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` - MapSfixed64Sfixed64 map[int64]int64 `protobuf:"bytes,65,rep,name=map_sfixed64_sfixed64,json=mapSfixed64Sfixed64" json:"map_sfixed64_sfixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` - MapInt32Float map[int32]float32 `protobuf:"bytes,66,rep,name=map_int32_float,json=mapInt32Float" json:"map_int32_float,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` - MapInt32Double map[int32]float64 `protobuf:"bytes,67,rep,name=map_int32_double,json=mapInt32Double" json:"map_int32_double,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` - MapBoolBool map[bool]bool `protobuf:"bytes,68,rep,name=map_bool_bool,json=mapBoolBool" json:"map_bool_bool,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapStringString map[string]string `protobuf:"bytes,69,rep,name=map_string_string,json=mapStringString" json:"map_string_string,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MapStringBytes map[string][]byte `protobuf:"bytes,70,rep,name=map_string_bytes,json=mapStringBytes" json:"map_string_bytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` - MapStringNestedMessage map[string]*TestAllTypes_NestedMessage `protobuf:"bytes,71,rep,name=map_string_nested_message,json=mapStringNestedMessage" json:"map_string_nested_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MapStringForeignMessage map[string]*ForeignMessage 
`protobuf:"bytes,72,rep,name=map_string_foreign_message,json=mapStringForeignMessage" json:"map_string_foreign_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MapStringNestedEnum map[string]TestAllTypes_NestedEnum `protobuf:"bytes,73,rep,name=map_string_nested_enum,json=mapStringNestedEnum" json:"map_string_nested_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.TestAllTypes_NestedEnum"` - MapStringForeignEnum map[string]ForeignEnum `protobuf:"bytes,74,rep,name=map_string_foreign_enum,json=mapStringForeignEnum" json:"map_string_foreign_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.ForeignEnum"` - // Types that are valid to be assigned to OneofField: - // *TestAllTypes_OneofUint32 - // *TestAllTypes_OneofNestedMessage - // *TestAllTypes_OneofString - // *TestAllTypes_OneofBytes - // *TestAllTypes_OneofBool - // *TestAllTypes_OneofUint64 - // *TestAllTypes_OneofFloat - // *TestAllTypes_OneofDouble - // *TestAllTypes_OneofEnum - OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"` - // Well-known types - OptionalBoolWrapper *google_protobuf5.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"` - OptionalInt32Wrapper *google_protobuf5.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"` - OptionalInt64Wrapper *google_protobuf5.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"` - OptionalUint32Wrapper *google_protobuf5.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"` - OptionalUint64Wrapper *google_protobuf5.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"` - OptionalFloatWrapper *google_protobuf5.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"` - OptionalDoubleWrapper *google_protobuf5.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"` - OptionalStringWrapper *google_protobuf5.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"` - OptionalBytesWrapper *google_protobuf5.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"` - RepeatedBoolWrapper []*google_protobuf5.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"` - RepeatedInt32Wrapper []*google_protobuf5.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"` - RepeatedInt64Wrapper []*google_protobuf5.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"` - RepeatedUint32Wrapper []*google_protobuf5.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" json:"repeated_uint32_wrapper,omitempty"` - RepeatedUint64Wrapper []*google_protobuf5.UInt64Value 
`protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"` - RepeatedFloatWrapper []*google_protobuf5.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"` - RepeatedDoubleWrapper []*google_protobuf5.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"` - RepeatedStringWrapper []*google_protobuf5.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"` - RepeatedBytesWrapper []*google_protobuf5.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"` - OptionalDuration *google_protobuf1.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"` - OptionalTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"` - OptionalFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"` - OptionalStruct *google_protobuf3.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" json:"optional_struct,omitempty"` - OptionalAny *google_protobuf.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"` - OptionalValue *google_protobuf3.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"` - RepeatedDuration []*google_protobuf1.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"` - RepeatedTimestamp []*google_protobuf4.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"` - RepeatedFieldmask []*google_protobuf2.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"` - RepeatedStruct []*google_protobuf3.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"` - RepeatedAny []*google_protobuf.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"` - RepeatedValue []*google_protobuf3.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` - // Test field-name-to-JSON-name convention. - // (protobuf says names can be any valid C/C++ identifier.) 
- Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" json:"fieldname1,omitempty"` - FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"` - XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=fieldName3" json:"_field_name3,omitempty"` - Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"` - Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"` - Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"` - FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"` - FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8,json=fieldName8" json:"FieldName8,omitempty"` - Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"` - Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=fieldName10" json:"Field_Name10,omitempty"` - FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=fIELDNAME11" json:"FIELD_NAME11,omitempty"` - FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=fIELDName12" json:"FIELD_name12,omitempty"` - XFieldName13 int32 `protobuf:"varint,413,opt,name=__field_name13,json=fieldName13" json:"__field_name13,omitempty"` - X_FieldName14 int32 `protobuf:"varint,414,opt,name=__Field_name14,json=fieldName14" json:"__Field_name14,omitempty"` - Field_Name15 int32 `protobuf:"varint,415,opt,name=field__name15,json=fieldName15" json:"field__name15,omitempty"` - Field__Name16 int32 `protobuf:"varint,416,opt,name=field__Name16,json=fieldName16" json:"field__Name16,omitempty"` - FieldName17__ int32 `protobuf:"varint,417,opt,name=field_name17__,json=fieldName17" json:"field_name17__,omitempty"` - FieldName18__ int32 `protobuf:"varint,418,opt,name=Field_name18__,json=fieldName18" json:"Field_name18__,omitempty"` -} - -func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } -func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes) ProtoMessage() {} -func (*TestAllTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type isTestAllTypes_OneofField interface { - isTestAllTypes_OneofField() -} - -type TestAllTypes_OneofUint32 struct { - OneofUint32 uint32 `protobuf:"varint,111,opt,name=oneof_uint32,json=oneofUint32,oneof"` -} -type TestAllTypes_OneofNestedMessage struct { - OneofNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,112,opt,name=oneof_nested_message,json=oneofNestedMessage,oneof"` -} -type TestAllTypes_OneofString struct { - OneofString string `protobuf:"bytes,113,opt,name=oneof_string,json=oneofString,oneof"` -} -type TestAllTypes_OneofBytes struct { - OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"` -} -type TestAllTypes_OneofBool struct { - OneofBool bool `protobuf:"varint,115,opt,name=oneof_bool,json=oneofBool,oneof"` -} -type TestAllTypes_OneofUint64 struct { - OneofUint64 uint64 `protobuf:"varint,116,opt,name=oneof_uint64,json=oneofUint64,oneof"` -} -type TestAllTypes_OneofFloat struct { - OneofFloat float32 `protobuf:"fixed32,117,opt,name=oneof_float,json=oneofFloat,oneof"` -} -type TestAllTypes_OneofDouble struct { - OneofDouble float64 `protobuf:"fixed64,118,opt,name=oneof_double,json=oneofDouble,oneof"` -} -type TestAllTypes_OneofEnum struct { - OneofEnum TestAllTypes_NestedEnum 
`protobuf:"varint,119,opt,name=oneof_enum,json=oneofEnum,enum=conformance.TestAllTypes_NestedEnum,oneof"` -} - -func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofBool) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofUint64) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofFloat) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofDouble) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofEnum) isTestAllTypes_OneofField() {} - -func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField { - if m != nil { - return m.OneofField - } - return nil -} - -func (m *TestAllTypes) GetOptionalNestedMessage() *TestAllTypes_NestedMessage { - if m != nil { - return m.OptionalNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetOptionalForeignMessage() *ForeignMessage { - if m != nil { - return m.OptionalForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetRecursiveMessage() *TestAllTypes { - if m != nil { - return m.RecursiveMessage - } - return nil -} - -func (m *TestAllTypes) GetRepeatedNestedMessage() []*TestAllTypes_NestedMessage { - if m != nil { - return m.RepeatedNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetRepeatedForeignMessage() []*ForeignMessage { - if m != nil { - return m.RepeatedForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Int32() map[int32]int32 { - if m != nil { - return m.MapInt32Int32 - } - return nil -} - -func (m *TestAllTypes) GetMapInt64Int64() map[int64]int64 { - if m != nil { - return m.MapInt64Int64 - } - return nil -} - -func (m *TestAllTypes) GetMapUint32Uint32() map[uint32]uint32 { - if m != nil { - return m.MapUint32Uint32 - } - return nil -} - -func (m *TestAllTypes) GetMapUint64Uint64() map[uint64]uint64 { - if m != nil { - return m.MapUint64Uint64 - } - return nil -} - -func (m *TestAllTypes) GetMapSint32Sint32() map[int32]int32 { - if m != nil { - return m.MapSint32Sint32 - } - return nil -} - -func (m *TestAllTypes) GetMapSint64Sint64() map[int64]int64 { - if m != nil { - return m.MapSint64Sint64 - } - return nil -} - -func (m *TestAllTypes) GetMapFixed32Fixed32() map[uint32]uint32 { - if m != nil { - return m.MapFixed32Fixed32 - } - return nil -} - -func (m *TestAllTypes) GetMapFixed64Fixed64() map[uint64]uint64 { - if m != nil { - return m.MapFixed64Fixed64 - } - return nil -} - -func (m *TestAllTypes) GetMapSfixed32Sfixed32() map[int32]int32 { - if m != nil { - return m.MapSfixed32Sfixed32 - } - return nil -} - -func (m *TestAllTypes) GetMapSfixed64Sfixed64() map[int64]int64 { - if m != nil { - return m.MapSfixed64Sfixed64 - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Float() map[int32]float32 { - if m != nil { - return m.MapInt32Float - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Double() map[int32]float64 { - if m != nil { - return m.MapInt32Double - } - return nil -} - -func (m *TestAllTypes) GetMapBoolBool() map[bool]bool { - if m != nil { - return m.MapBoolBool - } - return nil -} - -func (m *TestAllTypes) GetMapStringString() map[string]string { - if m != nil { - return m.MapStringString - } - return nil -} - -func (m *TestAllTypes) GetMapStringBytes() map[string][]byte { - if m != nil { - return m.MapStringBytes - } - return nil -} - -func (m *TestAllTypes) GetMapStringNestedMessage() 
map[string]*TestAllTypes_NestedMessage { - if m != nil { - return m.MapStringNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetMapStringForeignMessage() map[string]*ForeignMessage { - if m != nil { - return m.MapStringForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetMapStringNestedEnum() map[string]TestAllTypes_NestedEnum { - if m != nil { - return m.MapStringNestedEnum - } - return nil -} - -func (m *TestAllTypes) GetMapStringForeignEnum() map[string]ForeignEnum { - if m != nil { - return m.MapStringForeignEnum - } - return nil -} - -func (m *TestAllTypes) GetOneofUint32() uint32 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint32); ok { - return x.OneofUint32 - } - return 0 -} - -func (m *TestAllTypes) GetOneofNestedMessage() *TestAllTypes_NestedMessage { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofNestedMessage); ok { - return x.OneofNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetOneofString() string { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofString); ok { - return x.OneofString - } - return "" -} - -func (m *TestAllTypes) GetOneofBytes() []byte { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofBytes); ok { - return x.OneofBytes - } - return nil -} - -func (m *TestAllTypes) GetOneofBool() bool { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofBool); ok { - return x.OneofBool - } - return false -} - -func (m *TestAllTypes) GetOneofUint64() uint64 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint64); ok { - return x.OneofUint64 - } - return 0 -} - -func (m *TestAllTypes) GetOneofFloat() float32 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofFloat); ok { - return x.OneofFloat - } - return 0 -} - -func (m *TestAllTypes) GetOneofDouble() float64 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofDouble); ok { - return x.OneofDouble - } - return 0 -} - -func (m *TestAllTypes) GetOneofEnum() TestAllTypes_NestedEnum { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofEnum); ok { - return x.OneofEnum - } - return TestAllTypes_FOO -} - -func (m *TestAllTypes) GetOptionalBoolWrapper() *google_protobuf5.BoolValue { - if m != nil { - return m.OptionalBoolWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalInt32Wrapper() *google_protobuf5.Int32Value { - if m != nil { - return m.OptionalInt32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalInt64Wrapper() *google_protobuf5.Int64Value { - if m != nil { - return m.OptionalInt64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalUint32Wrapper() *google_protobuf5.UInt32Value { - if m != nil { - return m.OptionalUint32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalUint64Wrapper() *google_protobuf5.UInt64Value { - if m != nil { - return m.OptionalUint64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalFloatWrapper() *google_protobuf5.FloatValue { - if m != nil { - return m.OptionalFloatWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalDoubleWrapper() *google_protobuf5.DoubleValue { - if m != nil { - return m.OptionalDoubleWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalStringWrapper() *google_protobuf5.StringValue { - if m != nil { - return m.OptionalStringWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalBytesWrapper() *google_protobuf5.BytesValue { - if m != nil { - return m.OptionalBytesWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBoolWrapper() []*google_protobuf5.BoolValue { - if m != nil { - return 
m.RepeatedBoolWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*google_protobuf5.Int32Value { - if m != nil { - return m.RepeatedInt32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*google_protobuf5.Int64Value { - if m != nil { - return m.RepeatedInt64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*google_protobuf5.UInt32Value { - if m != nil { - return m.RepeatedUint32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*google_protobuf5.UInt64Value { - if m != nil { - return m.RepeatedUint64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFloatWrapper() []*google_protobuf5.FloatValue { - if m != nil { - return m.RepeatedFloatWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*google_protobuf5.DoubleValue { - if m != nil { - return m.RepeatedDoubleWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedStringWrapper() []*google_protobuf5.StringValue { - if m != nil { - return m.RepeatedStringWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBytesWrapper() []*google_protobuf5.BytesValue { - if m != nil { - return m.RepeatedBytesWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalDuration() *google_protobuf1.Duration { - if m != nil { - return m.OptionalDuration - } - return nil -} - -func (m *TestAllTypes) GetOptionalTimestamp() *google_protobuf4.Timestamp { - if m != nil { - return m.OptionalTimestamp - } - return nil -} - -func (m *TestAllTypes) GetOptionalFieldMask() *google_protobuf2.FieldMask { - if m != nil { - return m.OptionalFieldMask - } - return nil -} - -func (m *TestAllTypes) GetOptionalStruct() *google_protobuf3.Struct { - if m != nil { - return m.OptionalStruct - } - return nil -} - -func (m *TestAllTypes) GetOptionalAny() *google_protobuf.Any { - if m != nil { - return m.OptionalAny - } - return nil -} - -func (m *TestAllTypes) GetOptionalValue() *google_protobuf3.Value { - if m != nil { - return m.OptionalValue - } - return nil -} - -func (m *TestAllTypes) GetRepeatedDuration() []*google_protobuf1.Duration { - if m != nil { - return m.RepeatedDuration - } - return nil -} - -func (m *TestAllTypes) GetRepeatedTimestamp() []*google_protobuf4.Timestamp { - if m != nil { - return m.RepeatedTimestamp - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFieldmask() []*google_protobuf2.FieldMask { - if m != nil { - return m.RepeatedFieldmask - } - return nil -} - -func (m *TestAllTypes) GetRepeatedStruct() []*google_protobuf3.Struct { - if m != nil { - return m.RepeatedStruct - } - return nil -} - -func (m *TestAllTypes) GetRepeatedAny() []*google_protobuf.Any { - if m != nil { - return m.RepeatedAny - } - return nil -} - -func (m *TestAllTypes) GetRepeatedValue() []*google_protobuf3.Value { - if m != nil { - return m.RepeatedValue - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{ - (*TestAllTypes_OneofUint32)(nil), - (*TestAllTypes_OneofNestedMessage)(nil), - (*TestAllTypes_OneofString)(nil), - (*TestAllTypes_OneofBytes)(nil), - (*TestAllTypes_OneofBool)(nil), - (*TestAllTypes_OneofUint64)(nil), - (*TestAllTypes_OneofFloat)(nil), - (*TestAllTypes_OneofDouble)(nil), - (*TestAllTypes_OneofEnum)(nil), - } -} - -func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*TestAllTypes) - // oneof_field - switch x := m.OneofField.(type) { - case *TestAllTypes_OneofUint32: - b.EncodeVarint(111<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofUint32)) - case *TestAllTypes_OneofNestedMessage: - b.EncodeVarint(112<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.OneofNestedMessage); err != nil { - return err - } - case *TestAllTypes_OneofString: - b.EncodeVarint(113<<3 | proto.WireBytes) - b.EncodeStringBytes(x.OneofString) - case *TestAllTypes_OneofBytes: - b.EncodeVarint(114<<3 | proto.WireBytes) - b.EncodeRawBytes(x.OneofBytes) - case *TestAllTypes_OneofBool: - t := uint64(0) - if x.OneofBool { - t = 1 - } - b.EncodeVarint(115<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *TestAllTypes_OneofUint64: - b.EncodeVarint(116<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofUint64)) - case *TestAllTypes_OneofFloat: - b.EncodeVarint(117<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.OneofFloat))) - case *TestAllTypes_OneofDouble: - b.EncodeVarint(118<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.OneofDouble)) - case *TestAllTypes_OneofEnum: - b.EncodeVarint(119<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofEnum)) - case nil: - default: - return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x) - } - return nil -} - -func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*TestAllTypes) - switch tag { - case 111: // oneof_field.oneof_uint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofUint32{uint32(x)} - return true, err - case 112: // oneof_field.oneof_nested_message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TestAllTypes_NestedMessage) - err := b.DecodeMessage(msg) - m.OneofField = &TestAllTypes_OneofNestedMessage{msg} - return true, err - case 113: // oneof_field.oneof_string - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.OneofField = &TestAllTypes_OneofString{x} - return true, err - case 114: // oneof_field.oneof_bytes - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.OneofField = &TestAllTypes_OneofBytes{x} - return true, err - case 115: // oneof_field.oneof_bool - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofBool{x != 0} - return true, err - case 116: // oneof_field.oneof_uint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = 
&TestAllTypes_OneofUint64{x} - return true, err - case 117: // oneof_field.oneof_float - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.OneofField = &TestAllTypes_OneofFloat{math.Float32frombits(uint32(x))} - return true, err - case 118: // oneof_field.oneof_double - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.OneofField = &TestAllTypes_OneofDouble{math.Float64frombits(x)} - return true, err - case 119: // oneof_field.oneof_enum - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofEnum{TestAllTypes_NestedEnum(x)} - return true, err - default: - return false, nil - } -} - -func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { - m := msg.(*TestAllTypes) - // oneof_field - switch x := m.OneofField.(type) { - case *TestAllTypes_OneofUint32: - n += proto.SizeVarint(111<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.OneofUint32)) - case *TestAllTypes_OneofNestedMessage: - s := proto.Size(x.OneofNestedMessage) - n += proto.SizeVarint(112<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *TestAllTypes_OneofString: - n += proto.SizeVarint(113<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.OneofString))) - n += len(x.OneofString) - case *TestAllTypes_OneofBytes: - n += proto.SizeVarint(114<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.OneofBytes))) - n += len(x.OneofBytes) - case *TestAllTypes_OneofBool: - n += proto.SizeVarint(115<<3 | proto.WireVarint) - n += 1 - case *TestAllTypes_OneofUint64: - n += proto.SizeVarint(116<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.OneofUint64)) - case *TestAllTypes_OneofFloat: - n += proto.SizeVarint(117<<3 | proto.WireFixed32) - n += 4 - case *TestAllTypes_OneofDouble: - n += proto.SizeVarint(118<<3 | proto.WireFixed64) - n += 8 - case *TestAllTypes_OneofEnum: - n += proto.SizeVarint(119<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.OneofEnum)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type TestAllTypes_NestedMessage struct { - A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` - Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"` -} - -func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } -func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes_NestedMessage) ProtoMessage() {} -func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes { - if m != nil { - return m.Corecursive - } - return nil -} - -type ForeignMessage struct { - C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"` -} - -func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } -func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } -func (*ForeignMessage) ProtoMessage() {} -func (*ForeignMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func init() { - proto.RegisterType((*ConformanceRequest)(nil), "conformance.ConformanceRequest") - proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse") - proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes") - 
proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage") - proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage") - proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value) - proto.RegisterEnum("conformance.ForeignEnum", ForeignEnum_name, ForeignEnum_value) - proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value) -} - -func init() { proto.RegisterFile("conformance_proto/conformance.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2731 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0xd9, 0x72, 0xdb, 0xc8, - 0xd5, 0x16, 0x08, 0x59, 0x4b, 0x93, 0x92, 0xa8, 0xd6, 0xd6, 0x96, 0x5d, 0x63, 0x58, 0xb2, 0x7f, - 0xd3, 0xf6, 0x8c, 0x6c, 0x49, 0x30, 0x2c, 0x7b, 0xfe, 0x71, 0x2c, 0xda, 0xa4, 0x25, 0x67, 0x2c, - 0xb9, 0x20, 0x6b, 0x5c, 0xe5, 0x5c, 0x30, 0x10, 0x05, 0xa9, 0x38, 0x26, 0x09, 0x0e, 0x40, 0x7a, - 0xa2, 0x5c, 0xe6, 0x0d, 0xb2, 0xef, 0xeb, 0x7d, 0xd6, 0x9b, 0x24, 0x95, 0x5c, 0xa5, 0x72, 0x93, - 0x3d, 0xa9, 0xec, 0xc9, 0x2b, 0xe4, 0x1d, 0x92, 0xea, 0x15, 0xdd, 0x0d, 0x80, 0xa2, 0xa7, 0x6a, - 0x28, 0xf1, 0xe0, 0xeb, 0xef, 0x9c, 0x3e, 0xe7, 0xe0, 0x6b, 0xe1, 0xc0, 0x60, 0xb9, 0x1e, 0xb4, - 0x8f, 0x82, 0xb0, 0xe5, 0xb5, 0xeb, 0x7e, 0xad, 0x13, 0x06, 0xdd, 0xe0, 0x86, 0x64, 0x59, 0x21, - 0x16, 0x98, 0x97, 0x4c, 0x8b, 0x67, 0x8f, 0x83, 0xe0, 0xb8, 0xe9, 0xdf, 0x20, 0x97, 0x0e, 0x7a, - 0x47, 0x37, 0xbc, 0xf6, 0x09, 0xc5, 0x2d, 0xbe, 0xa1, 0x5f, 0x3a, 0xec, 0x85, 0x5e, 0xb7, 0x11, - 0xb4, 0xd9, 0x75, 0x4b, 0xbf, 0x7e, 0xd4, 0xf0, 0x9b, 0x87, 0xb5, 0x96, 0x17, 0xbd, 0x64, 0x88, - 0xf3, 0x3a, 0x22, 0xea, 0x86, 0xbd, 0x7a, 0x97, 0x5d, 0xbd, 0xa0, 0x5f, 0xed, 0x36, 0x5a, 0x7e, - 0xd4, 0xf5, 0x5a, 0x9d, 0xac, 0x00, 0x3e, 0x0c, 0xbd, 0x4e, 0xc7, 0x0f, 0x23, 0x7a, 0x7d, 0xe9, - 0x17, 0x06, 0x80, 0x0f, 0xe2, 0xbd, 0xb8, 0xfe, 0x07, 0x3d, 0x3f, 0xea, 0xc2, 0xeb, 0xa0, 0xc8, - 0x57, 0xd4, 0x3a, 0xde, 0x49, 0x33, 0xf0, 0x0e, 0x91, 0x61, 0x19, 0xa5, 0xc2, 0xd6, 0x90, 0x3b, - 0xc5, 0xaf, 0x3c, 0xa5, 0x17, 0xe0, 0x32, 0x28, 0xbc, 0x1f, 0x05, 0x6d, 0x01, 0xcc, 0x59, 0x46, - 0x69, 0x7c, 0x6b, 0xc8, 0xcd, 0x63, 0x2b, 0x07, 0xed, 0x82, 0x85, 0x90, 0x92, 0xfb, 0x87, 0xb5, - 0xa0, 0xd7, 0xed, 0xf4, 0xba, 0x35, 0xe2, 0xb5, 0x8b, 0x4c, 0xcb, 0x28, 0x4d, 0xae, 0x2d, 0xac, - 0xc8, 0x69, 0x7e, 0xde, 0x08, 0xfd, 0x2a, 0xb9, 0xec, 0xce, 0x89, 0x75, 0xbb, 0x64, 0x19, 0x35, - 0x97, 0xc7, 0xc1, 0x28, 0x73, 0xb8, 0xf4, 0xa9, 0x1c, 0x98, 0x51, 0x36, 0x11, 0x75, 0x82, 0x76, - 0xe4, 0xc3, 0x8b, 0x20, 0xdf, 0xf1, 0xc2, 0xc8, 0xaf, 0xf9, 0x61, 0x18, 0x84, 0x64, 0x03, 0x38, - 0x2e, 0x40, 0x8c, 0x15, 0x6c, 0x83, 0x57, 0xc1, 0x54, 0xe4, 0x87, 0x0d, 0xaf, 0xd9, 0xf8, 0x24, - 0x87, 0x8d, 0x30, 0xd8, 0xa4, 0xb8, 0x40, 0xa1, 0x97, 0xc1, 0x44, 0xd8, 0x6b, 0xe3, 0x04, 0x33, - 0x20, 0xdf, 0x67, 0x81, 0x99, 0x29, 0x2c, 0x2d, 0x75, 0xe6, 0xa0, 0xa9, 0x1b, 0x4e, 0x4b, 0xdd, - 0x22, 0x18, 0x8d, 0x5e, 0x36, 0x3a, 0x1d, 0xff, 0x10, 0x9d, 0x61, 0xd7, 0xb9, 0xa1, 0x3c, 0x06, - 0x46, 0x42, 0x3f, 0xea, 0x35, 0xbb, 0x4b, 0xff, 0xa9, 0x82, 0xc2, 0x33, 0x3f, 0xea, 0x6e, 0x36, - 0x9b, 0xcf, 0x4e, 0x3a, 0x7e, 0x04, 0x2f, 0x83, 0xc9, 0xa0, 0x83, 0x7b, 0xcd, 0x6b, 0xd6, 0x1a, - 0xed, 0xee, 0xfa, 0x1a, 0x49, 0xc0, 0x19, 0x77, 0x82, 0x5b, 0xb7, 0xb1, 0x51, 0x87, 0x39, 0x36, - 0xd9, 0x97, 0xa9, 0xc0, 0x1c, 0x1b, 0x5e, 0x01, 0x53, 0x02, 0xd6, 0xa3, 0x74, 0x78, 0x57, 0x13, - 0xae, 0x58, 0xbd, 0x4f, 0xac, 0x09, 0xa0, 0x63, 0x93, 0x5d, 0x0d, 0xab, 
0x40, 0x8d, 0x31, 0xa2, - 0x8c, 0x78, 0x7b, 0xd3, 0x31, 0x70, 0x2f, 0xc9, 0x18, 0x51, 0x46, 0x5c, 0x23, 0xa8, 0x02, 0x1d, - 0x1b, 0x5e, 0x05, 0x45, 0x01, 0x3c, 0x6a, 0x7c, 0xc2, 0x3f, 0x5c, 0x5f, 0x43, 0xa3, 0x96, 0x51, - 0x1a, 0x75, 0x05, 0x41, 0x95, 0x9a, 0x93, 0x50, 0xc7, 0x46, 0x63, 0x96, 0x51, 0x1a, 0xd1, 0xa0, - 0x8e, 0x0d, 0xaf, 0x83, 0xe9, 0xd8, 0x3d, 0xa7, 0x1d, 0xb7, 0x8c, 0xd2, 0x94, 0x2b, 0x38, 0xf6, - 0x98, 0x3d, 0x05, 0xec, 0xd8, 0x08, 0x58, 0x46, 0xa9, 0xa8, 0x83, 0x1d, 0x5b, 0x49, 0xfd, 0x51, - 0x33, 0xf0, 0xba, 0x28, 0x6f, 0x19, 0xa5, 0x5c, 0x9c, 0xfa, 0x2a, 0x36, 0x2a, 0xfb, 0x3f, 0x0c, - 0x7a, 0x07, 0x4d, 0x1f, 0x15, 0x2c, 0xa3, 0x64, 0xc4, 0xfb, 0x7f, 0x48, 0xac, 0x70, 0x19, 0x88, - 0x95, 0xb5, 0x83, 0x20, 0x68, 0xa2, 0x09, 0xcb, 0x28, 0x8d, 0xb9, 0x05, 0x6e, 0x2c, 0x07, 0x41, - 0x53, 0xcd, 0x66, 0x37, 0x6c, 0xb4, 0x8f, 0xd1, 0x24, 0xee, 0x2a, 0x29, 0x9b, 0xc4, 0xaa, 0x44, - 0x77, 0x70, 0xd2, 0xf5, 0x23, 0x34, 0x85, 0xdb, 0x38, 0x8e, 0xae, 0x8c, 0x8d, 0xb0, 0x06, 0x16, - 0x04, 0xac, 0x4d, 0x6f, 0xef, 0x96, 0x1f, 0x45, 0xde, 0xb1, 0x8f, 0xa0, 0x65, 0x94, 0xf2, 0x6b, - 0x57, 0x94, 0x1b, 0x5b, 0x6e, 0xd1, 0x95, 0x1d, 0x82, 0x7f, 0x42, 0xe1, 0xee, 0x1c, 0xe7, 0x51, - 0xcc, 0x70, 0x1f, 0xa0, 0x38, 0x4b, 0x41, 0xe8, 0x37, 0x8e, 0xdb, 0xc2, 0xc3, 0x0c, 0xf1, 0x70, - 0x4e, 0xf1, 0x50, 0xa5, 0x18, 0xce, 0x3a, 0x2f, 0x92, 0xa9, 0xd8, 0xe1, 0x7b, 0x60, 0x56, 0x8f, - 0xdb, 0x6f, 0xf7, 0x5a, 0x68, 0x8e, 0xa8, 0xd1, 0xa5, 0xd3, 0x82, 0xae, 0xb4, 0x7b, 0x2d, 0x17, - 0xaa, 0x11, 0x63, 0x1b, 0x7c, 0x17, 0xcc, 0x25, 0xc2, 0x25, 0xc4, 0xf3, 0x84, 0x18, 0xa5, 0xc5, - 0x4a, 0xc8, 0x66, 0xb4, 0x40, 0x09, 0x9b, 0x23, 0xb1, 0xd1, 0x6a, 0xd5, 0x3a, 0x0d, 0xbf, 0xee, - 0x23, 0x84, 0x6b, 0x56, 0xce, 0x8d, 0xe5, 0xe2, 0x75, 0xb4, 0x6e, 0x4f, 0xf1, 0x65, 0x78, 0x45, - 0x6a, 0x85, 0x7a, 0x10, 0x1e, 0xa2, 0xb3, 0x0c, 0x6f, 0xc4, 0xed, 0xf0, 0x20, 0x08, 0x0f, 0x61, - 0x15, 0x4c, 0x87, 0x7e, 0xbd, 0x17, 0x46, 0x8d, 0x57, 0xbe, 0x48, 0xeb, 0x39, 0x92, 0xd6, 0xb3, - 0x99, 0x39, 0x70, 0x8b, 0x62, 0x0d, 0x4f, 0xe7, 0x65, 0x30, 0x19, 0xfa, 0x1d, 0xdf, 0xc3, 0x79, - 0xa4, 0x37, 0xf3, 0x05, 0xcb, 0xc4, 0x6a, 0xc3, 0xad, 0x42, 0x6d, 0x64, 0x98, 0x63, 0x23, 0xcb, - 0x32, 0xb1, 0xda, 0x48, 0x30, 0xaa, 0x0d, 0x02, 0xc6, 0xd4, 0xe6, 0xa2, 0x65, 0x62, 0xb5, 0xe1, - 0xe6, 0x58, 0x6d, 0x14, 0xa0, 0x63, 0xa3, 0x25, 0xcb, 0xc4, 0x6a, 0x23, 0x03, 0x35, 0x46, 0xa6, - 0x36, 0xcb, 0x96, 0x89, 0xd5, 0x86, 0x9b, 0xf7, 0x92, 0x8c, 0x4c, 0x6d, 0x2e, 0x59, 0x26, 0x56, - 0x1b, 0x19, 0x48, 0xd5, 0x46, 0x00, 0xb9, 0x2c, 0x5c, 0xb6, 0x4c, 0xac, 0x36, 0xdc, 0x2e, 0xa9, - 0x8d, 0x0a, 0x75, 0x6c, 0xf4, 0x7f, 0x96, 0x89, 0xd5, 0x46, 0x81, 0x52, 0xb5, 0x89, 0xdd, 0x73, - 0xda, 0x2b, 0x96, 0x89, 0xd5, 0x46, 0x04, 0x20, 0xa9, 0x8d, 0x06, 0x76, 0x6c, 0x54, 0xb2, 0x4c, - 0xac, 0x36, 0x2a, 0x98, 0xaa, 0x4d, 0x1c, 0x04, 0x51, 0x9b, 0xab, 0x96, 0x89, 0xd5, 0x46, 0x84, - 0xc0, 0xd5, 0x46, 0xc0, 0x98, 0xda, 0x5c, 0xb3, 0x4c, 0xac, 0x36, 0xdc, 0x1c, 0xab, 0x8d, 0x00, - 0x12, 0xb5, 0xb9, 0x6e, 0x99, 0x58, 0x6d, 0xb8, 0x91, 0xab, 0x4d, 0x1c, 0x21, 0x55, 0x9b, 0x37, - 0x2d, 0x13, 0xab, 0x8d, 0x88, 0x4f, 0xa8, 0x4d, 0xcc, 0x46, 0xd4, 0xe6, 0x2d, 0xcb, 0xc4, 0x6a, - 0x23, 0xe8, 0xb8, 0xda, 0x08, 0x98, 0xa6, 0x36, 0x37, 0x2d, 0xf3, 0xb5, 0xd4, 0x86, 0xf3, 0x24, - 0xd4, 0x26, 0xce, 0x92, 0xa6, 0x36, 0xab, 0xc4, 0x43, 0x7f, 0xb5, 0x11, 0xc9, 0x4c, 0xa8, 0x8d, - 0x1e, 0x37, 0x11, 0x85, 0x75, 0xcb, 0x1c, 0x5c, 0x6d, 0xd4, 0x88, 0xb9, 0xda, 0x24, 0xc2, 0x25, - 0xc4, 0x36, 0x21, 0xee, 0xa3, 0x36, 0x5a, 0xa0, 0x5c, 0x6d, 0xb4, 0x6a, 0x31, 0xb5, 0x71, 0x70, - 
0xcd, 0xa8, 0xda, 0xa8, 0x75, 0x13, 0x6a, 0x23, 0xd6, 0x11, 0xb5, 0xb9, 0xcd, 0xf0, 0x46, 0xdc, - 0x0e, 0x44, 0x6d, 0x9e, 0x81, 0xa9, 0x96, 0xd7, 0xa1, 0x02, 0xc1, 0x64, 0x62, 0x83, 0x24, 0xf5, - 0xcd, 0xec, 0x0c, 0x3c, 0xf1, 0x3a, 0x44, 0x3b, 0xc8, 0x47, 0xa5, 0xdd, 0x0d, 0x4f, 0xdc, 0x89, - 0x96, 0x6c, 0x93, 0x58, 0x1d, 0x9b, 0xa9, 0xca, 0x9d, 0xc1, 0x58, 0x1d, 0x9b, 0x7c, 0x28, 0xac, - 0xcc, 0x06, 0x5f, 0x80, 0x69, 0xcc, 0x4a, 0xe5, 0x87, 0xab, 0xd0, 0x5d, 0xc2, 0xbb, 0xd2, 0x97, - 0x97, 0x4a, 0x13, 0xfd, 0xa4, 0xcc, 0x38, 0x3c, 0xd9, 0x2a, 0x73, 0x3b, 0x36, 0x17, 0xae, 0xb7, - 0x07, 0xe4, 0x76, 0x6c, 0xfa, 0xa9, 0x72, 0x73, 0x2b, 0xe7, 0xa6, 0x22, 0xc7, 0xb5, 0xee, 0xff, - 0x07, 0xe0, 0xa6, 0x02, 0xb8, 0xa7, 0xc5, 0x2d, 0x5b, 0x65, 0x6e, 0xc7, 0xe6, 0xf2, 0xf8, 0xce, - 0x80, 0xdc, 0x8e, 0xbd, 0xa7, 0xc5, 0x2d, 0x5b, 0xe1, 0xc7, 0xc1, 0x0c, 0xe6, 0x66, 0xda, 0x26, - 0x24, 0xf5, 0x1e, 0x61, 0xbf, 0xd9, 0x97, 0x9d, 0xe9, 0x2c, 0xfb, 0x41, 0xf9, 0x71, 0xa0, 0xaa, - 0x5d, 0xf1, 0xe0, 0xd8, 0x42, 0x89, 0x3f, 0x32, 0xa8, 0x07, 0xc7, 0x66, 0x3f, 0x34, 0x0f, 0xc2, - 0x0e, 0x8f, 0xc0, 0x1c, 0xc9, 0x0f, 0xdf, 0x84, 0x50, 0xf0, 0xfb, 0xc4, 0xc7, 0x5a, 0xff, 0x1c, - 0x31, 0x30, 0xff, 0x49, 0xbd, 0xe0, 0x90, 0xf5, 0x2b, 0xaa, 0x1f, 0x5c, 0x09, 0xbe, 0x97, 0xcd, - 0x81, 0xfd, 0x38, 0x36, 0xff, 0xa9, 0xfb, 0x89, 0xaf, 0xa8, 0xf7, 0x2b, 0x3d, 0x34, 0xca, 0x83, - 0xde, 0xaf, 0xe4, 0x38, 0xd1, 0xee, 0x57, 0x7a, 0xc4, 0x3c, 0x07, 0xc5, 0x98, 0x95, 0x9d, 0x31, - 0x0f, 0x08, 0xed, 0x5b, 0xa7, 0xd3, 0xd2, 0xd3, 0x87, 0xf2, 0x4e, 0xb6, 0x14, 0x23, 0xdc, 0x01, - 0xd8, 0x13, 0x39, 0x8d, 0xe8, 0x91, 0xf4, 0x90, 0xb0, 0x5e, 0xeb, 0xcb, 0x8a, 0xcf, 0x29, 0xfc, - 0x3f, 0xa5, 0xcc, 0xb7, 0x62, 0x8b, 0x68, 0x77, 0x2a, 0x85, 0xec, 0xfc, 0xaa, 0x0c, 0xd2, 0xee, - 0x04, 0x4a, 0x3f, 0xa5, 0x76, 0x97, 0xac, 0x3c, 0x09, 0x8c, 0x9b, 0x1e, 0x79, 0xd5, 0x01, 0x92, - 0x40, 0x97, 0x93, 0xd3, 0x30, 0x4e, 0x82, 0x64, 0x84, 0x1d, 0x70, 0x56, 0x22, 0xd6, 0x0e, 0xc9, - 0x47, 0xc4, 0xc3, 0xad, 0x01, 0x3c, 0x28, 0xc7, 0x22, 0xf5, 0x34, 0xdf, 0x4a, 0xbd, 0x08, 0x23, - 0xb0, 0x28, 0x79, 0xd4, 0x4f, 0xcd, 0x2d, 0xe2, 0xd2, 0x19, 0xc0, 0xa5, 0x7a, 0x66, 0x52, 0x9f, - 0x0b, 0xad, 0xf4, 0xab, 0xf0, 0x18, 0xcc, 0x27, 0xb7, 0x49, 0x8e, 0xbe, 0xed, 0x41, 0xee, 0x01, - 0x69, 0x1b, 0xf8, 0xe8, 0x93, 0xee, 0x01, 0xed, 0x0a, 0x7c, 0x1f, 0x2c, 0xa4, 0xec, 0x8e, 0x78, - 0x7a, 0x4c, 0x3c, 0xad, 0x0f, 0xbe, 0xb5, 0xd8, 0xd5, 0x6c, 0x2b, 0xe5, 0x12, 0x5c, 0x06, 0x85, - 0xa0, 0xed, 0x07, 0x47, 0xfc, 0xb8, 0x09, 0xf0, 0x23, 0xf6, 0xd6, 0x90, 0x9b, 0x27, 0x56, 0x76, - 0x78, 0x7c, 0x0c, 0xcc, 0x52, 0x90, 0x56, 0xdb, 0xce, 0x6b, 0x3d, 0x6e, 0x6d, 0x0d, 0xb9, 0x90, - 0xd0, 0xa8, 0xb5, 0x14, 0x11, 0xb0, 0x6e, 0xff, 0x80, 0x4f, 0x24, 0x88, 0x95, 0xf5, 0xee, 0x45, - 0x40, 0xbf, 0xb2, 0xb6, 0x0d, 0xd9, 0x78, 0x03, 0x10, 0x23, 0xed, 0xc2, 0x0b, 0x00, 0x30, 0x08, - 0xbe, 0x0f, 0x23, 0xfc, 0x20, 0xba, 0x35, 0xe4, 0x8e, 0x53, 0x04, 0xbe, 0xb7, 0x94, 0xad, 0x3a, - 0x36, 0xea, 0x5a, 0x46, 0x69, 0x58, 0xd9, 0xaa, 0x63, 0xc7, 0x8e, 0xa8, 0xf6, 0xf4, 0xf0, 0xe3, - 0xb1, 0x70, 0x44, 0xc5, 0x44, 0xf0, 0x30, 0x21, 0x79, 0x85, 0x1f, 0x8d, 0x05, 0x0f, 0x13, 0x86, - 0x0a, 0x8f, 0x86, 0x94, 0xed, 0xc3, 0xc1, 0x1f, 0xf1, 0x44, 0xcc, 0xa4, 0x3c, 0xbb, 0xd2, 0xd3, - 0x18, 0x11, 0x19, 0x36, 0x4d, 0x43, 0xbf, 0x32, 0x48, 0xee, 0x17, 0x57, 0xe8, 0xb8, 0x6d, 0x85, - 0xcf, 0x79, 0x56, 0xf0, 0x56, 0xdf, 0xf3, 0x9a, 0x3d, 0x3f, 0x7e, 0x4c, 0xc3, 0xa6, 0xe7, 0x74, - 0x1d, 0x74, 0xc1, 0xbc, 0x3a, 0xa3, 0x11, 0x8c, 0xbf, 0x36, 0xd8, 0xa3, 0xad, 0xce, 0x48, 0xf4, - 0x8e, 0x52, 0xce, 0x2a, 
0x93, 0x9c, 0x0c, 0x4e, 0xc7, 0x16, 0x9c, 0xbf, 0xe9, 0xc3, 0xe9, 0xd8, - 0x49, 0x4e, 0xc7, 0xe6, 0x9c, 0xfb, 0xd2, 0x43, 0x7e, 0x4f, 0x0d, 0xf4, 0xb7, 0x94, 0xf4, 0x7c, - 0x82, 0x74, 0x5f, 0x8a, 0x74, 0x4e, 0x1d, 0x12, 0x65, 0xd1, 0x4a, 0xb1, 0xfe, 0xae, 0x1f, 0x2d, - 0x0f, 0x76, 0x4e, 0x1d, 0x29, 0xa5, 0x65, 0x80, 0x34, 0x8e, 0x60, 0xfd, 0x7d, 0x56, 0x06, 0x48, - 0x2f, 0x69, 0x19, 0x20, 0xb6, 0xb4, 0x50, 0x69, 0xa7, 0x09, 0xd2, 0x3f, 0x64, 0x85, 0x4a, 0x9b, - 0x4f, 0x0b, 0x95, 0x1a, 0xd3, 0x68, 0x99, 0xc2, 0x70, 0xda, 0x3f, 0x66, 0xd1, 0xd2, 0x9b, 0x50, - 0xa3, 0xa5, 0xc6, 0xb4, 0x0c, 0x90, 0x7b, 0x54, 0xb0, 0xfe, 0x29, 0x2b, 0x03, 0xe4, 0xb6, 0xd5, - 0x32, 0x40, 0x6c, 0x9c, 0x73, 0x57, 0x7a, 0x38, 0x50, 0x9a, 0xff, 0xcf, 0x06, 0x91, 0xc1, 0xbe, - 0xcd, 0x2f, 0x3f, 0x14, 0x4a, 0x41, 0xaa, 0x23, 0x03, 0xc1, 0xf8, 0x17, 0x83, 0x3d, 0x69, 0xf5, - 0x6b, 0x7e, 0x65, 0xb0, 0x90, 0xc1, 0x29, 0x35, 0xd4, 0x5f, 0xfb, 0x70, 0x8a, 0xe6, 0x57, 0xa6, - 0x10, 0x52, 0x8d, 0xb4, 0x61, 0x84, 0x20, 0xfd, 0x1b, 0x25, 0x3d, 0xa5, 0xf9, 0xd5, 0x99, 0x45, - 0x16, 0xad, 0x14, 0xeb, 0xdf, 0xfb, 0xd1, 0x8a, 0xe6, 0x57, 0x27, 0x1c, 0x69, 0x19, 0x50, 0x9b, - 0xff, 0x1f, 0x59, 0x19, 0x90, 0x9b, 0x5f, 0x19, 0x06, 0xa4, 0x85, 0xaa, 0x35, 0xff, 0x3f, 0xb3, - 0x42, 0x55, 0x9a, 0x5f, 0x1d, 0x1d, 0xa4, 0xd1, 0x6a, 0xcd, 0xff, 0xaf, 0x2c, 0x5a, 0xa5, 0xf9, - 0xd5, 0x67, 0xd1, 0xb4, 0x0c, 0xa8, 0xcd, 0xff, 0xef, 0xac, 0x0c, 0xc8, 0xcd, 0xaf, 0x0c, 0x1c, - 0x38, 0xe7, 0x23, 0x69, 0xae, 0xcb, 0xdf, 0xe1, 0xa0, 0xef, 0xe6, 0xd8, 0x9c, 0x2c, 0xb1, 0x77, - 0x86, 0x88, 0x67, 0xbe, 0xdc, 0x02, 0x1f, 0x03, 0x31, 0x34, 0xac, 0x89, 0x97, 0x35, 0xe8, 0x7b, - 0xb9, 0x8c, 0xf3, 0xe3, 0x19, 0x87, 0xb8, 0xc2, 0xbf, 0x30, 0xc1, 0x8f, 0x82, 0x19, 0x69, 0x88, - 0xcd, 0x5f, 0x1c, 0xa1, 0xef, 0x67, 0x91, 0x55, 0x31, 0xe6, 0x89, 0x17, 0xbd, 0x8c, 0xc9, 0x84, - 0x09, 0x6e, 0xaa, 0x73, 0xe1, 0x5e, 0xbd, 0x8b, 0x7e, 0x40, 0x89, 0x16, 0xd2, 0x8a, 0xd0, 0xab, - 0x77, 0x95, 0x89, 0x71, 0xaf, 0xde, 0x85, 0x1b, 0x40, 0xcc, 0x16, 0x6b, 0x5e, 0xfb, 0x04, 0xfd, - 0x90, 0xae, 0x9f, 0x4d, 0xac, 0xdf, 0x6c, 0x9f, 0xb8, 0x79, 0x0e, 0xdd, 0x6c, 0x9f, 0xc0, 0x7b, - 0xd2, 0xac, 0xf9, 0x15, 0x2e, 0x03, 0xfa, 0x11, 0x5d, 0x3b, 0x9f, 0x58, 0x4b, 0xab, 0x24, 0xa6, - 0x9b, 0xe4, 0x2b, 0x2e, 0x4f, 0xdc, 0xa0, 0xbc, 0x3c, 0x3f, 0xce, 0x91, 0x6a, 0xf7, 0x2b, 0x8f, - 0xe8, 0x4b, 0xa9, 0x3c, 0x82, 0x28, 0x2e, 0xcf, 0x4f, 0x72, 0x19, 0x0a, 0x27, 0x95, 0x87, 0x2f, - 0x8b, 0xcb, 0x23, 0x73, 0x91, 0xf2, 0x90, 0xea, 0xfc, 0x34, 0x8b, 0x4b, 0xaa, 0x4e, 0x3c, 0x14, - 0x64, 0xab, 0x70, 0x75, 0xe4, 0x5b, 0x05, 0x57, 0xe7, 0x97, 0x94, 0x28, 0xbb, 0x3a, 0xd2, 0xdd, - 0xc1, 0xaa, 0x23, 0x28, 0x70, 0x75, 0x7e, 0x46, 0xd7, 0x67, 0x54, 0x87, 0x43, 0x59, 0x75, 0xc4, - 0x4a, 0x5a, 0x9d, 0x9f, 0xd3, 0xb5, 0x99, 0xd5, 0xe1, 0x70, 0x5a, 0x9d, 0x0b, 0x00, 0x90, 0xfd, - 0xb7, 0xbd, 0x96, 0xbf, 0x8a, 0x3e, 0x6d, 0x92, 0xd7, 0x50, 0x92, 0x09, 0x5a, 0x20, 0x4f, 0xfb, - 0x17, 0x7f, 0x5d, 0x43, 0x9f, 0x91, 0x11, 0x3b, 0xd8, 0x04, 0x2f, 0x82, 0x42, 0x2d, 0x86, 0xac, - 0xa3, 0xcf, 0xea, 0x90, 0x75, 0xb8, 0x04, 0x26, 0x28, 0x82, 0x40, 0xec, 0x1a, 0xfa, 0x9c, 0x8e, - 0x21, 0x7f, 0x4f, 0x92, 0x6f, 0x37, 0x31, 0xe4, 0x16, 0xfa, 0x3c, 0x45, 0xc8, 0x36, 0xb8, 0xcc, - 0x69, 0x6e, 0x12, 0x1e, 0x07, 0x7d, 0x41, 0x01, 0x61, 0x1e, 0x47, 0xec, 0x08, 0x7f, 0xbb, 0x8d, - 0xbe, 0xa8, 0x3b, 0xba, 0x8d, 0x01, 0x55, 0xfe, 0x6d, 0x03, 0x7d, 0x49, 0x07, 0x6c, 0xc4, 0x5b, - 0xc6, 0x5f, 0xef, 0xa0, 0x2f, 0xeb, 0x88, 0x3b, 0x70, 0x09, 0x14, 0xaa, 0x02, 0xb1, 0x7a, 0x13, - 0x7d, 0x45, 0x8e, 0x83, 0xda, 0x08, 0x66, 0xbb, 
0xf2, 0xee, 0xc3, 0xda, 0xce, 0xe6, 0x93, 0xca, - 0xea, 0x2a, 0xfa, 0x2a, 0xc7, 0x60, 0x23, 0xb5, 0xc5, 0x18, 0x92, 0xeb, 0x35, 0xf4, 0x35, 0x05, - 0x43, 0x6c, 0xf0, 0x12, 0x98, 0xac, 0x49, 0xf9, 0x5d, 0x5d, 0x47, 0x5f, 0x4f, 0x78, 0x5b, 0xa7, - 0xa8, 0x6a, 0x8c, 0xb2, 0xd1, 0x37, 0x12, 0x28, 0x3b, 0x4e, 0x20, 0x05, 0xdd, 0x42, 0xdf, 0x4c, - 0x80, 0xa4, 0x2c, 0xd3, 0xdd, 0x39, 0xe8, 0x5b, 0x09, 0x90, 0x83, 0xfd, 0x49, 0x31, 0xdd, 0xae, - 0xd5, 0xd0, 0xb7, 0x13, 0xa8, 0xdb, 0x18, 0x25, 0xc5, 0xb4, 0x51, 0xab, 0xa1, 0xef, 0x24, 0x50, - 0x1b, 0x8b, 0x2f, 0xc0, 0x84, 0xfa, 0xa0, 0x53, 0x00, 0x86, 0xc7, 0xde, 0x88, 0x1a, 0x1e, 0x7c, - 0x1b, 0xe4, 0xeb, 0x81, 0x78, 0xa9, 0x81, 0x72, 0xa7, 0xbd, 0x00, 0x91, 0xd1, 0x8b, 0xf7, 0x01, - 0x4c, 0x0e, 0x29, 0x61, 0x11, 0x98, 0x2f, 0xfd, 0x13, 0xe6, 0x02, 0xff, 0x0a, 0x67, 0xc1, 0x19, - 0x7a, 0xfb, 0xe4, 0x88, 0x8d, 0x7e, 0xb9, 0x9b, 0xdb, 0x30, 0x62, 0x06, 0x79, 0x20, 0x29, 0x33, - 0x98, 0x29, 0x0c, 0xa6, 0xcc, 0x50, 0x06, 0xb3, 0x69, 0xa3, 0x47, 0x99, 0x63, 0x22, 0x85, 0x63, - 0x22, 0x9d, 0x43, 0x19, 0x31, 0xca, 0x1c, 0xc3, 0x29, 0x1c, 0xc3, 0x49, 0x8e, 0xc4, 0x28, 0x51, - 0xe6, 0x98, 0x4e, 0xe1, 0x98, 0x4e, 0xe7, 0x50, 0x46, 0x86, 0x32, 0x07, 0x4c, 0xe1, 0x80, 0x32, - 0xc7, 0x43, 0x30, 0x9f, 0x3e, 0x18, 0x94, 0x59, 0x46, 0x53, 0x58, 0x46, 0x33, 0x58, 0xd4, 0xe1, - 0x9f, 0xcc, 0x32, 0x92, 0xc2, 0x32, 0x22, 0xb3, 0x54, 0x01, 0xca, 0x1a, 0xef, 0xc9, 0x3c, 0x53, - 0x29, 0x3c, 0x53, 0x59, 0x3c, 0xda, 0xf8, 0x4e, 0xe6, 0x29, 0xa6, 0xf0, 0x14, 0x53, 0xbb, 0x4d, - 0x1e, 0xd2, 0x9d, 0xd6, 0xaf, 0x39, 0x99, 0x61, 0x13, 0xcc, 0xa4, 0xcc, 0xe3, 0x4e, 0xa3, 0x30, - 0x64, 0x8a, 0x7b, 0xa0, 0xa8, 0x0f, 0xdf, 0xe4, 0xf5, 0x63, 0x29, 0xeb, 0xc7, 0x52, 0x9a, 0x44, - 0x1f, 0xb4, 0xc9, 0x1c, 0xe3, 0x29, 0x1c, 0xe3, 0xc9, 0x6d, 0xe8, 0x13, 0xb5, 0xd3, 0x28, 0x0a, - 0x32, 0x45, 0x08, 0xce, 0xf5, 0x19, 0x99, 0xa5, 0x50, 0xbd, 0x23, 0x53, 0xbd, 0xc6, 0xfb, 0x2a, - 0xc9, 0xe7, 0x31, 0x38, 0xdf, 0x6f, 0x66, 0x96, 0xe2, 0x74, 0x55, 0x75, 0xda, 0xf7, 0x15, 0x96, - 0xe4, 0xa8, 0x49, 0x1b, 0x2e, 0x6d, 0x56, 0x96, 0xe2, 0xe4, 0xae, 0xec, 0x64, 0xd0, 0x97, 0x5a, - 0x92, 0x37, 0x0f, 0x9c, 0xcd, 0x9c, 0x97, 0xa5, 0xb8, 0x5b, 0x51, 0xdd, 0x65, 0xbf, 0xea, 0x8a, - 0x5d, 0x2c, 0xdd, 0x01, 0x40, 0x9a, 0xec, 0x8d, 0x02, 0xb3, 0xba, 0xbb, 0x5b, 0x1c, 0xc2, 0xbf, - 0x94, 0x37, 0xdd, 0xa2, 0x41, 0x7f, 0x79, 0x51, 0xcc, 0x61, 0x77, 0x3b, 0x95, 0x47, 0xc5, 0xff, - 0xf2, 0xff, 0x8c, 0xf2, 0x84, 0x18, 0x45, 0xe1, 0x53, 0x65, 0xe9, 0x0d, 0x30, 0xa9, 0x0d, 0x24, - 0x0b, 0xc0, 0xa8, 0xf3, 0x03, 0xa5, 0x7e, 0xed, 0x16, 0x00, 0xf1, 0xbf, 0x61, 0x82, 0x53, 0x20, - 0xbf, 0xbf, 0xb3, 0xf7, 0xb4, 0xf2, 0x60, 0xbb, 0xba, 0x5d, 0x79, 0x58, 0x1c, 0x82, 0x05, 0x30, - 0xf6, 0xd4, 0xdd, 0x7d, 0xb6, 0x5b, 0xde, 0xaf, 0x16, 0x0d, 0x38, 0x06, 0x86, 0x1f, 0xef, 0xed, - 0xee, 0x14, 0x73, 0xd7, 0xee, 0x83, 0xbc, 0x3c, 0x0f, 0x9c, 0x02, 0xf9, 0xea, 0xae, 0x5b, 0xd9, - 0x7e, 0xb4, 0x53, 0xa3, 0x91, 0x4a, 0x06, 0x1a, 0xb1, 0x62, 0x78, 0x51, 0xcc, 0x95, 0x2f, 0x82, - 0x0b, 0xf5, 0xa0, 0x95, 0xf8, 0xc3, 0x4c, 0x4a, 0xce, 0xc1, 0x08, 0xb1, 0xae, 0xff, 0x2f, 0x00, - 0x00, 0xff, 0xff, 0x46, 0x1f, 0xdb, 0xdc, 0xeb, 0x26, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto deleted file mode 100644 index 95a8fd135..000000000 --- a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto +++ /dev/null @@ -1,285 +0,0 @@ -// Protocol Buffers - Google's data 
interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package conformance; -option java_package = "com.google.protobuf.conformance"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -// This defines the conformance testing protocol. This protocol exists between -// the conformance test suite itself and the code being tested. For each test, -// the suite will send a ConformanceRequest message and expect a -// ConformanceResponse message. -// -// You can run the tests in either of two ways: -// -// 1. in-process (using the interface in conformance_test.h). -// -// 2. as a sub-process communicating over a pipe. Information about how to -// do this is in conformance_test_runner.cc. -// -// Pros/cons of the two approaches: -// -// - running as a sub-process is much simpler for languages other than C/C++. -// -// - running as a sub-process may be trickier in unusual environments like -// iOS apps, where fork/stdin/stdout are not available. - -enum WireFormat { - UNSPECIFIED = 0; - PROTOBUF = 1; - JSON = 2; -} - -// Represents a single test case's input. The testee should: -// -// 1. parse this proto (which should always succeed) -// 2. parse the protobuf or JSON payload in "payload" (which may fail) -// 3. if the parse succeeded, serialize the message in the requested format. -message ConformanceRequest { - // The payload (whether protobuf or JSON) is always for a TestAllTypes proto - // (see below). - oneof payload { - bytes protobuf_payload = 1; - string json_payload = 2; - } - - // Which format should the testee serialize its message to? - WireFormat requested_output_format = 3; -} - -// Represents a single test case's output.
-message ConformanceResponse { - oneof result { - // This string should be set to indicate parsing failed. The string can - // provide more information about the parse error if it is available. - // - // Setting this string does not necessarily mean the testee failed the - // test. Some of the test cases are intentionally invalid input. - string parse_error = 1; - - // If the input was successfully parsed but errors occurred when - // serializing it to the requested output format, set the error message in - // this field. - string serialize_error = 6; - - // This should be set if some other error occurred. This will always - // indicate that the test failed. The string can provide more information - // about the failure. - string runtime_error = 2; - - // If the input was successfully parsed and the requested output was - // protobuf, serialize it to protobuf and set it in this field. - bytes protobuf_payload = 3; - - // If the input was successfully parsed and the requested output was JSON, - // serialize to JSON and set it in this field. - string json_payload = 4; - - // For when the testee skipped the test, likely because a certain feature - // wasn't supported, like JSON input/output. - string skipped = 5; - } -} - -// This proto includes every type of field in both singular and repeated -// forms. -message TestAllTypes { - message NestedMessage { - int32 a = 1; - TestAllTypes corecursive = 2; - } - - enum NestedEnum { - FOO = 0; - BAR = 1; - BAZ = 2; - NEG = -1; // Intentionally negative. - } - - // Singular - int32 optional_int32 = 1; - int64 optional_int64 = 2; - uint32 optional_uint32 = 3; - uint64 optional_uint64 = 4; - sint32 optional_sint32 = 5; - sint64 optional_sint64 = 6; - fixed32 optional_fixed32 = 7; - fixed64 optional_fixed64 = 8; - sfixed32 optional_sfixed32 = 9; - sfixed64 optional_sfixed64 = 10; - float optional_float = 11; - double optional_double = 12; - bool optional_bool = 13; - string optional_string = 14; - bytes optional_bytes = 15; - - NestedMessage optional_nested_message = 18; - ForeignMessage optional_foreign_message = 19; - - NestedEnum optional_nested_enum = 21; - ForeignEnum optional_foreign_enum = 22; - - string optional_string_piece = 24 [ctype=STRING_PIECE]; - string optional_cord = 25 [ctype=CORD]; - - TestAllTypes recursive_message = 27; - - // Repeated - repeated int32 repeated_int32 = 31; - repeated int64 repeated_int64 = 32; - repeated uint32 repeated_uint32 = 33; - repeated uint64 repeated_uint64 = 34; - repeated sint32 repeated_sint32 = 35; - repeated sint64 repeated_sint64 = 36; - repeated fixed32 repeated_fixed32 = 37; - repeated fixed64 repeated_fixed64 = 38; - repeated sfixed32 repeated_sfixed32 = 39; - repeated sfixed64 repeated_sfixed64 = 40; - repeated float repeated_float = 41; - repeated double repeated_double = 42; - repeated bool repeated_bool = 43; - repeated string repeated_string = 44; - repeated bytes repeated_bytes = 45; - - repeated NestedMessage repeated_nested_message = 48; - repeated ForeignMessage repeated_foreign_message = 49; - - repeated NestedEnum repeated_nested_enum = 51; - repeated ForeignEnum repeated_foreign_enum = 52; - - repeated string repeated_string_piece = 54 [ctype=STRING_PIECE]; - repeated string repeated_cord = 55 [ctype=CORD]; - - // Map - map < int32, int32> map_int32_int32 = 56; - map < int64, int64> map_int64_int64 = 57; - map < uint32, uint32> map_uint32_uint32 = 58; - map < uint64, uint64> map_uint64_uint64 = 59; - map < sint32, sint32> map_sint32_sint32 = 60; - map < sint64, sint64> map_sint64_sint64 
= 61; - map < fixed32, fixed32> map_fixed32_fixed32 = 62; - map < fixed64, fixed64> map_fixed64_fixed64 = 63; - map < sfixed32, sfixed32> map_sfixed32_sfixed32 = 64; - map < sfixed64, sfixed64> map_sfixed64_sfixed64 = 65; - map < int32, float> map_int32_float = 66; - map < int32, double> map_int32_double = 67; - map < bool, bool> map_bool_bool = 68; - map < string, string> map_string_string = 69; - map < string, bytes> map_string_bytes = 70; - map < string, NestedMessage> map_string_nested_message = 71; - map < string, ForeignMessage> map_string_foreign_message = 72; - map < string, NestedEnum> map_string_nested_enum = 73; - map < string, ForeignEnum> map_string_foreign_enum = 74; - - oneof oneof_field { - uint32 oneof_uint32 = 111; - NestedMessage oneof_nested_message = 112; - string oneof_string = 113; - bytes oneof_bytes = 114; - bool oneof_bool = 115; - uint64 oneof_uint64 = 116; - float oneof_float = 117; - double oneof_double = 118; - NestedEnum oneof_enum = 119; - } - - // Well-known types - google.protobuf.BoolValue optional_bool_wrapper = 201; - google.protobuf.Int32Value optional_int32_wrapper = 202; - google.protobuf.Int64Value optional_int64_wrapper = 203; - google.protobuf.UInt32Value optional_uint32_wrapper = 204; - google.protobuf.UInt64Value optional_uint64_wrapper = 205; - google.protobuf.FloatValue optional_float_wrapper = 206; - google.protobuf.DoubleValue optional_double_wrapper = 207; - google.protobuf.StringValue optional_string_wrapper = 208; - google.protobuf.BytesValue optional_bytes_wrapper = 209; - - repeated google.protobuf.BoolValue repeated_bool_wrapper = 211; - repeated google.protobuf.Int32Value repeated_int32_wrapper = 212; - repeated google.protobuf.Int64Value repeated_int64_wrapper = 213; - repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214; - repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215; - repeated google.protobuf.FloatValue repeated_float_wrapper = 216; - repeated google.protobuf.DoubleValue repeated_double_wrapper = 217; - repeated google.protobuf.StringValue repeated_string_wrapper = 218; - repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219; - - google.protobuf.Duration optional_duration = 301; - google.protobuf.Timestamp optional_timestamp = 302; - google.protobuf.FieldMask optional_field_mask = 303; - google.protobuf.Struct optional_struct = 304; - google.protobuf.Any optional_any = 305; - google.protobuf.Value optional_value = 306; - - repeated google.protobuf.Duration repeated_duration = 311; - repeated google.protobuf.Timestamp repeated_timestamp = 312; - repeated google.protobuf.FieldMask repeated_fieldmask = 313; - repeated google.protobuf.Struct repeated_struct = 324; - repeated google.protobuf.Any repeated_any = 315; - repeated google.protobuf.Value repeated_value = 316; - - // Test field-name-to-JSON-name convention. - // (protobuf says names can be any valid C/C++ identifier.)
- int32 fieldname1 = 401; - int32 field_name2 = 402; - int32 _field_name3 = 403; - int32 field__name4_ = 404; - int32 field0name5 = 405; - int32 field_0_name6 = 406; - int32 fieldName7 = 407; - int32 FieldName8 = 408; - int32 field_Name9 = 409; - int32 Field_Name10 = 410; - int32 FIELD_NAME11 = 411; - int32 FIELD_name12 = 412; - int32 __field_name13 = 413; - int32 __Field_name14 = 414; - int32 field__name15 = 415; - int32 field__Name16 = 416; - int32 field_name17__ = 417; - int32 Field_name18__ = 418; -} - -message ForeignMessage { - int32 c = 1; -} - -enum ForeignEnum { - FOREIGN_FOO = 0; - FOREIGN_BAR = 1; - FOREIGN_BAZ = 2; -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 1fc8ae8d7..6308548cb 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -510,63 +510,41 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool -} - // UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. // This function is lenient and will decode any options permutations of the // related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { inputValue := json.RawMessage{} if err := dec.Decode(&inputValue); err != nil { return err } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) + return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) } // Unmarshal unmarshals a JSON object stream into a protocol // buffer. This function is lenient and will decode any options // permutations of the related Marshaler. func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) + dec := json.NewDecoder(r) + return UnmarshalNext(dec, pb) } // UnmarshalString will populate the fields of a protocol buffer based // on a JSON string. This function is lenient and will decode any options // permutations of the related Marshaler. func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) + return Unmarshal(strings.NewReader(str), pb) } // unmarshalValue converts/copies a value into the target. // prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { +func unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { targetType := target.Type() // Allocate memory for pointer fields. 
if targetType.Kind() == reflect.Ptr { target.Set(reflect.New(targetType.Elem())) - return u.unmarshalValue(target.Elem(), inputValue, prop) + return unmarshalValue(target.Elem(), inputValue, prop) } // Handle well-known types. @@ -581,7 +559,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // as the wrapped primitive type, except that null is allowed." // encoding/json will turn JSON `null` into Go `nil`, // so we don't have to do any extra work. - return u.unmarshalValue(target.Field(0), inputValue, prop) + return unmarshalValue(target.Field(0), inputValue, prop) case "Any": return fmt.Errorf("unmarshaling Any not supported yet") case "Duration": @@ -608,8 +586,11 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err != nil { return fmt.Errorf("bad Timestamp: %v", err) } - target.Field(0).SetInt(int64(t.Unix())) - target.Field(1).SetInt(int64(t.Nanosecond())) + ns := t.UnixNano() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) return nil } } @@ -676,7 +657,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe continue } - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + if err := unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { return err } } @@ -689,12 +670,12 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } nv := reflect.New(oop.Type.Elem()) target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + if err := unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { return err } } } - if !u.AllowUnknownFields && len(jsonFields) > 0 { + if len(jsonFields) > 0 { // Pick any field to be the scapegoat. var f string for fname := range jsonFields { @@ -715,7 +696,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe len := len(slc) target.Set(reflect.MakeSlice(targetType, len, len)) for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + if err := unmarshalValue(target.Index(i), slc[i], prop); err != nil { return err } } @@ -744,14 +725,14 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + if err := unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { return err } } // Unmarshal map value. v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { + if err := unmarshalValue(v, raw, valprop); err != nil { return err } target.SetMapIndex(k, v) @@ -809,21 +790,10 @@ func (w *errWriter) write(str string) { // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. 
type mapKeys []reflect.Value func (s mapKeys) Len() int { return len(s) } func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go index 78f67c4d7..659dbede4 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -36,7 +36,6 @@ import ( "encoding/json" "io" "reflect" - "strings" "testing" "github.com/golang/protobuf/proto" @@ -108,7 +107,7 @@ var ( RSint32: []int32{-1, -2, -3}, RSint64: []int64{-6789012345, -3456789012}, RFloat: []float32{3.14, 6.28}, - RDouble: []float64{299792458 * 1e20, 6.62606957e-34}, + RDouble: []float64{299792458, 6.62606957e-34}, RString: []string{"happy", "days"}, RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, } @@ -122,7 +121,7 @@ var ( `"rSint32":[-1,-2,-3],` + `"rSint64":["-6789012345","-3456789012"],` + `"rFloat":[3.14,6.28],` + - `"rDouble":[2.99792458e+28,6.62606957e-34],` + + `"rDouble":[2.99792458e+08,6.62606957e-34],` + `"rString":["happy","days"],` + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + `}` @@ -165,7 +164,7 @@ var ( 6.28 ], "rDouble": [ - 2.99792458e+28, + 2.99792458e+08, 6.62606957e-34 ], "rString": [ @@ -365,10 +364,6 @@ var marshalingTests = []struct { // TODO: This is broken. //{"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`}, {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, - {"map", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`}, {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, `{"mInt64Str":{"213":"cat"}}`}, {"proto2 map", marshaler, @@ -415,73 +410,66 @@ func TestMarshaling(t *testing.T) { } var unmarshalingTests = []struct { - desc string - unmarshaler Unmarshaler - json string - pb proto.Message + desc string + json string + pb proto.Message }{ - {"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject}, - {"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject}, - {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject}, - {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject}, - {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject}, - {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject}, - {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, - {"enum-value object", Unmarshaler{}, 
"{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, - {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)}, - {"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"simple flat object", simpleObjectJSON, simpleObject}, + {"simple pretty object", simpleObjectPrettyJSON, simpleObject}, + {"repeated fields flat object", repeatsObjectJSON, repeatsObject}, + {"repeated fields pretty object", repeatsObjectPrettyJSON, repeatsObject}, + {"nested message/enum flat object", complexObjectJSON, complexObject}, + {"nested message/enum pretty object", complexObjectPrettyJSON, complexObject}, + {"enum-string object", `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"enum-value object", "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"proto3 enum string", `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 enum value", `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, {"unknown enum value object", - Unmarshaler{}, "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, - {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`, + {"repeated proto3 enum", `{"rFunny":["PUNS","SLAPSTICK"]}`, &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ proto3pb.Message_PUNS, proto3pb.Message_SLAPSTICK, }}}, - {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`, + {"repeated proto3 enum as int", `{"rFunny":[1,2]}`, &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ proto3pb.Message_PUNS, proto3pb.Message_SLAPSTICK, }}}, - {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`, + {"repeated proto3 enum as mix of strings and ints", `{"rFunny":["PUNS",2]}`, &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ proto3pb.Message_PUNS, proto3pb.Message_SLAPSTICK, }}}, - {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, - {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, - {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, - {"map", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, - {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}}, + {"unquoted int64 object", `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, + {"unquoted uint64 object", `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"map", `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, + {"map", `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, + {"map", `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}}, // TODO: This is broken. 
- //{"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, - {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, - {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, - {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, - {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, - {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, - {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, - {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, - {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, - - {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, - {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, - {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}}, - {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}}, - - {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, - {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, - {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, - {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, - {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, - {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, - {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, - {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, - {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, + //{"map", `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"map", `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"oneof", `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, + {"oneof spec name", `{"country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof orig_name", `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"orig_name input", `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"camelName input", `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + + {"Duration", `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"Timestamp", `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + + {"DoubleValue", `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, + {"FloatValue", `{"flt":1.2}`, &pb.KnownTypes{Flt: 
&wpb.FloatValue{Value: 1.2}}}, + {"Int64Value", `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, + {"UInt64Value", `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, + {"Int32Value", `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, + {"UInt32Value", `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, + {"BoolValue", `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, + {"StringValue", `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"BytesValue", `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, // `null` is also a permissible value. Let's just test one. - {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{}}}, + {"null DoubleValue", `{"dbl":null}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{}}}, } func TestUnmarshaling(t *testing.T) { @@ -489,7 +477,7 @@ func TestUnmarshaling(t *testing.T) { // Make a new instance of the type of our expected object. p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) - err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p) + err := UnmarshalString(tt.json, p) if err != nil { t.Errorf("%s: %v", tt.desc, err) continue @@ -519,7 +507,7 @@ func TestUnmarshalNext(t *testing.T) { // Make a new instance of the type of our expected object. p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) - err := tt.unmarshaler.UnmarshalNext(dec, p) + err := UnmarshalNext(dec, p) if err != nil { t.Errorf("%s: %v", tt.desc, err) continue @@ -534,7 +522,7 @@ func TestUnmarshalNext(t *testing.T) { } p := &pb.Simple{} - err := new(Unmarshaler).UnmarshalNext(dec, p) + err := UnmarshalNext(dec, p) if err != io.EOF { t.Errorf("eof: got %v, expected io.EOF", err) } @@ -547,7 +535,6 @@ var unmarshalingShouldError = []struct { }{ {"a value", "666", new(pb.Simple)}, {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, - {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, } diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go index cd6ff1cde..a5444a2de 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -72,16 +72,12 @@ func (*Simple3) ProtoMessage() {} func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type Mappy struct { - Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" 
protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` - S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` } func (m *Mappy) Reset() { *m = Mappy{} } @@ -131,34 +127,6 @@ func (m *Mappy) GetEnumy() map[string]Numeral { return nil } -func (m *Mappy) GetS32Booly() map[int32]bool { - if m != nil { - return m.S32Booly - } - return nil -} - -func (m *Mappy) GetS64Booly() map[int64]bool { - if m != nil { - return m.S64Booly - } - return nil -} - -func (m *Mappy) GetU32Booly() map[uint32]bool { - if m != nil { - return m.U32Booly - } - return nil -} - -func (m *Mappy) GetU64Booly() map[uint64]bool { - if m != nil { - return m.U64Booly - } - return nil -} - func init() { proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") @@ -168,33 +136,28 @@ func init() { func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 444 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x6b, 0xdb, 0x30, - 0x14, 0x87, 0xe7, 0xa4, 0x4e, 0xec, 0x17, 0xba, 0x19, 0x31, 0x98, 0x58, 0x2f, 0xa1, 0x30, 0x08, - 0x83, 0xf9, 0x90, 0x8c, 0xad, 0x6c, 0xa7, 0x74, 0xf4, 0x50, 0x46, 0x1d, 0x70, 0x09, 0x3b, 0x96, - 0x78, 0x13, 0x65, 0x9e, 0x6d, 0x19, 0xdb, 0x1a, 0xe8, 0x8f, 0x1f, 0x8c, 0x27, 0xcb, 0xb5, 0x6c, - 0x14, 0xd2, 0x9b, 0xcc, 0xef, 0xfb, 0xf2, 0x9e, 0xf4, 0x1e, 0x81, 0x37, 0x39, 0xaf, 0xd8, 0x43, - 0xc3, 0xea, 0xe6, 0x81, 0x27, 0x29, 0xfb, 0xd9, 0xd4, 0x61, 0x59, 0xf1, 0x86, 0x93, 0x59, 0x5a, - 0xf3, 0xa2, 0x4c, 0x2e, 0x2f, 0x60, 0x7e, 0xff, 0x3b, 0x2f, 0x33, 0xb6, 0x21, 0x01, 0x4c, 0x7f, - 0x89, 0x84, 0x3a, 0x4b, 0x67, 0xe5, 0xc4, 0x78, 0xbc, 0xfc, 0xe7, 0x81, 0x7b, 0x77, 0x28, 0x4b, - 0x49, 0x42, 0x70, 0x0b, 0x91, 0xe7, 0x92, 0x3a, 0xcb, 0xe9, 0x6a, 0xb1, 0xa6, 0x61, 0xab, 0x87, - 0x2a, 0x0d, 0x23, 0x8c, 0x6e, 0x8a, 0xa6, 0x92, 0x71, 0x8b, 0x21, 
0x5f, 0x37, 0x55, 0x25, 0xe9, - 0xc4, 0xc6, 0xdf, 0x63, 0xa4, 0x79, 0x85, 0x21, 0xcf, 0x93, 0x34, 0x95, 0x74, 0x6a, 0xe3, 0x77, - 0x18, 0x69, 0x5e, 0x61, 0xc8, 0x27, 0xe2, 0xf1, 0x51, 0xd2, 0x33, 0x1b, 0x7f, 0x8d, 0x91, 0xe6, - 0x15, 0xa6, 0x78, 0xce, 0x33, 0x49, 0x5d, 0x2b, 0x8f, 0x51, 0xc7, 0xe3, 0x19, 0x79, 0x56, 0x88, - 0x5c, 0xd2, 0x99, 0x8d, 0xbf, 0xc1, 0x48, 0xf3, 0x0a, 0x23, 0x9f, 0xc1, 0xab, 0x37, 0xeb, 0xb6, - 0xc4, 0x5c, 0x29, 0x17, 0xa3, 0x2b, 0xeb, 0xb4, 0xb5, 0x9e, 0x60, 0x25, 0x7e, 0xfa, 0xd8, 0x8a, - 0x9e, 0x55, 0xd4, 0x69, 0x27, 0xea, 0x4f, 0x14, 0x45, 0x57, 0xd1, 0xb7, 0x89, 0xfb, 0x61, 0x45, - 0x61, 0x54, 0x14, 0x5d, 0x45, 0xb0, 0x8a, 0xc3, 0x8a, 0x1d, 0xfc, 0xf6, 0x0a, 0xa0, 0x1f, 0x34, - 0x6e, 0xcb, 0x1f, 0x26, 0xd5, 0xb6, 0x4c, 0x63, 0x3c, 0x92, 0xd7, 0xe0, 0xfe, 0x3d, 0x64, 0x82, - 0xd1, 0xc9, 0xd2, 0x59, 0xb9, 0x71, 0xfb, 0xf1, 0x65, 0x72, 0xe5, 0xa0, 0xd9, 0x8f, 0xdc, 0x34, - 0x7d, 0x8b, 0xe9, 0x9b, 0xe6, 0x2d, 0x40, 0x3f, 0x7c, 0xd3, 0x74, 0x5b, 0xf3, 0x9d, 0x69, 0x2e, - 0xd6, 0xaf, 0xba, 0x9b, 0xe8, 0x9d, 0x1e, 0x35, 0xd1, 0xef, 0xc5, 0xa9, 0xf6, 0xfd, 0xb1, 0xf9, - 0xf4, 0x20, 0xa6, 0xe9, 0x59, 0x4c, 0x6f, 0xd4, 0x7e, 0xbf, 0x2b, 0x96, 0x8b, 0x0f, 0xda, 0x7f, - 0xd9, 0xb7, 0x1f, 0x89, 0x9c, 0x55, 0x87, 0xcc, 0xfc, 0xa9, 0xaf, 0x70, 0x3e, 0xd8, 0x21, 0xcb, - 0x63, 0x1c, 0xef, 0x03, 0x65, 0x73, 0xaa, 0xa7, 0xae, 0x3f, 0x96, 0xf7, 0xc7, 0x2a, 0x9f, 0x3f, - 0x47, 0x3e, 0x56, 0xf9, 0xec, 0x84, 0xfc, 0xfe, 0x03, 0xcc, 0xf5, 0x4b, 0x90, 0x05, 0xcc, 0xf7, - 0xd1, 0xf7, 0x68, 0xf7, 0x23, 0x0a, 0x5e, 0x10, 0x80, 0xd9, 0x36, 0xde, 0x5e, 0xdf, 0x7e, 0x0b, - 0x1c, 0xe2, 0x83, 0x1b, 0xef, 0xee, 0xb6, 0x51, 0x30, 0x49, 0x66, 0xea, 0xaf, 0x6d, 0xf3, 0x3f, - 0x00, 0x00, 0xff, 0xff, 0xa2, 0x4b, 0xe1, 0x77, 0xf5, 0x04, 0x00, 0x00, + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x93, 0xcd, 0x4a, 0xf3, 0x40, + 0x18, 0x85, 0xbf, 0x34, 0xdf, 0xa4, 0xcd, 0x5b, 0xd0, 0x30, 0x08, 0x0e, 0xba, 0x91, 0x82, 0x50, + 0x04, 0xb3, 0x68, 0x37, 0xc5, 0x5d, 0x2b, 0x5d, 0x14, 0x69, 0x0a, 0x29, 0xe2, 0xb2, 0x34, 0x3a, + 0x14, 0x6b, 0xd2, 0x09, 0xf9, 0x11, 0xe6, 0x82, 0xbc, 0x4f, 0xf3, 0x4e, 0x52, 0x33, 0x96, 0x01, + 0x77, 0xd3, 0x9e, 0xe7, 0x09, 0x67, 0x4e, 0x08, 0x5c, 0x26, 0x22, 0xe3, 0x9b, 0x82, 0xe7, 0xc5, + 0x46, 0x44, 0x7b, 0xfe, 0x5a, 0xe4, 0x7e, 0x9a, 0x89, 0x42, 0x50, 0x67, 0x9f, 0x8b, 0x43, 0x1a, + 0x0d, 0xae, 0xa1, 0xbb, 0x7e, 0x4f, 0xd2, 0x98, 0x8f, 0xa9, 0x07, 0xf6, 0x5b, 0x19, 0x31, 0xeb, + 0xc6, 0x1a, 0x5a, 0x21, 0x1e, 0x07, 0x5f, 0x04, 0xc8, 0x72, 0x9b, 0xa6, 0x92, 0xfa, 0x40, 0x0e, + 0x65, 0x92, 0xc8, 0x2a, 0xb5, 0x87, 0xfd, 0x11, 0xf3, 0x6b, 0xdd, 0x57, 0xa9, 0x1f, 0x60, 0x34, + 0x3f, 0x14, 0x99, 0x0c, 0x6b, 0x0c, 0xf9, 0xbc, 0xc8, 0x32, 0xc9, 0x3a, 0x26, 0x7e, 0x8d, 0x51, + 0xc3, 0x2b, 0x0c, 0xf9, 0xaa, 0xdf, 0x5e, 0x32, 0xdb, 0xc4, 0xaf, 0x30, 0x6a, 0x78, 0x85, 0x21, + 0x1f, 0x95, 0xbb, 0x9d, 0x64, 0xff, 0x4d, 0xfc, 0x0c, 0xa3, 0x86, 0x57, 0x98, 0xe2, 0x85, 0x88, + 0x25, 0x23, 0x46, 0x1e, 0xa3, 0x23, 0x8f, 0x67, 0xe4, 0x79, 0x75, 0x13, 0xc9, 0x1c, 0x13, 0x3f, + 0xc7, 0xa8, 0xe1, 0x15, 0x76, 0x35, 0x01, 0x68, 0x47, 0xc0, 0x25, 0x3f, 0xb8, 0x54, 0x4b, 0xda, + 0x21, 0x1e, 0xe9, 0x05, 0x90, 0xcf, 0x6d, 0x5c, 0xf2, 0x6a, 0x0f, 0x6b, 0x48, 0xc2, 0xfa, 0xc7, + 0x43, 0x67, 0x62, 0xa1, 0xd9, 0xce, 0xa1, 0x9b, 0xae, 0xc1, 0x74, 0x75, 0x73, 0x01, 0xd0, 0x0e, + 0xa3, 0x9b, 0xa4, 0x36, 0x6f, 0x75, 0xb3, 0x3f, 0x3a, 0x3f, 0xde, 0xa1, 0x79, 0xdf, 0x27, 0x25, + 0xda, 0xcd, 0xfe, 0xaa, 0xef, 0x9e, 0x9a, 0x3f, 0xeb, 0xe9, 0x66, 
0xcf, 0x60, 0xf6, 0x4e, 0xea,
+	0xb7, 0x3b, 0x1a, 0x2e, 0xfe, 0xab, 0xfe, 0x59, 0x5b, 0xbf, 0xda, 0x99, 0x67, 0xdb, 0x58, 0x7b,
+	0xd4, 0xdd, 0x3d, 0x74, 0x9b, 0x7f, 0x69, 0x1f, 0xba, 0xcf, 0xc1, 0x53, 0xb0, 0x7a, 0x09, 0xbc,
+	0x7f, 0x14, 0xc0, 0x99, 0x86, 0xd3, 0xd9, 0xe2, 0xd1, 0xb3, 0xa8, 0x0b, 0x24, 0x5c, 0x2d, 0xa7,
+	0x81, 0xd7, 0x89, 0x1c, 0xf5, 0x09, 0x8c, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x04, 0xff,
+	0x62, 0x1d, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
index 43b440e2d..511f021f9 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
@@ -50,8 +50,4 @@ message Mappy {
   map<int64, string> buggy = 4;
   map<bool, bool> booly = 5;
   map<string, Numeral> enumy = 6;
-  map<int32, bool> s32booly = 7;
-  map<int64, bool> s64booly = 8;
-  map<uint32, bool> u32booly = 9;
-  map<uint64, bool> u64booly = 10;
 }
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
index 104a308d8..284f7a87f 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
@@ -339,7 +339,6 @@ type MsgWithOneof struct {
 	//	*MsgWithOneof_Title
 	//	*MsgWithOneof_Salary
 	//	*MsgWithOneof_Country
-	//	*MsgWithOneof_HomeAddress
 	Union            isMsgWithOneof_Union `protobuf_oneof:"union"`
 	XXX_unrecognized []byte               `json:"-"`
 }
@@ -362,14 +361,10 @@ type MsgWithOneof_Salary struct {
 type MsgWithOneof_Country struct {
 	Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"`
 }
-type MsgWithOneof_HomeAddress struct {
-	HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"`
-}
 
-func (*MsgWithOneof_Title) isMsgWithOneof_Union()       {}
-func (*MsgWithOneof_Salary) isMsgWithOneof_Union()      {}
-func (*MsgWithOneof_Country) isMsgWithOneof_Union()     {}
-func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {}
+func (*MsgWithOneof_Title) isMsgWithOneof_Union()   {}
+func (*MsgWithOneof_Salary) isMsgWithOneof_Union()  {}
+func (*MsgWithOneof_Country) isMsgWithOneof_Union() {}
 
 func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union {
 	if m != nil {
@@ -399,20 +394,12 @@ func (m *MsgWithOneof) GetCountry() string {
 	return ""
 }
 
-func (m *MsgWithOneof) GetHomeAddress() string {
-	if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok {
-		return x.HomeAddress
-	}
-	return ""
-}
-
 // XXX_OneofFuncs is for the internal use of the proto package.
func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ (*MsgWithOneof_Title)(nil), (*MsgWithOneof_Salary)(nil), (*MsgWithOneof_Country)(nil), - (*MsgWithOneof_HomeAddress)(nil), } } @@ -429,9 +416,6 @@ func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { case *MsgWithOneof_Country: b.EncodeVarint(3<<3 | proto.WireBytes) b.EncodeStringBytes(x.Country) - case *MsgWithOneof_HomeAddress: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.HomeAddress) case nil: default: return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) @@ -463,13 +447,6 @@ func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.B x, err := b.DecodeStringBytes() m.Union = &MsgWithOneof_Country{x} return true, err - case 4: // union.home_address - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &MsgWithOneof_HomeAddress{x} - return true, err default: return false, nil } @@ -490,10 +467,6 @@ func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Country))) n += len(x.Country) - case *MsgWithOneof_HomeAddress: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.HomeAddress))) - n += len(x.HomeAddress) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -699,71 +672,68 @@ func init() { func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 1055 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x95, 0x51, 0x73, 0xdb, 0x44, - 0x10, 0xc7, 0x23, 0xc9, 0x96, 0xec, 0x73, 0x12, 0xcc, 0x4d, 0x4a, 0x15, 0x13, 0x40, 0x63, 0x4a, - 0x11, 0x85, 0xba, 0x83, 0xe2, 0xf1, 0x30, 0x85, 0x97, 0xa4, 0x31, 0x94, 0x81, 0x94, 0x99, 0x4b, - 0x43, 0x1f, 0x3d, 0x72, 0x7c, 0x71, 0x55, 0x64, 0x9d, 0xe7, 0xee, 0x44, 0xea, 0x81, 0x87, 0x3c, - 0xf3, 0xcc, 0x57, 0x80, 0x8f, 0xc0, 0x27, 0xe2, 0x83, 0x74, 0x76, 0x4f, 0xb2, 0x12, 0x3b, 0x7e, - 0x8a, 0xf7, 0xf6, 0xbf, 0xff, 0x9c, 0x7e, 0xb7, 0x77, 0x4b, 0xa8, 0xe6, 0x4a, 0x8f, 0xc4, 0xf8, - 0x0d, 0xbf, 0xd0, 0xaa, 0x37, 0x97, 0x42, 0x0b, 0xea, 0xbe, 0x51, 0x22, 0x9b, 0x8f, 0x3b, 0xfb, - 0x53, 0x21, 0xa6, 0x29, 0x7f, 0x82, 0xab, 0xe3, 0xfc, 0xf2, 0x49, 0x9c, 0x2d, 0x8c, 0xa4, 0xf3, - 0xf1, 0x6a, 0x6a, 0x92, 0xcb, 0x58, 0x27, 0x22, 0x2b, 0xf2, 0x07, 0xab, 0x79, 0xa5, 0x65, 0x7e, - 0xa1, 0x8b, 0xec, 0x27, 0xab, 0x59, 0x9d, 0xcc, 0xb8, 0xd2, 0xf1, 0x6c, 0xbe, 0xc9, 0xfe, 0x4a, - 0xc6, 0xf3, 0x39, 0x97, 0xc5, 0x0e, 0xbb, 0xff, 0xd8, 0xc4, 0x3d, 0x4b, 0x66, 0xf3, 0x94, 0xd3, - 0x7b, 0xc4, 0x15, 0xa3, 0xb1, 0x10, 0xa9, 0x6f, 0x05, 0x56, 0xd8, 0x60, 0x75, 0x71, 0x2c, 0x44, - 0x4a, 0xef, 0x13, 0x4f, 0x8c, 0x92, 0x4c, 0x1f, 0x46, 0xbe, 0x1d, 0x58, 0x61, 0x9d, 0xb9, 0xe2, - 0x47, 0x88, 0x96, 0x89, 0x41, 0xdf, 0x77, 0x02, 0x2b, 0x74, 0x4c, 0x62, 0xd0, 0xa7, 0xfb, 0xa4, - 0x21, 0x46, 0xb9, 0x29, 0xa9, 0x05, 0x56, 0xb8, 0xc3, 0x3c, 0x71, 0x8e, 0x61, 0x95, 0x1a, 0xf4, - 0xfd, 0x7a, 0x60, 0x85, 0xb5, 0x22, 0x55, 0x56, 0x29, 0x53, 0xe5, 0x06, 0x56, 0xf8, 0x3e, 0xf3, - 0xc4, 0xd9, 0x8d, 0x2a, 0x65, 0xaa, 0xbc, 0xc0, 0x0a, 0x69, 0x91, 0x1a, 0xf4, 0xcd, 0x26, 0x2e, - 0x53, 
0x11, 0x6b, 0xbf, 0x11, 0x58, 0xa1, 0xcd, 0x5c, 0xf1, 0x3d, 0x44, 0xa6, 0x66, 0x22, 0xf2, - 0x71, 0xca, 0xfd, 0x66, 0x60, 0x85, 0x16, 0xf3, 0xc4, 0x09, 0x86, 0x85, 0x9d, 0x96, 0x49, 0x36, - 0xf5, 0x49, 0x60, 0x85, 0x4d, 0xb0, 0xc3, 0xd0, 0xd8, 0x8d, 0x17, 0x9a, 0x2b, 0xbf, 0x15, 0x58, - 0xe1, 0x36, 0x73, 0xc5, 0x31, 0x44, 0xdd, 0x7f, 0x6d, 0xe2, 0x31, 0x3e, 0xe7, 0xb1, 0x56, 0x00, - 0x4a, 0x96, 0xa0, 0x1c, 0x00, 0x25, 0x4b, 0x50, 0x72, 0x09, 0xca, 0x01, 0x50, 0x72, 0x09, 0x4a, - 0x2e, 0x41, 0x39, 0x00, 0x4a, 0x2e, 0x41, 0xc9, 0x0a, 0x94, 0x03, 0xa0, 0x64, 0x05, 0x4a, 0x56, - 0xa0, 0x1c, 0x00, 0x25, 0x2b, 0x50, 0xb2, 0x02, 0xe5, 0x00, 0x28, 0x79, 0x76, 0xa3, 0x6a, 0x09, - 0xca, 0x01, 0x50, 0xb2, 0x02, 0x25, 0x97, 0xa0, 0x1c, 0x00, 0x25, 0x97, 0xa0, 0x64, 0x05, 0xca, - 0x01, 0x50, 0xb2, 0x02, 0x25, 0x2b, 0x50, 0x0e, 0x80, 0x92, 0x15, 0x28, 0xb9, 0x04, 0xe5, 0x00, - 0x28, 0x69, 0x40, 0xfd, 0x67, 0x13, 0xf7, 0x55, 0x32, 0x99, 0x72, 0x4d, 0x1f, 0x91, 0xfa, 0x85, - 0x48, 0x85, 0xc4, 0x7e, 0xda, 0x8d, 0xf6, 0x7a, 0xe6, 0x36, 0xf4, 0x4c, 0xba, 0xf7, 0x0c, 0x72, - 0xcc, 0x48, 0xe8, 0x63, 0xf0, 0x33, 0x6a, 0x80, 0xb7, 0x49, 0xed, 0x4a, 0xfc, 0x4b, 0x1f, 0x12, - 0x57, 0x61, 0xd7, 0xe2, 0x01, 0xb6, 0xa2, 0xdd, 0x52, 0x6d, 0x7a, 0x99, 0x15, 0x59, 0xfa, 0x85, - 0x01, 0x82, 0x4a, 0xd8, 0xe7, 0xba, 0x12, 0x00, 0x15, 0x52, 0x4f, 0x9a, 0x03, 0xf6, 0xf7, 0xd0, - 0xf3, 0xbd, 0x52, 0x59, 0x9c, 0x3b, 0x2b, 0xf3, 0xf4, 0x2b, 0xd2, 0x94, 0xa3, 0x52, 0x7c, 0x0f, - 0x6d, 0xd7, 0xc4, 0x0d, 0x59, 0xfc, 0xea, 0x7e, 0x46, 0xea, 0x66, 0xd3, 0x1e, 0x71, 0xd8, 0xf0, - 0xa4, 0xbd, 0x45, 0x9b, 0xa4, 0xfe, 0x03, 0x1b, 0x0e, 0x5f, 0xb4, 0x2d, 0xda, 0x20, 0xb5, 0xe3, - 0x9f, 0xcf, 0x87, 0x6d, 0xbb, 0xfb, 0xb7, 0x4d, 0x6a, 0xa7, 0xf1, 0x5c, 0xd1, 0x6f, 0x49, 0x6b, - 0x66, 0xda, 0x05, 0xd8, 0x63, 0x8f, 0xb5, 0xa2, 0x0f, 0x4b, 0x7f, 0x90, 0xf4, 0x4e, 0xb1, 0x7f, - 0xce, 0xb4, 0x1c, 0x66, 0x5a, 0x2e, 0x58, 0x73, 0x56, 0xc6, 0xf4, 0x88, 0xec, 0xcc, 0xb0, 0x37, - 0xcb, 0xaf, 0xb6, 0xb1, 0xfc, 0xa3, 0xdb, 0xe5, 0xd0, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x6b, 0x56, - 0xad, 0x74, 0xbe, 0x23, 0xbb, 0xb7, 0xfd, 0x69, 0x9b, 0x38, 0xbf, 0xf1, 0x05, 0x1e, 0xa3, 0xc3, - 0xe0, 0x27, 0xdd, 0x23, 0xf5, 0xdf, 0xe3, 0x34, 0xe7, 0xf8, 0x24, 0x34, 0x99, 0x09, 0x9e, 0xda, - 0xdf, 0x58, 0x9d, 0x17, 0xa4, 0xbd, 0x6a, 0x7f, 0xb3, 0xbe, 0x61, 0xea, 0x1f, 0xdc, 0xac, 0x5f, - 0x3f, 0x94, 0xca, 0xaf, 0xfb, 0x97, 0x45, 0xb6, 0x4f, 0xd5, 0xf4, 0x55, 0xa2, 0x5f, 0xff, 0x92, - 0x71, 0x71, 0x49, 0x3f, 0x20, 0x75, 0x9d, 0xe8, 0x94, 0xa3, 0x5d, 0xf3, 0xf9, 0x16, 0x33, 0x21, - 0xf5, 0x89, 0xab, 0xe2, 0x34, 0x96, 0x0b, 0xf4, 0x74, 0x9e, 0x6f, 0xb1, 0x22, 0xa6, 0x1d, 0xe2, - 0x3d, 0x13, 0x39, 0xec, 0x04, 0x1f, 0x2a, 0xa8, 0xf1, 0x2e, 0xcc, 0x02, 0xfd, 0x94, 0x6c, 0xbf, - 0x16, 0x33, 0x3e, 0x8a, 0x27, 0x13, 0xc9, 0x95, 0xc2, 0xf7, 0x0a, 0x04, 0x2d, 0x58, 0x3d, 0x32, - 0x8b, 0xc7, 0x1e, 0xa9, 0xe7, 0x59, 0x22, 0xb2, 0xee, 0x43, 0x52, 0x63, 0x3c, 0x4e, 0xab, 0xcf, - 0xb7, 0xf0, 0x65, 0x31, 0xc1, 0xa3, 0x46, 0x63, 0xd2, 0xbe, 0xbe, 0xbe, 0xbe, 0xb6, 0xbb, 0x57, - 0xf0, 0x1f, 0xe1, 0x4b, 0xde, 0xd2, 0x03, 0xd2, 0x4c, 0x66, 0xf1, 0x34, 0xc9, 0x60, 0x67, 0x46, - 0x5e, 0x2d, 0x54, 0x25, 0xd1, 0x09, 0xd9, 0x95, 0x3c, 0x4e, 0x47, 0xfc, 0xad, 0xe6, 0x99, 0x4a, - 0x44, 0x46, 0xb7, 0xab, 0x96, 0x8a, 0x53, 0xff, 0x8f, 0xdb, 0x3d, 0x59, 0xd8, 0xb3, 0x1d, 0x28, - 0x1a, 0x96, 0x35, 0xdd, 0xff, 0x6b, 0x84, 0xfc, 0x94, 0x89, 0xab, 0xec, 0xe5, 0x62, 0xce, 0x15, - 0x7d, 0x40, 0xec, 0x38, 0xf3, 0x77, 0xb1, 0x74, 0xaf, 0x67, 0x46, 0x41, 0xaf, 0x1c, 0x05, 0xbd, - 0xa3, 0x6c, 0xc1, 0xec, 0x38, 
0xa3, 0x5f, 0x12, 0x67, 0x92, 0x9b, 0x5b, 0xda, 0x8a, 0xf6, 0xd7, - 0x64, 0x27, 0xc5, 0x40, 0x62, 0xa0, 0xa2, 0x9f, 0x13, 0x5b, 0x69, 0x7f, 0x1b, 0xb5, 0xf7, 0xd7, - 0xb4, 0x67, 0x38, 0x9c, 0x98, 0xad, 0xe0, 0xf6, 0xdb, 0x5a, 0x15, 0xe7, 0xdb, 0x59, 0x13, 0xbe, - 0x2c, 0xe7, 0x14, 0xb3, 0xb5, 0xa2, 0x3d, 0xe2, 0x4c, 0xc6, 0x29, 0x9e, 0x4e, 0x2b, 0x3a, 0x58, - 0xdf, 0x01, 0x3e, 0x47, 0xbf, 0x02, 0x64, 0x06, 0x42, 0xfa, 0x98, 0x38, 0x97, 0xa9, 0xc6, 0xc3, - 0x82, 0xab, 0xb1, 0xaa, 0xc7, 0x87, 0xad, 0x90, 0x5f, 0xa6, 0x1a, 0xe4, 0x49, 0x31, 0x70, 0xee, - 0x92, 0x63, 0xb3, 0x17, 0xf2, 0x64, 0xd0, 0x87, 0xdd, 0xe4, 0x83, 0x3e, 0x0e, 0xa1, 0xbb, 0x76, - 0x73, 0x7e, 0x53, 0x9f, 0x0f, 0xfa, 0x68, 0x7f, 0x18, 0xe1, 0x64, 0xda, 0x60, 0x7f, 0x18, 0x95, - 0xf6, 0x87, 0x11, 0xda, 0x1f, 0x46, 0x38, 0xae, 0x36, 0xd9, 0x2f, 0xf5, 0x39, 0xea, 0x6b, 0x38, - 0x6c, 0x9a, 0x1b, 0x50, 0xc2, 0x6d, 0x33, 0x72, 0xd4, 0x81, 0x3f, 0xbc, 0x1b, 0x64, 0x83, 0xbf, - 0x79, 0xc0, 0x0b, 0x7f, 0xa5, 0x25, 0xfd, 0x9a, 0xd4, 0xab, 0x89, 0x77, 0xd7, 0x07, 0xe0, 0xc3, - 0x6e, 0x0a, 0x8c, 0xf2, 0x69, 0x40, 0x6a, 0x59, 0x3c, 0xe3, 0x2b, 0x2d, 0xfa, 0x27, 0xbe, 0x05, - 0x98, 0x79, 0x17, 0x00, 0x00, 0xff, 0xff, 0xda, 0x8b, 0x4a, 0x7a, 0x0e, 0x09, 0x00, 0x00, + // 1006 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x55, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0xae, 0xb5, 0x96, 0x65, 0xaf, 0x53, 0x63, 0x76, 0x52, 0xaa, 0x98, 0x00, 0x1d, 0x0f, 0x14, + 0x28, 0xe0, 0x0e, 0x6e, 0xa7, 0xc3, 0x14, 0x6e, 0x9a, 0xc6, 0xfc, 0x0c, 0xa4, 0xcc, 0x6c, 0x1a, + 0x7a, 0xe9, 0x91, 0x13, 0xc5, 0xa8, 0xc8, 0x5a, 0xcf, 0x6a, 0x45, 0xea, 0x81, 0x0b, 0x1e, 0x82, + 0x57, 0x80, 0x47, 0xe0, 0x89, 0x78, 0x10, 0xce, 0x39, 0x2b, 0x69, 0x1d, 0xbb, 0xa6, 0x37, 0xcd, + 0xd1, 0xf7, 0xe3, 0xa3, 0x6f, 0x8f, 0xce, 0x72, 0x61, 0xe2, 0xdc, 0x4c, 0xd5, 0xec, 0x65, 0x7c, + 0x6e, 0xf2, 0xd1, 0x52, 0x2b, 0xa3, 0x44, 0xeb, 0x65, 0xae, 0xb2, 0xe5, 0x6c, 0x70, 0x30, 0x57, + 0x6a, 0x9e, 0xc6, 0xf7, 0xe9, 0xe9, 0xac, 0xb8, 0xbc, 0x1f, 0x65, 0x2b, 0x4b, 0x19, 0xbc, 0xbb, + 0x09, 0x5d, 0x14, 0x3a, 0x32, 0x89, 0xca, 0x4a, 0xfc, 0x70, 0x13, 0xcf, 0x8d, 0x2e, 0xce, 0x4d, + 0x89, 0xbe, 0xb7, 0x89, 0x9a, 0x64, 0x01, 0x6d, 0x44, 0x8b, 0xe5, 0x2e, 0xfb, 0x2b, 0x1d, 0x2d, + 0x97, 0xb1, 0x2e, 0x3b, 0x1c, 0xfe, 0xe5, 0xf1, 0xd6, 0x69, 0xb2, 0x58, 0xa6, 0xb1, 0xb8, 0xc5, + 0x5b, 0x6a, 0x3a, 0x53, 0x2a, 0x0d, 0x1b, 0x77, 0x1a, 0x1f, 0xb5, 0xa5, 0xaf, 0x8e, 0xa0, 0x10, + 0xb7, 0x79, 0xa0, 0xa6, 0x49, 0x66, 0x1e, 0x8c, 0x43, 0x0f, 0x9e, 0xfb, 0xb2, 0xa5, 0xbe, 0xc3, + 0xaa, 0x06, 0x1e, 0x3d, 0x0c, 0x19, 0x00, 0xcc, 0x02, 0x8f, 0x1e, 0x8a, 0x03, 0xde, 0x56, 0xd3, + 0xc2, 0x4a, 0x9a, 0x80, 0xdc, 0x94, 0x81, 0x3a, 0xa3, 0xd2, 0x41, 0x20, 0xf2, 0x01, 0x6a, 0x96, + 0x50, 0xa5, 0xca, 0xad, 0xaa, 0x05, 0xd0, 0x9b, 0x00, 0x9d, 0xae, 0xa9, 0x72, 0xab, 0x0a, 0x00, + 0x12, 0x25, 0x04, 0x2a, 0x6a, 0xe2, 0x32, 0x55, 0x91, 0x09, 0xdb, 0x80, 0x78, 0xd0, 0xc4, 0xd7, + 0x58, 0x59, 0xcd, 0x85, 0x2a, 0x66, 0x69, 0x1c, 0x76, 0x00, 0x69, 0x80, 0xe6, 0x98, 0xca, 0xd2, + 0xce, 0xe8, 0x24, 0x9b, 0x87, 0x1c, 0xa0, 0x0e, 0xda, 0x51, 0x69, 0xed, 0x66, 0x2b, 0x38, 0xca, + 0xb0, 0x0b, 0xc8, 0x1e, 0xd8, 0x1d, 0x61, 0x35, 0xfc, 0xdb, 0xe3, 0x81, 0x8c, 0x97, 0x71, 0x64, + 0x72, 0x0c, 0x4a, 0x57, 0x41, 0x31, 0x0c, 0x4a, 0x57, 0x41, 0xe9, 0x3a, 0x28, 0x86, 0x41, 0xe9, + 0x3a, 0x28, 0x5d, 0x07, 0xc5, 0x30, 0x28, 0x5d, 0x07, 0xa5, 0x5d, 0x50, 0x0c, 0x83, 0xd2, 0x2e, + 0x28, 0xed, 0x82, 0x62, 0x18, 0x94, 0x76, 0x41, 0x69, 0x17, 0x14, 0xc3, 0xa0, 0xf4, 0xe9, 0x9a, + 0xaa, 0x0e, 
0x8a, 0x61, 0x50, 0xda, 0x05, 0xa5, 0xeb, 0xa0, 0x18, 0x06, 0xa5, 0xeb, 0xa0, 0xb4, + 0x0b, 0x8a, 0x61, 0x50, 0xda, 0x05, 0xa5, 0x5d, 0x50, 0x0c, 0x83, 0xd2, 0x2e, 0x28, 0x5d, 0x07, + 0xc5, 0x30, 0x28, 0x6d, 0x83, 0xfa, 0x07, 0x06, 0xea, 0x45, 0x72, 0x31, 0x8f, 0x8d, 0xb8, 0xc7, + 0xfd, 0x73, 0x95, 0x2a, 0x4d, 0xf3, 0xd4, 0x1b, 0xef, 0x8f, 0xec, 0xd7, 0x30, 0xb2, 0xf0, 0xe8, + 0x29, 0x62, 0xd2, 0x52, 0xc4, 0x67, 0xe8, 0x67, 0xd9, 0x18, 0xde, 0x2e, 0x76, 0x4b, 0xd3, 0xff, + 0xe2, 0x2e, 0x6f, 0xe5, 0x34, 0xb5, 0x74, 0x80, 0xdd, 0x71, 0xaf, 0x62, 0xdb, 0x59, 0x96, 0x25, + 0x2a, 0x3e, 0xb6, 0x81, 0x10, 0x13, 0xfb, 0xdc, 0x66, 0x62, 0x40, 0x25, 0x35, 0xd0, 0xf6, 0x80, + 0xc3, 0x7d, 0xf2, 0x7c, 0xa3, 0x62, 0x96, 0xe7, 0x2e, 0x2b, 0x5c, 0x7c, 0xca, 0x3b, 0x7a, 0x5a, + 0x91, 0x6f, 0x91, 0xed, 0x16, 0xb9, 0xad, 0xcb, 0xbf, 0x86, 0x1f, 0x70, 0xdf, 0x36, 0x1d, 0x70, + 0x26, 0x27, 0xc7, 0xfd, 0x1b, 0xa2, 0xc3, 0xfd, 0x6f, 0xe4, 0x64, 0xf2, 0xac, 0xdf, 0x10, 0x6d, + 0xde, 0x3c, 0xfa, 0xe1, 0x6c, 0xd2, 0xf7, 0x86, 0x7f, 0x7a, 0xbc, 0x79, 0x12, 0x2d, 0x73, 0xf1, + 0x25, 0xef, 0x2e, 0xec, 0xb8, 0x60, 0xf6, 0x34, 0x63, 0xdd, 0xf1, 0xdb, 0x95, 0x3f, 0x52, 0x46, + 0x27, 0x34, 0x3f, 0x70, 0x14, 0x93, 0xcc, 0xe8, 0x95, 0xec, 0x2c, 0xaa, 0x5a, 0x3c, 0xe1, 0x37, + 0x17, 0x34, 0x9b, 0xd5, 0x5b, 0x7b, 0x24, 0x7f, 0xe7, 0xba, 0x1c, 0xe7, 0xd5, 0xbe, 0xb6, 0x35, + 0xe8, 0x2e, 0xdc, 0x93, 0xc1, 0x57, 0xbc, 0x77, 0xdd, 0x5f, 0xf4, 0x39, 0xfb, 0x25, 0x5e, 0xd1, + 0x31, 0x32, 0x89, 0x7f, 0x8a, 0x7d, 0xee, 0xff, 0x1a, 0xa5, 0x45, 0x4c, 0x2b, 0xa1, 0x23, 0x6d, + 0xf1, 0xd8, 0xfb, 0xa2, 0x31, 0x78, 0xc6, 0xfb, 0x9b, 0xf6, 0xeb, 0xfa, 0xb6, 0xd5, 0xbf, 0xbf, + 0xae, 0xdf, 0x3e, 0x14, 0xe7, 0x37, 0x8c, 0xf9, 0xde, 0x49, 0x3e, 0x7f, 0x91, 0x98, 0x9f, 0x7f, + 0xcc, 0x62, 0x75, 0x29, 0xde, 0xe2, 0xbe, 0x49, 0x0c, 0xbc, 0x18, 0xba, 0x75, 0xbe, 0xbd, 0x21, + 0x6d, 0x29, 0x42, 0x98, 0x88, 0x28, 0x8d, 0xf4, 0x8a, 0x2c, 0x19, 0x00, 0x65, 0x2d, 0x06, 0x3c, + 0x78, 0xaa, 0x0a, 0x6c, 0x84, 0xf6, 0x14, 0x6a, 0x82, 0x73, 0xfb, 0xe0, 0x28, 0xe0, 0x7e, 0x91, + 0xc1, 0xb2, 0x1d, 0xde, 0xe5, 0x4d, 0x19, 0x47, 0xa9, 0x7b, 0xb1, 0x06, 0xed, 0x0c, 0x5b, 0xdc, + 0x6b, 0xb7, 0x2f, 0xfa, 0x7f, 0xc0, 0x3f, 0x6f, 0x78, 0x85, 0x66, 0xd8, 0xe3, 0x2b, 0x71, 0xc8, + 0x3b, 0xc9, 0x22, 0x9a, 0x27, 0x19, 0xfe, 0xa8, 0xa5, 0xbb, 0x07, 0x4e, 0x32, 0x3e, 0xe6, 0x3d, + 0x0d, 0xd6, 0xd3, 0xf8, 0x95, 0x89, 0xb3, 0x1c, 0x7e, 0x4c, 0xec, 0xb9, 0x61, 0x89, 0xd2, 0xf0, + 0xb7, 0xeb, 0xd3, 0x56, 0xda, 0xcb, 0x9b, 0x28, 0x9a, 0x54, 0x9a, 0xe1, 0xbf, 0x4d, 0xce, 0xbf, + 0xcf, 0xd4, 0x55, 0xf6, 0x7c, 0xb5, 0x8c, 0x73, 0x08, 0xd0, 0x8b, 0xb2, 0xb0, 0x47, 0xd2, 0xfd, + 0x91, 0x5d, 0xf2, 0xa3, 0x6a, 0xc9, 0x8f, 0x9e, 0x64, 0x2b, 0x09, 0xb8, 0xf8, 0x84, 0x33, 0xb8, + 0x4e, 0xa8, 0xb9, 0xee, 0xf8, 0x60, 0x8b, 0x76, 0x5c, 0x5e, 0x35, 0x12, 0x59, 0xe2, 0x43, 0xee, + 0xe5, 0x26, 0xdc, 0x23, 0xee, 0xed, 0x2d, 0xee, 0x29, 0x5d, 0x3b, 0x12, 0x28, 0xf0, 0x5d, 0x7b, + 0x30, 0xf7, 0xf6, 0xe4, 0x06, 0x5b, 0xc4, 0xe7, 0xd5, 0x0d, 0x24, 0x81, 0x25, 0x46, 0xd0, 0xc1, + 0x2c, 0xa5, 0xe0, 0xbb, 0xe3, 0xc3, 0xed, 0x0e, 0x68, 0xd1, 0xfc, 0x84, 0x21, 0x4b, 0x24, 0xc2, + 0x1e, 0x60, 0x97, 0xa9, 0xa1, 0x6b, 0x03, 0x87, 0x7e, 0x93, 0x4f, 0x2b, 0xab, 0xa4, 0x03, 0x0f, + 0xe9, 0x49, 0x79, 0x95, 0xbc, 0x8e, 0x4e, 0x63, 0x5c, 0xd2, 0x81, 0x87, 0xdd, 0x14, 0x40, 0x6f, + 0xed, 0xe8, 0xe6, 0x6c, 0x9d, 0x0f, 0x44, 0xb2, 0x87, 0x2d, 0x1b, 0xec, 0xb6, 0x7f, 0x30, 0xae, + 0xec, 0x61, 0xfd, 0xa2, 0x3d, 0xd0, 0xdb, 0xff, 0x63, 0x5f, 0xf3, 0x0b, 0xe2, 0x37, 0xe9, 0x1a, + 0xe9, 0xec, 0x88, 0x12, 0xbf, 0x23, 
0x4b, 0x27, 0x1e, 0xfa, 0xe3, 0x46, 0xe0, 0x3b, 0xfc, 0xed, + 0x6a, 0x2e, 0xfd, 0x81, 0x28, 0x3e, 0xe7, 0xbe, 0xbb, 0xcb, 0x5e, 0xf7, 0x02, 0xb4, 0xb2, 0xad, + 0xc0, 0x32, 0x1f, 0xdf, 0xe1, 0xcd, 0x2c, 0x5a, 0xc4, 0x1b, 0x23, 0xfa, 0x3b, 0x7d, 0xe5, 0x84, + 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xca, 0xa2, 0x76, 0x34, 0xe8, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto index d1934a074..911a9d53a 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -96,7 +96,6 @@ message MsgWithOneof { string title = 1; int64 salary = 2; string Country = 3; - string home_address = 4; } } diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go index 41451a407..fd4a94eaf 100644 --- a/vendor/github.com/golang/protobuf/proto/all_test.go +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -420,7 +420,7 @@ func TestMarshalerEncoding(t *testing.T) { name string m Message want []byte - errType reflect.Type + wantErr error }{ { name: "Marshaler that fails", @@ -428,11 +428,9 @@ func TestMarshalerEncoding(t *testing.T) { err: errors.New("some marshal err"), b: []byte{5, 6, 7}, }, - // Since the Marshal method returned bytes, they should be written to the - // buffer. (For efficiency, we assume that Marshal implementations are - // always correct w.r.t. RequiredNotSetError and output.) - want: []byte{5, 6, 7}, - errType: reflect.TypeOf(errors.New("some marshal err")), + // Since there's an error, nothing should be written to buffer. + want: nil, + wantErr: errors.New("some marshal err"), }, { name: "Marshaler that fails with RequiredNotSetError", @@ -448,37 +446,30 @@ func TestMarshalerEncoding(t *testing.T) { 10, 3, // for &msgWithFakeMarshaler 5, 6, 7, // for &fakeMarshaler }, - errType: reflect.TypeOf(&RequiredNotSetError{}), + wantErr: &RequiredNotSetError{}, }, { name: "Marshaler that succeeds", m: &fakeMarshaler{ b: []byte{0, 1, 2, 3, 4, 127, 255}, }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, }, } for _, test := range tests { b := NewBuffer(nil) err := b.Marshal(test.m) - if reflect.TypeOf(err) != test.errType { - t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) + if _, ok := err.(*RequiredNotSetError); ok { + // We're not in package proto, so we can only assert the type in this case. + err = &RequiredNotSetError{} + } + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) } if !reflect.DeepEqual(test.want, b.Bytes()) { t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) } - if size := Size(test.m); size != len(b.Bytes()) { - t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) - } - - m, mErr := Marshal(test.m) - if !bytes.Equal(b.Bytes(), m) { - t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) - } - if !reflect.DeepEqual(err, mErr) { - t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", - test.name, fmt.Sprint(mErr), fmt.Sprint(err)) - } } } @@ -1311,7 +1302,7 @@ func TestEnum(t *testing.T) { // We don't care what the value actually is, just as long as it doesn't crash. 
 func TestPrintingNilEnumFields(t *testing.T) {
 	pb := new(GoEnum)
-	_ = fmt.Sprintf("%+v", pb)
+	fmt.Sprintf("%+v", pb)
 }
 
 // Verify that absent required fields cause Marshal/Unmarshal to return errors.
@@ -1320,7 +1311,7 @@ func TestRequiredFieldEnforcement(t *testing.T) {
 	_, err := Marshal(pb)
 	if err == nil {
 		t.Error("marshal: expected error, got nil")
-	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
+	} else if strings.Index(err.Error(), "Label") < 0 {
 		t.Errorf("marshal: bad error type: %v", err)
 	}
 
@@ -1331,24 +1322,7 @@ func TestRequiredFieldEnforcement(t *testing.T) {
 	err = Unmarshal(buf, pb)
 	if err == nil {
 		t.Error("unmarshal: expected error, got nil")
-	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") {
-		t.Errorf("unmarshal: bad error type: %v", err)
-	}
-}
-
-// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
-func TestRequiredFieldEnforcementGroups(t *testing.T) {
-	pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}
-	if _, err := Marshal(pb); err == nil {
-		t.Error("marshal: expected error, got nil")
-	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") {
-		t.Errorf("marshal: bad error type: %v", err)
-	}
-
-	buf := []byte{11, 12}
-	if err := Unmarshal(buf, pb); err == nil {
-		t.Error("unmarshal: expected error, got nil")
-	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") {
+	} else if strings.Index(err.Error(), "{Unknown}") < 0 {
 		t.Errorf("unmarshal: bad error type: %v", err)
 	}
 }
diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go
index 1a3c22ed4..83492c56a 100644
--- a/vendor/github.com/golang/protobuf/proto/any_test.go
+++ b/vendor/github.com/golang/protobuf/proto/any_test.go
@@ -239,7 +239,7 @@ func TestUnmarshalGolden(t *testing.T) {
 	}
 }
 
-func TestMarshalUnknownAny(t *testing.T) {
+func TestMarshalUnknownAny(t *testing.T) {
 	m := &pb.Message{
 		Anything: &anypb.Any{
 			TypeUrl: "foo",
@@ -260,41 +260,13 @@ func TestMarshalUnknownAny(t *testing.T) {
 func TestAmbiguousAny(t *testing.T) {
 	pb := &anypb.Any{}
 	err := proto.UnmarshalText(`
+	[type.googleapis.com/proto3_proto.Nested]: <
+	  bunny: "Monty"
+	>
 	type_url: "ttt/proto3_proto.Nested"
-	value: "\n\x05Monty"
 	`, pb)
 	t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
 	if err != nil {
 		t.Errorf("failed to parse ambiguous Any message: %v", err)
 	}
 }
-
-func TestUnmarshalOverwriteAny(t *testing.T) {
-	pb := &anypb.Any{}
-	err := proto.UnmarshalText(`
-	[type.googleapis.com/a/path/proto3_proto.Nested]: <
-	  bunny: "Monty"
-	>
-	[type.googleapis.com/a/path/proto3_proto.Nested]: <
-	  bunny: "Rabbit of Caerbannog"
-	>
-	`, pb)
-	want := `line 7: Any message unpacked multiple times, or "type_url" already set`
-	if err.Error() != want {
-		t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
-	}
-}
-
-func TestUnmarshalAnyMixAndMatch(t *testing.T) {
-	pb := &anypb.Any{}
-	err := proto.UnmarshalText(`
-	value: "\n\x05Monty"
-	[type.googleapis.com/a/path/proto3_proto.Nested]: <
-	  bunny: "Rabbit of Caerbannog"
-	>
-	`, pb)
-	want := `line 5: Any message unpacked multiple times, or "value" already set`
-	if err.Error() != want {
-		t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go
b/vendor/github.com/golang/protobuf/proto/clone_test.go index f607ff49e..76720f18b 100644 --- a/vendor/github.com/golang/protobuf/proto/clone_test.go +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -195,9 +195,6 @@ var mergeTests = []struct { NameMapping: map[int32]string{6: "Nigel"}, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, @@ -206,12 +203,6 @@ var mergeTests = []struct { 6: "Bruce", // should be overwritten 7: "Andrew", }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(3.0), - Exact: proto.Bool(true), - }, // the entire message should be overwritten - }, }, want: &pb.MessageWithMap{ NameMapping: map[int32]string{ @@ -220,9 +211,6 @@ var mergeTests = []struct { }, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, @@ -266,27 +254,6 @@ var mergeTests = []struct { Union: &pb.Communique_Name{"Bobby Tables"}, }, }, - { - src: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, // replace - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert - }, - }, - dst: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep - }, - }, - want: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, - }, - }, - }, } func TestMerge(t *testing.T) { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f..07288a250 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -61,6 +61,7 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -77,7 +78,13 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + i := p.index l := len(p.buf) @@ -100,107 +107,6 @@ func (p *Buffer) decodeVarintSlow() (x uint64, err error) { return } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -434,8 +340,6 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { @@ -474,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group wire := int(u & 0x7) if wire == WireEndGroup { if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go deleted file mode 100644 index b1f130449..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode_test.go +++ /dev/null @@ -1,256 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "fmt" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" -) - -var ( - bytesBlackhole []byte - msgBlackhole = new(tpb.Message) -) - -// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and -// 2 bytes long). -func BenchmarkVarint32ArraySmall(b *testing.B) { - for i := uint(1); i <= 10; i++ { - dist := genInt32Dist([7]int{0, 3, 1}, 1< maxMarshalSize { @@ -302,7 +309,7 @@ func Size(pb Message) (n int) { } if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. + stats.Size++ } return @@ -1007,6 +1014,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() + n += len(p.tagcode) n += sizeRawBytes(data) continue } diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go deleted file mode 100644 index 0b36a0e9f..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "strconv" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" - "github.com/golang/protobuf/ptypes" -) - -var ( - blackhole []byte -) - -// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the -// same. -func BenchmarkAny(b *testing.B) { - data := make([]byte, 1<<20) - quantum := 1 << 10 - for i := uint(0); i <= 10; i++ { - b.Run(strconv.Itoa(quantum<`), buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), @@ -635,17 +546,6 @@ func TestOneofParsing(t *testing.T) { if !Equal(m, want) { t.Errorf("\n got %v\nwant %v", m, want) } - - const inOverwrite = `name:"Shrek" number:42` - m = new(Communique) - testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'" - if err := UnmarshalText(inOverwrite, m); err == nil { - t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr) - } else if err.Error() != testErr { - t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v", - err.Error(), testErr) - } - } var benchInput string diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 2e06cb3a9..9690d1dc7 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -21,7 +21,6 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions - OneofOptions EnumOptions EnumValueOptions ServiceOptions @@ -690,9 +689,8 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } @@ -707,13 +705,6 @@ func (m *OneofDescriptorProto) GetName() string { return "" } -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - // Describes an enum type. type EnumDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -1328,33 +1319,6 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { return nil } -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. @@ -1373,7 +1337,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } var extRange_EnumOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1421,7 +1385,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1462,7 +1426,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } var extRange_ServiceOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1503,7 +1467,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } var extRange_MethodOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1551,7 +1515,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1617,7 +1581,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() 
string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor0, []int{16, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1687,7 +1651,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1783,7 +1747,7 @@ type SourceCodeInfo_Location struct { func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1833,7 +1797,7 @@ type GeneratedCodeInfo struct { func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1862,7 +1826,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{19, 0} + return fileDescriptor0, []int{18, 0} } func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -1908,7 +1872,6 @@ func init() { proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") @@ -1929,148 +1892,146 @@ func init() { func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2287 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x73, 0xdb, 0xc6, - 0x15, 0x0f, 0xf8, 0x25, 0xf2, 0x91, 0xa2, 0x56, 0x2b, 0xc5, 0x86, 0xe5, 0x38, 0x96, 0x19, 0x3b, - 0x96, 0xed, 0x96, 0xce, 0xc8, 0x1f, 0x71, 0x94, 0x4e, 0x3a, 0x94, 0x08, 0x2b, 0xf4, 0x50, 0x22, - 0x0b, 0x4a, 0xad, 0x93, 0x1e, 0x30, 0x2b, 0x60, 0x49, 0xc1, 0x06, 0x17, 0x28, 0x00, 0xda, 0x56, - 0x4e, 0x9e, 0xe9, 0xa9, 0xc7, 0xde, 0x3a, 0x6d, 0xa7, 0xd3, 
0xc9, 0x25, 0x33, 0xfd, 0x03, 0x7a, - 0xe8, 0xbd, 0xd7, 0xce, 0xf4, 0xde, 0x63, 0x67, 0xda, 0xff, 0xa0, 0xd7, 0xce, 0xee, 0x02, 0x20, - 0xf8, 0x15, 0xab, 0x99, 0x49, 0xd2, 0x93, 0xb8, 0xbf, 0xf7, 0x7b, 0x0f, 0x6f, 0xdf, 0x3e, 0xbc, - 0xf7, 0xb0, 0x82, 0xcd, 0x81, 0xeb, 0x0e, 0x1c, 0x7a, 0xd7, 0xf3, 0xdd, 0xd0, 0x3d, 0x19, 0xf5, - 0xef, 0x5a, 0x34, 0x30, 0x7d, 0xdb, 0x0b, 0x5d, 0xbf, 0x2e, 0x30, 0xbc, 0x22, 0x19, 0xf5, 0x98, - 0x51, 0x3b, 0x80, 0xd5, 0xc7, 0xb6, 0x43, 0x9b, 0x09, 0xb1, 0x47, 0x43, 0xfc, 0x08, 0x72, 0x7d, - 0xdb, 0xa1, 0xaa, 0xb2, 0x99, 0xdd, 0x2a, 0x6f, 0x5f, 0xaf, 0x4f, 0x29, 0xd5, 0x27, 0x35, 0xba, - 0x1c, 0xd6, 0x85, 0x46, 0xed, 0x9f, 0x39, 0x58, 0x9b, 0x23, 0xc5, 0x18, 0x72, 0x8c, 0x0c, 0xb9, - 0x45, 0x65, 0xab, 0xa4, 0x8b, 0xdf, 0x58, 0x85, 0x25, 0x8f, 0x98, 0xcf, 0xc9, 0x80, 0xaa, 0x19, - 0x01, 0xc7, 0x4b, 0xfc, 0x2e, 0x80, 0x45, 0x3d, 0xca, 0x2c, 0xca, 0xcc, 0x33, 0x35, 0xbb, 0x99, - 0xdd, 0x2a, 0xe9, 0x29, 0x04, 0xdf, 0x81, 0x55, 0x6f, 0x74, 0xe2, 0xd8, 0xa6, 0x91, 0xa2, 0xc1, - 0x66, 0x76, 0x2b, 0xaf, 0x23, 0x29, 0x68, 0x8e, 0xc9, 0x37, 0x61, 0xe5, 0x25, 0x25, 0xcf, 0xd3, - 0xd4, 0xb2, 0xa0, 0x56, 0x39, 0x9c, 0x22, 0xee, 0x41, 0x65, 0x48, 0x83, 0x80, 0x0c, 0xa8, 0x11, - 0x9e, 0x79, 0x54, 0xcd, 0x89, 0xdd, 0x6f, 0xce, 0xec, 0x7e, 0x7a, 0xe7, 0xe5, 0x48, 0xeb, 0xe8, - 0xcc, 0xa3, 0xb8, 0x01, 0x25, 0xca, 0x46, 0x43, 0x69, 0x21, 0xbf, 0x20, 0x7e, 0x1a, 0x1b, 0x0d, - 0xa7, 0xad, 0x14, 0xb9, 0x5a, 0x64, 0x62, 0x29, 0xa0, 0xfe, 0x0b, 0xdb, 0xa4, 0x6a, 0x41, 0x18, - 0xb8, 0x39, 0x63, 0xa0, 0x27, 0xe5, 0xd3, 0x36, 0x62, 0x3d, 0xbc, 0x07, 0x25, 0xfa, 0x2a, 0xa4, - 0x2c, 0xb0, 0x5d, 0xa6, 0x2e, 0x09, 0x23, 0x37, 0xe6, 0x9c, 0x22, 0x75, 0xac, 0x69, 0x13, 0x63, - 0x3d, 0xfc, 0x10, 0x96, 0x5c, 0x2f, 0xb4, 0x5d, 0x16, 0xa8, 0xc5, 0x4d, 0x65, 0xab, 0xbc, 0xfd, - 0xce, 0xdc, 0x44, 0xe8, 0x48, 0x8e, 0x1e, 0x93, 0x71, 0x0b, 0x50, 0xe0, 0x8e, 0x7c, 0x93, 0x1a, - 0xa6, 0x6b, 0x51, 0xc3, 0x66, 0x7d, 0x57, 0x2d, 0x09, 0x03, 0x57, 0x67, 0x37, 0x22, 0x88, 0x7b, - 0xae, 0x45, 0x5b, 0xac, 0xef, 0xea, 0xd5, 0x60, 0x62, 0x8d, 0x2f, 0x40, 0x21, 0x38, 0x63, 0x21, - 0x79, 0xa5, 0x56, 0x44, 0x86, 0x44, 0xab, 0xda, 0x7f, 0xf2, 0xb0, 0x72, 0x9e, 0x14, 0xfb, 0x18, - 0xf2, 0x7d, 0xbe, 0x4b, 0x35, 0xf3, 0xbf, 0xc4, 0x40, 0xea, 0x4c, 0x06, 0xb1, 0xf0, 0x0d, 0x83, - 0xd8, 0x80, 0x32, 0xa3, 0x41, 0x48, 0x2d, 0x99, 0x11, 0xd9, 0x73, 0xe6, 0x14, 0x48, 0xa5, 0xd9, - 0x94, 0xca, 0x7d, 0xa3, 0x94, 0x7a, 0x0a, 0x2b, 0x89, 0x4b, 0x86, 0x4f, 0xd8, 0x20, 0xce, 0xcd, - 0xbb, 0x6f, 0xf2, 0xa4, 0xae, 0xc5, 0x7a, 0x3a, 0x57, 0xd3, 0xab, 0x74, 0x62, 0x8d, 0x9b, 0x00, - 0x2e, 0xa3, 0x6e, 0xdf, 0xb0, 0xa8, 0xe9, 0xa8, 0xc5, 0x05, 0x51, 0xea, 0x70, 0xca, 0x4c, 0x94, - 0x5c, 0x89, 0x9a, 0x0e, 0xfe, 0x68, 0x9c, 0x6a, 0x4b, 0x0b, 0x32, 0xe5, 0x40, 0xbe, 0x64, 0x33, - 0xd9, 0x76, 0x0c, 0x55, 0x9f, 0xf2, 0xbc, 0xa7, 0x56, 0xb4, 0xb3, 0x92, 0x70, 0xa2, 0xfe, 0xc6, - 0x9d, 0xe9, 0x91, 0x9a, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e, 0xe2, 0xf7, 0x20, 0x01, 0x0c, 0x91, 0x56, - 0x20, 0xaa, 0x50, 0x25, 0x06, 0x0f, 0xc9, 0x90, 0x6e, 0x3c, 0x82, 0xea, 0x64, 0x78, 0xf0, 0x3a, - 0xe4, 0x83, 0x90, 0xf8, 0xa1, 0xc8, 0xc2, 0xbc, 0x2e, 0x17, 0x18, 0x41, 0x96, 0x32, 0x4b, 0x54, - 0xb9, 0xbc, 0xce, 0x7f, 0x6e, 0x7c, 0x08, 0xcb, 0x13, 0x8f, 0x3f, 0xaf, 0x62, 0xed, 0x37, 0x05, - 0x58, 0x9f, 0x97, 0x73, 0x73, 0xd3, 0xff, 0x02, 0x14, 0xd8, 0x68, 0x78, 0x42, 0x7d, 0x35, 0x2b, - 0x2c, 0x44, 0x2b, 0xdc, 0x80, 0xbc, 0x43, 0x4e, 0xa8, 0xa3, 0xe6, 0x36, 0x95, 0xad, 0xea, 0xf6, - 0x9d, 0x73, 0x65, 0x75, 0xbd, 0xcd, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x81, 0x5c, 0x54, 
0xe2, 0xb8, - 0x85, 0xdb, 0xe7, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x32, 0x94, 0xf8, 0x5f, 0x19, 0xdb, - 0x82, 0xf0, 0xb9, 0xc8, 0x01, 0x1e, 0x57, 0xbc, 0x01, 0x45, 0x91, 0x66, 0x16, 0x8d, 0x5b, 0x43, - 0xb2, 0xe6, 0x07, 0x63, 0xd1, 0x3e, 0x19, 0x39, 0xa1, 0xf1, 0x82, 0x38, 0x23, 0x2a, 0x12, 0xa6, - 0xa4, 0x57, 0x22, 0xf0, 0xa7, 0x1c, 0xc3, 0x57, 0xa1, 0x2c, 0xb3, 0xd2, 0x66, 0x16, 0x7d, 0x25, - 0xaa, 0x4f, 0x5e, 0x97, 0x89, 0xda, 0xe2, 0x08, 0x7f, 0xfc, 0xb3, 0xc0, 0x65, 0xf1, 0xd1, 0x8a, - 0x47, 0x70, 0x40, 0x3c, 0xfe, 0xc3, 0xe9, 0xc2, 0x77, 0x65, 0xfe, 0xf6, 0xa6, 0x73, 0xb1, 0xf6, - 0xe7, 0x0c, 0xe4, 0xc4, 0xfb, 0xb6, 0x02, 0xe5, 0xa3, 0xcf, 0xba, 0x9a, 0xd1, 0xec, 0x1c, 0xef, - 0xb6, 0x35, 0xa4, 0xe0, 0x2a, 0x80, 0x00, 0x1e, 0xb7, 0x3b, 0x8d, 0x23, 0x94, 0x49, 0xd6, 0xad, - 0xc3, 0xa3, 0x87, 0xf7, 0x51, 0x36, 0x51, 0x38, 0x96, 0x40, 0x2e, 0x4d, 0xb8, 0xb7, 0x8d, 0xf2, - 0x18, 0x41, 0x45, 0x1a, 0x68, 0x3d, 0xd5, 0x9a, 0x0f, 0xef, 0xa3, 0xc2, 0x24, 0x72, 0x6f, 0x1b, - 0x2d, 0xe1, 0x65, 0x28, 0x09, 0x64, 0xb7, 0xd3, 0x69, 0xa3, 0x62, 0x62, 0xb3, 0x77, 0xa4, 0xb7, - 0x0e, 0xf7, 0x51, 0x29, 0xb1, 0xb9, 0xaf, 0x77, 0x8e, 0xbb, 0x08, 0x12, 0x0b, 0x07, 0x5a, 0xaf, - 0xd7, 0xd8, 0xd7, 0x50, 0x39, 0x61, 0xec, 0x7e, 0x76, 0xa4, 0xf5, 0x50, 0x65, 0xc2, 0xad, 0x7b, - 0xdb, 0x68, 0x39, 0x79, 0x84, 0x76, 0x78, 0x7c, 0x80, 0xaa, 0x78, 0x15, 0x96, 0xe5, 0x23, 0x62, - 0x27, 0x56, 0xa6, 0xa0, 0x87, 0xf7, 0x11, 0x1a, 0x3b, 0x22, 0xad, 0xac, 0x4e, 0x00, 0x0f, 0xef, - 0x23, 0x5c, 0xdb, 0x83, 0xbc, 0xc8, 0x2e, 0x8c, 0xa1, 0xda, 0x6e, 0xec, 0x6a, 0x6d, 0xa3, 0xd3, - 0x3d, 0x6a, 0x75, 0x0e, 0x1b, 0x6d, 0xa4, 0x8c, 0x31, 0x5d, 0xfb, 0xc9, 0x71, 0x4b, 0xd7, 0x9a, - 0x28, 0x93, 0xc6, 0xba, 0x5a, 0xe3, 0x48, 0x6b, 0xa2, 0x6c, 0xcd, 0x84, 0xf5, 0x79, 0x75, 0x66, - 0xee, 0x9b, 0x91, 0x3a, 0xe2, 0xcc, 0x82, 0x23, 0x16, 0xb6, 0x66, 0x8e, 0xf8, 0x4b, 0x05, 0xd6, - 0xe6, 0xd4, 0xda, 0xb9, 0x0f, 0xf9, 0x31, 0xe4, 0x65, 0x8a, 0xca, 0xee, 0x73, 0x6b, 0x6e, 0xd1, - 0x16, 0x09, 0x3b, 0xd3, 0x81, 0x84, 0x5e, 0xba, 0x03, 0x67, 0x17, 0x74, 0x60, 0x6e, 0x62, 0xc6, - 0xc9, 0x5f, 0x2a, 0xa0, 0x2e, 0xb2, 0xfd, 0x86, 0x42, 0x91, 0x99, 0x28, 0x14, 0x1f, 0x4f, 0x3b, - 0x70, 0x6d, 0xf1, 0x1e, 0x66, 0xbc, 0xf8, 0x4a, 0x81, 0x0b, 0xf3, 0x07, 0x95, 0xb9, 0x3e, 0x7c, - 0x02, 0x85, 0x21, 0x0d, 0x4f, 0xdd, 0xb8, 0x59, 0xbf, 0x3f, 0xa7, 0x05, 0x70, 0xf1, 0x74, 0xac, - 0x22, 0xad, 0x74, 0x0f, 0xc9, 0x2e, 0x9a, 0x36, 0xa4, 0x37, 0x33, 0x9e, 0xfe, 0x2a, 0x03, 0x6f, - 0xcf, 0x35, 0x3e, 0xd7, 0xd1, 0x2b, 0x00, 0x36, 0xf3, 0x46, 0xa1, 0x6c, 0xc8, 0xb2, 0x3e, 0x95, - 0x04, 0x22, 0xde, 0x7d, 0x5e, 0x7b, 0x46, 0x61, 0x22, 0xcf, 0x0a, 0x39, 0x48, 0x48, 0x10, 0x1e, - 0x8d, 0x1d, 0xcd, 0x09, 0x47, 0xdf, 0x5d, 0xb0, 0xd3, 0x99, 0x5e, 0xf7, 0x01, 0x20, 0xd3, 0xb1, - 0x29, 0x0b, 0x8d, 0x20, 0xf4, 0x29, 0x19, 0xda, 0x6c, 0x20, 0x0a, 0x70, 0x71, 0x27, 0xdf, 0x27, - 0x4e, 0x40, 0xf5, 0x15, 0x29, 0xee, 0xc5, 0x52, 0xae, 0x21, 0xba, 0x8c, 0x9f, 0xd2, 0x28, 0x4c, - 0x68, 0x48, 0x71, 0xa2, 0x51, 0xfb, 0xf5, 0x12, 0x94, 0x53, 0x63, 0x1d, 0xbe, 0x06, 0x95, 0x67, - 0xe4, 0x05, 0x31, 0xe2, 0x51, 0x5d, 0x46, 0xa2, 0xcc, 0xb1, 0x6e, 0x34, 0xae, 0x7f, 0x00, 0xeb, - 0x82, 0xe2, 0x8e, 0x42, 0xea, 0x1b, 0xa6, 0x43, 0x82, 0x40, 0x04, 0xad, 0x28, 0xa8, 0x98, 0xcb, - 0x3a, 0x5c, 0xb4, 0x17, 0x4b, 0xf0, 0x03, 0x58, 0x13, 0x1a, 0xc3, 0x91, 0x13, 0xda, 0x9e, 0x43, - 0x0d, 0xfe, 0xf1, 0x10, 0x88, 0x42, 0x9c, 0x78, 0xb6, 0xca, 0x19, 0x07, 0x11, 0x81, 0x7b, 0x14, - 0xe0, 0x7d, 0xb8, 0x22, 0xd4, 0x06, 0x94, 0x51, 0x9f, 0x84, 0xd4, 0xa0, 0xbf, 0x18, 0x11, 0x27, - 0x30, 0x08, 
0xb3, 0x8c, 0x53, 0x12, 0x9c, 0xaa, 0xeb, 0x69, 0x03, 0x97, 0x38, 0x77, 0x3f, 0xa2, - 0x6a, 0x82, 0xd9, 0x60, 0xd6, 0xa7, 0x24, 0x38, 0xc5, 0x3b, 0x70, 0x41, 0x18, 0x0a, 0x42, 0xdf, - 0x66, 0x03, 0xc3, 0x3c, 0xa5, 0xe6, 0x73, 0x63, 0x14, 0xf6, 0x1f, 0xa9, 0x97, 0xd3, 0x16, 0x84, - 0x93, 0x3d, 0xc1, 0xd9, 0xe3, 0x94, 0xe3, 0xb0, 0xff, 0x08, 0xf7, 0xa0, 0xc2, 0xcf, 0x63, 0x68, - 0x7f, 0x41, 0x8d, 0xbe, 0xeb, 0x8b, 0xe6, 0x52, 0x9d, 0xf3, 0x72, 0xa7, 0x82, 0x58, 0xef, 0x44, - 0x0a, 0x07, 0xae, 0x45, 0x77, 0xf2, 0xbd, 0xae, 0xa6, 0x35, 0xf5, 0x72, 0x6c, 0xe5, 0xb1, 0xeb, - 0xf3, 0x9c, 0x1a, 0xb8, 0x49, 0x8c, 0xcb, 0x32, 0xa7, 0x06, 0x6e, 0x1c, 0xe1, 0x07, 0xb0, 0x66, - 0x9a, 0x72, 0xdb, 0xb6, 0x69, 0x44, 0x53, 0x7e, 0xa0, 0xa2, 0x89, 0x78, 0x99, 0xe6, 0xbe, 0x24, - 0x44, 0x69, 0x1e, 0xe0, 0x8f, 0xe0, 0xed, 0x71, 0xbc, 0xd2, 0x8a, 0xab, 0x33, 0xbb, 0x9c, 0x56, - 0x7d, 0x00, 0x6b, 0xde, 0xd9, 0xac, 0x22, 0x9e, 0x78, 0xa2, 0x77, 0x36, 0xad, 0x76, 0x43, 0x7c, - 0xb9, 0xf9, 0xd4, 0x24, 0x21, 0xb5, 0xd4, 0x8b, 0x69, 0x76, 0x4a, 0x80, 0xef, 0x02, 0x32, 0x4d, - 0x83, 0x32, 0x72, 0xe2, 0x50, 0x83, 0xf8, 0x94, 0x91, 0x40, 0xbd, 0x9a, 0x26, 0x57, 0x4d, 0x53, - 0x13, 0xd2, 0x86, 0x10, 0xe2, 0xdb, 0xb0, 0xea, 0x9e, 0x3c, 0x33, 0x65, 0x72, 0x19, 0x9e, 0x4f, - 0xfb, 0xf6, 0x2b, 0xf5, 0xba, 0x08, 0xd3, 0x0a, 0x17, 0x88, 0xd4, 0xea, 0x0a, 0x18, 0xdf, 0x02, - 0x64, 0x06, 0xa7, 0xc4, 0xf7, 0x44, 0x77, 0x0f, 0x3c, 0x62, 0x52, 0xf5, 0x86, 0xa4, 0x4a, 0xfc, - 0x30, 0x86, 0xf1, 0x53, 0x58, 0x1f, 0x31, 0x9b, 0x85, 0xd4, 0xf7, 0x7c, 0xca, 0x87, 0x74, 0xf9, - 0xa6, 0xa9, 0xff, 0x5a, 0x5a, 0x30, 0x66, 0x1f, 0xa7, 0xd9, 0xf2, 0x74, 0xf5, 0xb5, 0xd1, 0x2c, - 0x58, 0xdb, 0x81, 0x4a, 0xfa, 0xd0, 0x71, 0x09, 0xe4, 0xb1, 0x23, 0x85, 0xf7, 0xd0, 0xbd, 0x4e, - 0x93, 0x77, 0xbf, 0xcf, 0x35, 0x94, 0xe1, 0x5d, 0xb8, 0xdd, 0x3a, 0xd2, 0x0c, 0xfd, 0xf8, 0xf0, - 0xa8, 0x75, 0xa0, 0xa1, 0xec, 0xed, 0x52, 0xf1, 0xdf, 0x4b, 0xe8, 0xf5, 0xeb, 0xd7, 0xaf, 0x33, - 0x4f, 0x72, 0xc5, 0xf7, 0xd1, 0xcd, 0xda, 0x5f, 0x33, 0x50, 0x9d, 0x9c, 0x7f, 0xf1, 0x8f, 0xe0, - 0x62, 0xfc, 0xb1, 0x1a, 0xd0, 0xd0, 0x78, 0x69, 0xfb, 0x22, 0x1b, 0x87, 0x44, 0x4e, 0x90, 0x49, - 0x20, 0xd7, 0x23, 0x56, 0x8f, 0x86, 0x3f, 0xb3, 0x7d, 0x9e, 0x6b, 0x43, 0x12, 0xe2, 0x36, 0x5c, - 0x65, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xf1, 0x35, 0x81, 0x41, 0x4c, 0x93, 0x06, - 0x81, 0x2b, 0x1b, 0x41, 0x62, 0xe5, 0x1d, 0xe6, 0xf6, 0x22, 0xf2, 0xb8, 0x42, 0x36, 0x22, 0xea, - 0xd4, 0xa1, 0x67, 0x17, 0x1d, 0xfa, 0x65, 0x28, 0x0d, 0x89, 0x67, 0x50, 0x16, 0xfa, 0x67, 0x62, - 0x6a, 0x2b, 0xea, 0xc5, 0x21, 0xf1, 0x34, 0xbe, 0xfe, 0xf6, 0x4e, 0x22, 0x15, 0xcd, 0xda, 0x3f, - 0xb2, 0x50, 0x49, 0x4f, 0x6e, 0x7c, 0x10, 0x36, 0x45, 0x95, 0x56, 0xc4, 0x4b, 0xfc, 0xde, 0xd7, - 0xce, 0x79, 0xf5, 0x3d, 0x5e, 0xbe, 0x77, 0x0a, 0x72, 0x9e, 0xd2, 0xa5, 0x26, 0x6f, 0x9d, 0xfc, - 0xb5, 0xa5, 0x72, 0x4a, 0x2f, 0xea, 0xd1, 0x0a, 0xef, 0x43, 0xe1, 0x59, 0x20, 0x6c, 0x17, 0x84, - 0xed, 0xeb, 0x5f, 0x6f, 0xfb, 0x49, 0x4f, 0x18, 0x2f, 0x3d, 0xe9, 0x19, 0x87, 0x1d, 0xfd, 0xa0, - 0xd1, 0xd6, 0x23, 0x75, 0x7c, 0x09, 0x72, 0x0e, 0xf9, 0xe2, 0x6c, 0xb2, 0xd0, 0x0b, 0xe8, 0xbc, - 0x81, 0xbf, 0x04, 0xb9, 0x97, 0x94, 0x3c, 0x9f, 0x2c, 0xaf, 0x02, 0xfa, 0x16, 0x5f, 0x80, 0xbb, - 0x90, 0x17, 0xf1, 0xc2, 0x00, 0x51, 0xc4, 0xd0, 0x5b, 0xb8, 0x08, 0xb9, 0xbd, 0x8e, 0xce, 0x5f, - 0x02, 0x04, 0x15, 0x89, 0x1a, 0xdd, 0x96, 0xb6, 0xa7, 0xa1, 0x4c, 0xed, 0x01, 0x14, 0x64, 0x10, - 0xf8, 0x0b, 0x92, 0x84, 0x01, 0xbd, 0x15, 0x2d, 0x23, 0x1b, 0x4a, 0x2c, 0x3d, 0x3e, 0xd8, 0xd5, - 0x74, 0x94, 0x49, 0x1f, 0x6f, 0x00, 
0x95, 0xf4, 0xd0, 0xf6, 0xdd, 0xe4, 0xd4, 0x5f, 0x14, 0x28, - 0xa7, 0x86, 0x30, 0xde, 0xfe, 0x89, 0xe3, 0xb8, 0x2f, 0x0d, 0xe2, 0xd8, 0x24, 0x88, 0x92, 0x02, - 0x04, 0xd4, 0xe0, 0xc8, 0x79, 0x0f, 0xed, 0x3b, 0x71, 0xfe, 0x0f, 0x0a, 0xa0, 0xe9, 0x01, 0x6e, - 0xca, 0x41, 0xe5, 0x7b, 0x75, 0xf0, 0xf7, 0x0a, 0x54, 0x27, 0xa7, 0xb6, 0x29, 0xf7, 0xae, 0x7d, - 0xaf, 0xee, 0xfd, 0x4e, 0x81, 0xe5, 0x89, 0x59, 0xed, 0xff, 0xca, 0xbb, 0xdf, 0x66, 0x61, 0x6d, - 0x8e, 0x1e, 0x6e, 0x44, 0x43, 0xad, 0x9c, 0xb3, 0x7f, 0x78, 0x9e, 0x67, 0xd5, 0x79, 0xcf, 0xec, - 0x12, 0x3f, 0x8c, 0x66, 0xe0, 0x5b, 0x80, 0x6c, 0x8b, 0xb2, 0xd0, 0xee, 0xdb, 0xd4, 0x8f, 0x3e, - 0xc4, 0xe5, 0xa4, 0xbb, 0x32, 0xc6, 0xe5, 0xb7, 0xf8, 0x0f, 0x00, 0x7b, 0x6e, 0x60, 0x87, 0xf6, - 0x0b, 0x6a, 0xd8, 0x2c, 0xfe, 0x6a, 0xe7, 0x93, 0x6f, 0x4e, 0x47, 0xb1, 0xa4, 0xc5, 0xc2, 0x84, - 0xcd, 0xe8, 0x80, 0x4c, 0xb1, 0x79, 0xed, 0xcb, 0xea, 0x28, 0x96, 0x24, 0xec, 0x6b, 0x50, 0xb1, - 0xdc, 0x11, 0x1f, 0x22, 0x24, 0x8f, 0x97, 0x5a, 0x45, 0x2f, 0x4b, 0x2c, 0xa1, 0x44, 0x53, 0xde, - 0xf8, 0xba, 0xa0, 0xa2, 0x97, 0x25, 0x26, 0x29, 0x37, 0x61, 0x85, 0x0c, 0x06, 0x3e, 0x37, 0x1e, - 0x1b, 0x92, 0xa3, 0x6b, 0x35, 0x81, 0x05, 0x71, 0xe3, 0x09, 0x14, 0xe3, 0x38, 0xf0, 0x6e, 0xc6, - 0x23, 0x61, 0x78, 0xf2, 0xd2, 0x26, 0xb3, 0x55, 0xd2, 0x8b, 0x2c, 0x16, 0x5e, 0x83, 0x8a, 0x1d, - 0x18, 0xe3, 0xdb, 0xc3, 0xcc, 0x66, 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xb9, 0x2e, 0xaa, 0x7d, - 0x95, 0x81, 0xea, 0xe4, 0xed, 0x27, 0x6e, 0x42, 0xd1, 0x71, 0x4d, 0x22, 0x12, 0x41, 0x5e, 0xbd, - 0x6f, 0xbd, 0xe1, 0xc2, 0xb4, 0xde, 0x8e, 0xf8, 0x7a, 0xa2, 0xb9, 0xf1, 0x37, 0x05, 0x8a, 0x31, - 0x8c, 0x2f, 0x40, 0xce, 0x23, 0xe1, 0xa9, 0x30, 0x97, 0xdf, 0xcd, 0x20, 0x45, 0x17, 0x6b, 0x8e, - 0x07, 0x1e, 0x61, 0x22, 0x05, 0x22, 0x9c, 0xaf, 0xf9, 0xb9, 0x3a, 0x94, 0x58, 0x62, 0x28, 0x76, - 0x87, 0x43, 0xca, 0xc2, 0x20, 0x3e, 0xd7, 0x08, 0xdf, 0x8b, 0x60, 0x7c, 0x07, 0x56, 0x43, 0x9f, - 0xd8, 0xce, 0x04, 0x37, 0x27, 0xb8, 0x28, 0x16, 0x24, 0xe4, 0x1d, 0xb8, 0x14, 0xdb, 0xb5, 0x68, - 0x48, 0xcc, 0x53, 0x6a, 0x8d, 0x95, 0x0a, 0xe2, 0x6a, 0xed, 0x62, 0x44, 0x68, 0x46, 0xf2, 0x58, - 0xb7, 0xf6, 0x77, 0x05, 0x56, 0xe3, 0x31, 0xde, 0x4a, 0x82, 0x75, 0x00, 0x40, 0x18, 0x73, 0xc3, - 0x74, 0xb8, 0x66, 0x53, 0x79, 0x46, 0xaf, 0xde, 0x48, 0x94, 0xf4, 0x94, 0x81, 0x8d, 0x21, 0xc0, - 0x58, 0xb2, 0x30, 0x6c, 0x57, 0xa1, 0x1c, 0x5d, 0x6d, 0x8b, 0xff, 0x8f, 0xc8, 0x6f, 0x3f, 0x90, - 0x10, 0x9f, 0xf7, 0xf1, 0x3a, 0xe4, 0x4f, 0xe8, 0xc0, 0x66, 0xd1, 0x85, 0x9b, 0x5c, 0xc4, 0xd7, - 0x78, 0xb9, 0xe4, 0x1a, 0x6f, 0xf7, 0xe7, 0xb0, 0x66, 0xba, 0xc3, 0x69, 0x77, 0x77, 0xd1, 0xd4, - 0xf7, 0x67, 0xf0, 0xa9, 0xf2, 0x39, 0x8c, 0xa7, 0xb3, 0x3f, 0x2a, 0xca, 0x97, 0x99, 0xec, 0x7e, - 0x77, 0xf7, 0x4f, 0x99, 0x8d, 0x7d, 0xa9, 0xda, 0x8d, 0x77, 0xaa, 0xd3, 0xbe, 0x43, 0x4d, 0xee, - 0xfd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x54, 0xc8, 0x7d, 0x07, 0x1a, 0x00, 0x00, + // 2247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x8f, 0xdb, 0xc6, + 0x15, 0xaf, 0xfe, 0xae, 0xf4, 0xa4, 0x95, 0xb8, 0xe3, 0x8d, 0x2d, 0x6f, 0xe2, 0xd8, 0x56, 0xec, + 0xd8, 0x71, 0x5a, 0x39, 0x70, 0x9b, 0xc4, 0xdd, 0x14, 0x29, 0xb4, 0x12, 0xbd, 0x91, 0x21, 0xad, + 0x54, 0x4a, 0x6a, 0x9d, 0x5c, 0x08, 0x2e, 0x35, 0xd2, 0xd2, 0xa6, 0x48, 0x95, 0xa4, 0x6c, 0x6f, + 0x4e, 0x05, 0x7a, 0xea, 0xb1, 0xb7, 0xa2, 0x2d, 0x7a, 0xc8, 0x25, 0x40, 0x3f, 0x40, 0x0f, 0xbd, + 0xf7, 0x5a, 0xa0, 0xf7, 0x1e, 0x0b, 0xb4, 0xdf, 0xa0, 0xd7, 0xbe, 0x99, 0x21, 0x29, 0x52, 0xa2, + 0xe2, 0x6d, 0x80, 
0x34, 0xf5, 0xc5, 0x9a, 0x37, 0xbf, 0xf7, 0xf8, 0xe6, 0xcd, 0x6f, 0xde, 0x7b, + 0x33, 0x0b, 0x37, 0x66, 0xb6, 0x3d, 0x33, 0xe9, 0xfd, 0x85, 0x63, 0x7b, 0xf6, 0xe9, 0x72, 0x7a, + 0x7f, 0x42, 0x5d, 0xdd, 0x31, 0x16, 0x9e, 0xed, 0x34, 0xb8, 0x8c, 0x54, 0x05, 0xa2, 0x11, 0x20, + 0xea, 0x3d, 0xd8, 0x7b, 0x64, 0x98, 0xb4, 0x1d, 0x02, 0x87, 0xd4, 0x23, 0x0f, 0x21, 0x3b, 0x45, + 0x61, 0x2d, 0x75, 0x23, 0x73, 0xb7, 0xf4, 0xe0, 0x56, 0x63, 0x4d, 0xa9, 0x11, 0xd7, 0x18, 0x30, + 0xb1, 0xc2, 0x35, 0xea, 0xff, 0xc8, 0xc2, 0xa5, 0x84, 0x59, 0x42, 0x20, 0x6b, 0x69, 0x73, 0x66, + 0x31, 0x75, 0xb7, 0xa8, 0xf0, 0xdf, 0xa4, 0x06, 0x3b, 0x0b, 0x4d, 0x7f, 0xa6, 0xcd, 0x68, 0x2d, + 0xcd, 0xc5, 0xc1, 0x90, 0xbc, 0x09, 0x30, 0xa1, 0x0b, 0x6a, 0x4d, 0xa8, 0xa5, 0x9f, 0xd7, 0x32, + 0xe8, 0x45, 0x51, 0x89, 0x48, 0xc8, 0xbb, 0xb0, 0xb7, 0x58, 0x9e, 0x9a, 0x86, 0xae, 0x46, 0x60, + 0x80, 0xb0, 0x9c, 0x22, 0x89, 0x89, 0xf6, 0x0a, 0x7c, 0x07, 0xaa, 0x2f, 0xa8, 0xf6, 0x2c, 0x0a, + 0x2d, 0x71, 0x68, 0x85, 0x89, 0x23, 0xc0, 0x16, 0x94, 0xe7, 0xd4, 0x75, 0xd1, 0x01, 0xd5, 0x3b, + 0x5f, 0xd0, 0x5a, 0x96, 0xaf, 0xfe, 0xc6, 0xc6, 0xea, 0xd7, 0x57, 0x5e, 0xf2, 0xb5, 0x46, 0xa8, + 0x44, 0x9a, 0x50, 0xa4, 0xd6, 0x72, 0x2e, 0x2c, 0xe4, 0xb6, 0xc4, 0x4f, 0x46, 0xc4, 0xba, 0x95, + 0x02, 0x53, 0xf3, 0x4d, 0xec, 0xb8, 0xd4, 0x79, 0x6e, 0xe8, 0xb4, 0x96, 0xe7, 0x06, 0xee, 0x6c, + 0x18, 0x18, 0x8a, 0xf9, 0x75, 0x1b, 0x81, 0x1e, 0x2e, 0xa5, 0x48, 0x5f, 0x7a, 0xd4, 0x72, 0x0d, + 0xdb, 0xaa, 0xed, 0x70, 0x23, 0xb7, 0x13, 0x76, 0x91, 0x9a, 0x93, 0x75, 0x13, 0x2b, 0x3d, 0xf2, + 0x01, 0xec, 0xd8, 0x0b, 0x0f, 0x7f, 0xb9, 0xb5, 0x02, 0xee, 0x4f, 0xe9, 0xc1, 0x1b, 0x89, 0x44, + 0xe8, 0x0b, 0x8c, 0x12, 0x80, 0x49, 0x07, 0x24, 0xd7, 0x5e, 0x3a, 0x3a, 0x55, 0x75, 0x7b, 0x42, + 0x55, 0xc3, 0x9a, 0xda, 0xb5, 0x22, 0x37, 0x70, 0x7d, 0x73, 0x21, 0x1c, 0xd8, 0x42, 0x5c, 0x07, + 0x61, 0x4a, 0xc5, 0x8d, 0x8d, 0xc9, 0x65, 0xc8, 0xbb, 0xe7, 0x96, 0xa7, 0xbd, 0xac, 0x95, 0x39, + 0x43, 0xfc, 0x51, 0xfd, 0xdf, 0x39, 0xa8, 0x5e, 0x84, 0x62, 0x1f, 0x41, 0x6e, 0xca, 0x56, 0x89, + 0x04, 0xfb, 0x2f, 0x62, 0x20, 0x74, 0xe2, 0x41, 0xcc, 0x7f, 0xcd, 0x20, 0x36, 0xa1, 0x64, 0x51, + 0xd7, 0xa3, 0x13, 0xc1, 0x88, 0xcc, 0x05, 0x39, 0x05, 0x42, 0x69, 0x93, 0x52, 0xd9, 0xaf, 0x45, + 0xa9, 0x27, 0x50, 0x0d, 0x5d, 0x52, 0x1d, 0xcd, 0x9a, 0x05, 0xdc, 0xbc, 0xff, 0x2a, 0x4f, 0x1a, + 0x72, 0xa0, 0xa7, 0x30, 0x35, 0xa5, 0x42, 0x63, 0x63, 0xd2, 0x06, 0xb0, 0x2d, 0x6a, 0x4f, 0xf1, + 0x78, 0xe9, 0x26, 0xf2, 0x24, 0x39, 0x4a, 0x7d, 0x06, 0xd9, 0x88, 0x92, 0x2d, 0xa4, 0xba, 0x49, + 0x7e, 0xb8, 0xa2, 0xda, 0xce, 0x16, 0xa6, 0xf4, 0xc4, 0x21, 0xdb, 0x60, 0xdb, 0x18, 0x2a, 0x0e, + 0x65, 0xbc, 0xc7, 0x10, 0x8b, 0x95, 0x15, 0xb9, 0x13, 0x8d, 0x57, 0xae, 0x4c, 0xf1, 0xd5, 0xc4, + 0xc2, 0x76, 0x9d, 0xe8, 0x90, 0xbc, 0x05, 0xa1, 0x40, 0xe5, 0xb4, 0x02, 0x9e, 0x85, 0xca, 0x81, + 0xf0, 0x04, 0x65, 0x07, 0x0f, 0xa1, 0x12, 0x0f, 0x0f, 0xd9, 0x87, 0x9c, 0xeb, 0x69, 0x8e, 0xc7, + 0x59, 0x98, 0x53, 0xc4, 0x80, 0x48, 0x90, 0xc1, 0x24, 0xc3, 0xb3, 0x5c, 0x4e, 0x61, 0x3f, 0x0f, + 0x3e, 0x84, 0xdd, 0xd8, 0xe7, 0x2f, 0xaa, 0x58, 0xff, 0x4d, 0x1e, 0xf6, 0x93, 0x38, 0x97, 0x48, + 0x7f, 0x3c, 0x3e, 0xc8, 0x80, 0x53, 0xea, 0x20, 0xef, 0x98, 0x05, 0x7f, 0x84, 0x8c, 0xca, 0x99, + 0xda, 0x29, 0x35, 0x91, 0x4d, 0xa9, 0xbb, 0x95, 0x07, 0xef, 0x5e, 0x88, 0xd5, 0x8d, 0x2e, 0x53, + 0x51, 0x84, 0x26, 0xf9, 0x18, 0xb2, 0x7e, 0x8a, 0x63, 0x16, 0xee, 0x5d, 0xcc, 0x02, 0xe3, 0xa2, + 0xc2, 0xf5, 0xc8, 0xeb, 0x50, 0x64, 0xff, 0x8b, 0xd8, 0xe6, 0xb9, 0xcf, 0x05, 0x26, 0x60, 0x71, + 0x25, 0x07, 0x50, 0xe0, 0x34, 0x9b, 0xd0, 
0xa0, 0x34, 0x84, 0x63, 0xb6, 0x31, 0x13, 0x3a, 0xd5, + 0x96, 0xa6, 0xa7, 0x3e, 0xd7, 0xcc, 0x25, 0xe5, 0x84, 0xc1, 0x8d, 0xf1, 0x85, 0x3f, 0x65, 0x32, + 0x72, 0x1d, 0x4a, 0x82, 0x95, 0x06, 0xea, 0xbc, 0xe4, 0xd9, 0x27, 0xa7, 0x08, 0xa2, 0x76, 0x98, + 0x84, 0x7d, 0xfe, 0xa9, 0x8b, 0x67, 0xc1, 0xdf, 0x5a, 0xfe, 0x09, 0x26, 0xe0, 0x9f, 0xff, 0x70, + 0x3d, 0xf1, 0x5d, 0x4b, 0x5e, 0xde, 0x3a, 0x17, 0xeb, 0x7f, 0x4a, 0x43, 0x96, 0x9f, 0xb7, 0x2a, + 0x94, 0x46, 0x9f, 0x0e, 0x64, 0xb5, 0xdd, 0x1f, 0x1f, 0x75, 0x65, 0x29, 0x45, 0x2a, 0x00, 0x5c, + 0xf0, 0xa8, 0xdb, 0x6f, 0x8e, 0xa4, 0x74, 0x38, 0xee, 0x9c, 0x8c, 0x3e, 0xf8, 0x81, 0x94, 0x09, + 0x15, 0xc6, 0x42, 0x90, 0x8d, 0x02, 0xbe, 0xff, 0x40, 0xca, 0x21, 0x13, 0xca, 0xc2, 0x40, 0xe7, + 0x89, 0xdc, 0x46, 0x44, 0x3e, 0x2e, 0x41, 0xcc, 0x0e, 0xd9, 0x85, 0x22, 0x97, 0x1c, 0xf5, 0xfb, + 0x5d, 0xa9, 0x10, 0xda, 0x1c, 0x8e, 0x94, 0xce, 0xc9, 0xb1, 0x54, 0x0c, 0x6d, 0x1e, 0x2b, 0xfd, + 0xf1, 0x40, 0x82, 0xd0, 0x42, 0x4f, 0x1e, 0x0e, 0x9b, 0xc7, 0xb2, 0x54, 0x0a, 0x11, 0x47, 0x9f, + 0x8e, 0xe4, 0xa1, 0x54, 0x8e, 0xb9, 0x85, 0x9f, 0xd8, 0x0d, 0x3f, 0x21, 0x9f, 0x8c, 0x7b, 0x52, + 0x85, 0xec, 0xc1, 0xae, 0xf8, 0x44, 0xe0, 0x44, 0x75, 0x4d, 0x84, 0x9e, 0x4a, 0x2b, 0x47, 0x84, + 0x95, 0xbd, 0x98, 0x00, 0x11, 0xa4, 0xde, 0x82, 0x1c, 0x67, 0x17, 0xb2, 0xb8, 0xd2, 0x6d, 0x1e, + 0xc9, 0x5d, 0xb5, 0x3f, 0x18, 0x75, 0xfa, 0x27, 0xcd, 0x2e, 0xc6, 0x2e, 0x94, 0x29, 0xf2, 0x4f, + 0xc6, 0x1d, 0x45, 0x6e, 0x63, 0xfc, 0x22, 0xb2, 0x81, 0xdc, 0x1c, 0xa1, 0x2c, 0x53, 0xbf, 0x07, + 0xfb, 0x49, 0x79, 0x26, 0xe9, 0x64, 0xd4, 0xbf, 0x48, 0xc1, 0xa5, 0x84, 0x94, 0x99, 0x78, 0x8a, + 0x7e, 0x0c, 0x39, 0xc1, 0x34, 0x51, 0x44, 0xde, 0x49, 0xcc, 0xbd, 0x9c, 0x77, 0x1b, 0x85, 0x84, + 0xeb, 0x45, 0x0b, 0x69, 0x66, 0x4b, 0x21, 0x65, 0x26, 0x36, 0xe8, 0xf4, 0xcb, 0x14, 0xd4, 0xb6, + 0xd9, 0x7e, 0xc5, 0x79, 0x4f, 0xc7, 0xce, 0xfb, 0x47, 0xeb, 0x0e, 0xdc, 0xdc, 0xbe, 0x86, 0x0d, + 0x2f, 0xbe, 0x4c, 0xc1, 0xe5, 0xe4, 0x7e, 0x23, 0xd1, 0x87, 0x8f, 0x21, 0x3f, 0xa7, 0xde, 0x99, + 0x1d, 0xd4, 0xdc, 0xb7, 0x13, 0x32, 0x39, 0x9b, 0x5e, 0x8f, 0x95, 0xaf, 0x15, 0x2d, 0x05, 0x99, + 0x6d, 0x4d, 0x83, 0xf0, 0x66, 0xc3, 0xd3, 0x5f, 0xa5, 0xe1, 0xb5, 0x44, 0xe3, 0x89, 0x8e, 0x5e, + 0x03, 0x30, 0xac, 0xc5, 0xd2, 0x13, 0x75, 0x55, 0xa4, 0x99, 0x22, 0x97, 0xf0, 0x23, 0xcc, 0x52, + 0xc8, 0xd2, 0x0b, 0xe7, 0x33, 0x7c, 0x1e, 0x84, 0x88, 0x03, 0x1e, 0xae, 0x1c, 0xcd, 0x72, 0x47, + 0xdf, 0xdc, 0xb2, 0xd2, 0x8d, 0x92, 0xf5, 0x1e, 0x48, 0xba, 0x69, 0x50, 0xcb, 0x53, 0x5d, 0xcf, + 0xa1, 0xda, 0xdc, 0xb0, 0x66, 0x3c, 0x8f, 0x16, 0x0e, 0x73, 0x53, 0xcd, 0x74, 0xa9, 0x52, 0x15, + 0xd3, 0xc3, 0x60, 0x96, 0x69, 0xf0, 0x62, 0xe1, 0x44, 0x34, 0xf2, 0x31, 0x0d, 0x31, 0x1d, 0x6a, + 0xd4, 0x7f, 0xbd, 0x03, 0xa5, 0x48, 0x77, 0x46, 0x6e, 0x42, 0xf9, 0xa9, 0xf6, 0x5c, 0x53, 0x83, + 0x8e, 0x5b, 0x44, 0xa2, 0xc4, 0x64, 0x03, 0xbf, 0xeb, 0x7e, 0x0f, 0xf6, 0x39, 0x04, 0xd7, 0x88, + 0x1f, 0xd2, 0x4d, 0xcd, 0x75, 0x79, 0xd0, 0x0a, 0x1c, 0x4a, 0xd8, 0x5c, 0x9f, 0x4d, 0xb5, 0x82, + 0x19, 0xf2, 0x3e, 0x5c, 0xe2, 0x1a, 0x73, 0x4c, 0xbc, 0xc6, 0xc2, 0xa4, 0x2a, 0xbb, 0x03, 0xb8, + 0x3c, 0x9f, 0x86, 0x9e, 0xed, 0x31, 0x44, 0xcf, 0x07, 0x30, 0x8f, 0x5c, 0x72, 0x0c, 0xd7, 0xb8, + 0xda, 0x8c, 0x5a, 0xd4, 0xd1, 0x3c, 0xaa, 0xd2, 0x9f, 0x2f, 0x11, 0xab, 0x6a, 0xd6, 0x44, 0x3d, + 0xd3, 0xdc, 0xb3, 0xda, 0x7e, 0xd4, 0xc0, 0x55, 0x86, 0x3d, 0xf6, 0xa1, 0x32, 0x47, 0x36, 0xad, + 0xc9, 0x27, 0x88, 0x23, 0x87, 0x70, 0x99, 0x1b, 0xc2, 0xa0, 0xe0, 0x9a, 0x55, 0xfd, 0x8c, 0xea, + 0xcf, 0xd4, 0xa5, 0x37, 0x7d, 0x58, 0x7b, 0x3d, 0x6a, 0x81, 0x3b, 
0x39, 0xe4, 0x98, 0x16, 0x83, + 0x8c, 0x11, 0x41, 0x86, 0x50, 0x66, 0xfb, 0x31, 0x37, 0x3e, 0x47, 0xb7, 0x6d, 0x87, 0xd7, 0x88, + 0x4a, 0xc2, 0xe1, 0x8e, 0x04, 0xb1, 0xd1, 0xf7, 0x15, 0x7a, 0xd8, 0x9f, 0x1e, 0xe6, 0x86, 0x03, + 0x59, 0x6e, 0x2b, 0xa5, 0xc0, 0xca, 0x23, 0xdb, 0x61, 0x9c, 0x9a, 0xd9, 0x61, 0x8c, 0x4b, 0x82, + 0x53, 0x33, 0x3b, 0x88, 0x30, 0xc6, 0x4b, 0xd7, 0xc5, 0xb2, 0xf1, 0xee, 0xe2, 0x37, 0xeb, 0x6e, + 0x4d, 0x8a, 0xc5, 0x4b, 0xd7, 0x8f, 0x05, 0xc0, 0xa7, 0xb9, 0x8b, 0x47, 0xe2, 0xb5, 0x55, 0xbc, + 0xa2, 0x8a, 0x7b, 0x1b, 0xab, 0x5c, 0x57, 0xc5, 0x2f, 0x2e, 0xce, 0x37, 0x15, 0x49, 0xec, 0x8b, + 0x8b, 0xf3, 0x75, 0xb5, 0xdb, 0xfc, 0x02, 0xe6, 0x50, 0x1d, 0x43, 0x3e, 0xa9, 0x5d, 0x89, 0xa2, + 0x23, 0x13, 0xe4, 0x3e, 0x12, 0x59, 0x57, 0xa9, 0xa5, 0x9d, 0xe2, 0xde, 0x6b, 0x0e, 0xfe, 0x70, + 0x6b, 0xd7, 0xa3, 0xe0, 0x8a, 0xae, 0xcb, 0x7c, 0xb6, 0xc9, 0x27, 0xc9, 0x3d, 0xd8, 0xb3, 0x4f, + 0x9f, 0xea, 0x82, 0x5c, 0x2a, 0xda, 0x99, 0x1a, 0x2f, 0x6b, 0xb7, 0x78, 0x98, 0xaa, 0x6c, 0x82, + 0x53, 0x6b, 0xc0, 0xc5, 0xe4, 0x1d, 0x34, 0xee, 0x9e, 0x69, 0xce, 0x82, 0x17, 0x69, 0x17, 0x83, + 0x4a, 0x6b, 0xb7, 0x05, 0x54, 0xc8, 0x4f, 0x02, 0x31, 0xb6, 0xb7, 0xfb, 0x4b, 0xcb, 0xb0, 0x90, + 0x9b, 0x68, 0x92, 0xf5, 0xda, 0xe2, 0xa4, 0xd5, 0xfe, 0xb9, 0xb3, 0xa5, 0x5b, 0x1e, 0x47, 0xd1, + 0x62, 0x77, 0x95, 0x4b, 0xcb, 0x4d, 0x61, 0xfd, 0x10, 0xca, 0xd1, 0x4d, 0x27, 0x45, 0x10, 0xdb, + 0x8e, 0x65, 0x09, 0x4b, 0x61, 0xab, 0xdf, 0x66, 0x45, 0xec, 0x33, 0x19, 0x2b, 0x12, 0x16, 0xd3, + 0x6e, 0x67, 0x24, 0xab, 0xca, 0xf8, 0x64, 0xd4, 0xe9, 0xc9, 0x52, 0xe6, 0x5e, 0xb1, 0xf0, 0xaf, + 0x1d, 0xe9, 0x17, 0xf8, 0x2f, 0xfd, 0x38, 0x5b, 0x78, 0x5b, 0xba, 0x53, 0xff, 0x4b, 0x1a, 0x2a, + 0xf1, 0x36, 0x96, 0xfc, 0x08, 0xae, 0x04, 0x77, 0x4e, 0x97, 0x7a, 0xea, 0x0b, 0xc3, 0xe1, 0x6c, + 0x9c, 0x6b, 0xa2, 0x11, 0x0c, 0x03, 0xb9, 0xef, 0xa3, 0xf0, 0x76, 0xfe, 0x33, 0xc4, 0x3c, 0xe2, + 0x10, 0xd2, 0x85, 0xeb, 0x96, 0x8d, 0xec, 0xc7, 0x83, 0xa3, 0x39, 0x13, 0x75, 0x75, 0xdb, 0x57, + 0x35, 0x1d, 0xb7, 0xd1, 0xb5, 0x45, 0x21, 0x08, 0xad, 0xbc, 0x61, 0xd9, 0x43, 0x1f, 0xbc, 0xca, + 0x90, 0x4d, 0x1f, 0xba, 0xb6, 0xe9, 0x99, 0x6d, 0x9b, 0x8e, 0xad, 0xd3, 0x5c, 0x5b, 0xe0, 0xae, + 0x7b, 0xce, 0x39, 0x6f, 0xbe, 0x0a, 0x4a, 0x01, 0x05, 0x32, 0x1b, 0x7f, 0x73, 0x3b, 0x11, 0x89, + 0x66, 0xfd, 0xef, 0x19, 0x28, 0x47, 0x1b, 0x30, 0xd6, 0xcf, 0xea, 0x3c, 0x4b, 0xa7, 0xf8, 0x21, + 0x7e, 0xeb, 0x2b, 0xdb, 0xb5, 0x46, 0x8b, 0xa5, 0xef, 0xc3, 0xbc, 0x68, 0x8b, 0x14, 0xa1, 0xc9, + 0x4a, 0x27, 0x3b, 0xb6, 0x54, 0x34, 0xdb, 0x05, 0xc5, 0x1f, 0x61, 0xae, 0xca, 0x3f, 0x75, 0xb9, + 0xed, 0x3c, 0xb7, 0x7d, 0xeb, 0xab, 0x6d, 0x3f, 0x1e, 0x72, 0xe3, 0xc5, 0xc7, 0x43, 0xf5, 0xa4, + 0xaf, 0xf4, 0x9a, 0x5d, 0xc5, 0x57, 0x27, 0x57, 0x21, 0x6b, 0x6a, 0x9f, 0x9f, 0xc7, 0x13, 0x3d, + 0x17, 0x5d, 0x34, 0xf0, 0x68, 0x81, 0xbd, 0x58, 0xc4, 0xd3, 0x2b, 0x17, 0x7d, 0x83, 0x07, 0xe0, + 0x3e, 0xe4, 0x78, 0xbc, 0x08, 0x80, 0x1f, 0x31, 0xe9, 0x3b, 0xa4, 0x00, 0xd9, 0x56, 0x5f, 0x61, + 0x87, 0x00, 0x59, 0x2f, 0xa4, 0xea, 0xa0, 0x23, 0xb7, 0xf0, 0x1c, 0xd4, 0xdf, 0x87, 0xbc, 0x08, + 0x02, 0x3b, 0x20, 0x61, 0x18, 0x50, 0x49, 0x0c, 0x7d, 0x1b, 0xa9, 0x60, 0x76, 0xdc, 0x3b, 0x92, + 0x15, 0x29, 0x1d, 0xdd, 0xde, 0x3f, 0xa7, 0xa0, 0x14, 0xe9, 0x87, 0x58, 0x25, 0xd6, 0x4c, 0xd3, + 0x7e, 0xa1, 0x6a, 0xa6, 0x81, 0x09, 0x46, 0xec, 0x0f, 0x70, 0x51, 0x93, 0x49, 0x2e, 0x1a, 0xbf, + 0xff, 0x09, 0x37, 0xff, 0x90, 0x02, 0x69, 0xbd, 0x97, 0x5a, 0x73, 0x30, 0xf5, 0xad, 0x3a, 0xf8, + 0xfb, 0x14, 0x54, 0xe2, 0x0d, 0xd4, 0x9a, 0x7b, 0x37, 0xbf, 0x55, 0xf7, 0x7e, 0x97, 0x82, 
0xdd, + 0x58, 0xdb, 0xf4, 0x7f, 0xe5, 0xdd, 0x6f, 0x33, 0x70, 0x29, 0x41, 0x0f, 0x13, 0x90, 0xe8, 0x2f, + 0x45, 0xcb, 0xfb, 0xbd, 0x8b, 0x7c, 0xab, 0xc1, 0xca, 0xd7, 0x00, 0x2f, 0xf9, 0x7e, 0x3b, 0x8a, + 0xe5, 0xce, 0x98, 0x60, 0x52, 0x35, 0xa6, 0x06, 0x76, 0x5f, 0xe2, 0xc2, 0x21, 0x9a, 0xce, 0xea, + 0x4a, 0x2e, 0x6e, 0xb7, 0xdf, 0x05, 0xb2, 0xb0, 0x5d, 0xc3, 0x33, 0x9e, 0xb3, 0xd7, 0xb5, 0xe0, + 0x1e, 0xcc, 0x9a, 0xd0, 0xac, 0x22, 0x05, 0x33, 0x1d, 0xcb, 0x0b, 0xd1, 0x16, 0x9d, 0x69, 0x6b, + 0x68, 0x96, 0x86, 0x32, 0x8a, 0x14, 0xcc, 0x84, 0x68, 0xec, 0x13, 0x27, 0xf6, 0x92, 0xd5, 0x73, + 0x81, 0x63, 0x59, 0x2f, 0xa5, 0x94, 0x84, 0x2c, 0x84, 0xf8, 0x0d, 0xd7, 0xea, 0x02, 0x5e, 0x56, + 0x4a, 0x42, 0x26, 0x20, 0x77, 0xa0, 0xaa, 0xcd, 0x66, 0x0e, 0x33, 0x1e, 0x18, 0x12, 0x5d, 0x64, + 0x25, 0x14, 0x73, 0xe0, 0xc1, 0x63, 0x28, 0x04, 0x71, 0x60, 0x85, 0x85, 0x45, 0x02, 0xdb, 0x27, + 0xfe, 0x0c, 0x92, 0x66, 0x77, 0x72, 0x2b, 0x98, 0xc4, 0x8f, 0x1a, 0xae, 0xba, 0x7a, 0x8f, 0x4b, + 0xe3, 0x7c, 0x41, 0x29, 0x19, 0x6e, 0xf8, 0x00, 0x53, 0xff, 0x12, 0xcb, 0x6b, 0xfc, 0x3d, 0x91, + 0xb4, 0xa1, 0x60, 0xda, 0xc8, 0x0f, 0xa6, 0x21, 0x1e, 0xb3, 0xef, 0xbe, 0xe2, 0x09, 0xb2, 0xd1, + 0xf5, 0xf1, 0x4a, 0xa8, 0x79, 0xf0, 0xd7, 0x14, 0x14, 0x02, 0x31, 0x16, 0x8a, 0xec, 0x42, 0xf3, + 0xce, 0xb8, 0xb9, 0xdc, 0x51, 0x5a, 0x4a, 0x29, 0x7c, 0xcc, 0xe4, 0xd8, 0x8c, 0x58, 0x9c, 0x02, + 0xbe, 0x9c, 0x8d, 0xd9, 0xbe, 0x9a, 0x54, 0x9b, 0xf0, 0xfe, 0xd4, 0x9e, 0xcf, 0x71, 0x27, 0xdd, + 0x60, 0x5f, 0x7d, 0x79, 0xcb, 0x17, 0xb3, 0x67, 0x6d, 0xcf, 0xd1, 0x0c, 0x33, 0x86, 0xcd, 0x72, + 0xac, 0x14, 0x4c, 0x84, 0xe0, 0x43, 0xb8, 0x1a, 0xd8, 0x9d, 0x50, 0x4f, 0xc3, 0xde, 0x77, 0xb2, + 0x52, 0xca, 0xf3, 0xc7, 0xaa, 0x2b, 0x3e, 0xa0, 0xed, 0xcf, 0x07, 0xba, 0xf5, 0xbf, 0xa5, 0x60, + 0x2f, 0xe8, 0xa8, 0x27, 0x61, 0xb0, 0x7a, 0x00, 0x9a, 0x65, 0xd9, 0x5e, 0x34, 0x5c, 0x9b, 0x54, + 0xde, 0xd0, 0x6b, 0x34, 0x43, 0x25, 0x25, 0x62, 0xe0, 0x60, 0x0e, 0xb0, 0x9a, 0xd9, 0x1a, 0x36, + 0x4c, 0xee, 0xfe, 0x63, 0x31, 0xff, 0x8b, 0x83, 0xb8, 0x86, 0x81, 0x10, 0xb1, 0xd6, 0x9b, 0x3d, + 0x8c, 0x9d, 0xd2, 0x99, 0x61, 0xf9, 0x4f, 0x58, 0x62, 0x10, 0x3c, 0x8c, 0x65, 0xc3, 0x87, 0xb1, + 0xa3, 0x27, 0xd8, 0x5b, 0xdb, 0xf3, 0x75, 0x77, 0x8f, 0xa4, 0xb5, 0xab, 0xa0, 0xfb, 0x49, 0xea, + 0x33, 0x58, 0x35, 0x4a, 0x5f, 0xa4, 0x33, 0xc7, 0x83, 0xa3, 0x3f, 0xa6, 0x0f, 0x8e, 0x85, 0xde, + 0x20, 0x58, 0xa6, 0x42, 0xa7, 0x26, 0xd5, 0x99, 0xeb, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x51, + 0xab, 0xa7, 0x9f, 0x56, 0x19, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index 0e5d2b70d..095891c0b 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -626,6 +626,7 @@ func (g *Generator) CommandLineParameters(parameter string) { } } } + if pluginList != "" { // Amend the set of plugins. 
 		enabled := make(map[string]bool)
@@ -1180,6 +1181,7 @@ func (g *Generator) generate(file *FileDescriptor) {
 		g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
 		g.P()
 	}
+
 	for _, td := range g.file.imp {
 		g.generateImported(td)
 	}
@@ -1549,11 +1551,7 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor
 		enum += CamelCaseSlice(obj.TypeName())
 	}
 	packed := ""
-	if (field.Options != nil && field.Options.GetPacked()) ||
-		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
-		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
-		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
-			isRepeated(field) && isScalar(field)) {
+	if field.Options != nil && field.Options.GetPacked() {
 		packed = ",packed"
 	}
 	fieldName := field.GetName()
@@ -2734,32 +2732,6 @@ func isRepeated(field *descriptor.FieldDescriptorProto) bool {
 	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
 }
 
-// Is this field a scalar numeric type?
-func isScalar(field *descriptor.FieldDescriptorProto) bool {
-	if field.Type == nil {
-		return false
-	}
-	switch *field.Type {
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
-		descriptor.FieldDescriptorProto_TYPE_FLOAT,
-		descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32,
-		descriptor.FieldDescriptorProto_TYPE_BOOL,
-		descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_ENUM,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT64:
-		return true
-	default:
-		return false
-	}
-}
-
 // badToUnderscore is the mapping function used to generate Go names from package names,
 // which can be dotted in the input .proto file. It replaces non-identifier characters such as
 // dot or dash with underscore.
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
index 2660e47a2..291cbdb46 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
@@ -48,7 +48,7 @@ import (
 // It is incremented whenever an incompatibility between the generated code and
 // the grpc package is introduced; the generated code references
 // a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
-const generatedCodeVersion = 4
+const generatedCodeVersion = 2
 
 // Paths for packages used by code generated in this file,
 // relative to the import_prefix of the generator.Generator.
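Note on the hunk above: generatedCodeVersion is the N in the grpc.SupportPackageIsVersionN constant that protoc-gen-go stamps into every generated service file, so an incompatible generator/runtime pairing surfaces as a compile error rather than a runtime failure. A minimal sketch of the emitted guard after this downgrade to version 2 (illustrative only, not part of the patch itself):

	import "google.golang.org/grpc"

	// Compile-time assertion written by protoc-gen-go into generated stubs:
	// the build breaks if the linked grpc package does not export
	// SupportPackageIsVersion2, i.e. if it is too old or too new for the
	// generator that produced this file.
	const _ = grpc.SupportPackageIsVersion2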
@@ -254,7 +254,6 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P("},") } g.P("},") - g.P("Metadata: \"", file.GetName(), "\",") g.P("}") g.P() } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go index 0ff4e13a8..7b0293467 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -205,25 +205,25 @@ func init() { func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 310 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x51, 0xc1, 0x4a, 0xc3, 0x40, - 0x10, 0x25, 0xb6, 0x22, 0x19, 0xa5, 0x95, 0xa5, 0xc2, 0x52, 0x7a, 0x08, 0x45, 0x31, 0xa7, 0x14, - 0x44, 0xf0, 0xde, 0x8a, 0x7a, 0x2c, 0xc1, 0x93, 0x20, 0x21, 0xa6, 0xd3, 0xb0, 0x90, 0xec, 0xac, - 0xb3, 0xdb, 0x2f, 0xf2, 0x9f, 0xfc, 0x1e, 0xd9, 0x4d, 0x5b, 0xa5, 0xd8, 0xdb, 0xce, 0x7b, 0x6f, - 0xe6, 0xbd, 0x9d, 0x81, 0x9b, 0x9a, 0xa8, 0x6e, 0x70, 0x66, 0x98, 0x1c, 0x7d, 0x6c, 0xd6, 0xb3, - 0x8a, 0x5a, 0xa3, 0x1a, 0xe4, 0x99, 0x69, 0x36, 0xb5, 0xd2, 0x59, 0x20, 0x84, 0xec, 0x64, 0xd9, - 0x4e, 0x96, 0xed, 0x64, 0xe3, 0xe4, 0x70, 0xc0, 0x0a, 0x6d, 0xc5, 0xca, 0x38, 0xe2, 0x4e, 0x3d, - 0xfd, 0x8a, 0x60, 0xb4, 0xa0, 0x15, 0x3e, 0xa3, 0x46, 0x2e, 0x1d, 0x71, 0x8e, 0x9f, 0x1b, 0xb4, - 0x4e, 0xa4, 0x70, 0xb9, 0x56, 0x0d, 0x16, 0x8e, 0x8a, 0xba, 0xe3, 0x50, 0x46, 0x49, 0x2f, 0x8d, - 0xf3, 0x81, 0xc7, 0x5f, 0x69, 0xdb, 0x81, 0x62, 0x02, 0xb1, 0x29, 0xb9, 0x6c, 0xd1, 0x21, 0xcb, - 0x93, 0x24, 0x4a, 0xe3, 0xfc, 0x17, 0x10, 0x0b, 0x80, 0xe0, 0x54, 0xf8, 0x2e, 0x39, 0x4c, 0x7a, - 0xe9, 0xf9, 0xdd, 0x75, 0x76, 0x98, 0xf8, 0x49, 0x35, 0xf8, 0xb8, 0xcf, 0xb6, 0xf4, 0x70, 0x1e, - 0x07, 0xd6, 0x33, 0xd3, 0xef, 0x08, 0xae, 0x0e, 0x52, 0x5a, 0x43, 0xda, 0xa2, 0x18, 0xc1, 0x29, - 0x32, 0x13, 0xcb, 0x28, 0x18, 0x77, 0x85, 0x78, 0x81, 0xfe, 0x1f, 0xbb, 0xfb, 0xec, 0xd8, 0x82, - 0xb2, 0x7f, 0x87, 0x86, 0x34, 0x79, 0x98, 0x30, 0x7e, 0x87, 0xbe, 0xaf, 0x84, 0x80, 0xbe, 0x2e, - 0x5b, 0xdc, 0xda, 0x84, 0xb7, 0xb8, 0x85, 0xa1, 0xd2, 0x16, 0xd9, 0x29, 0xd2, 0x85, 0x21, 0xa5, - 0xdd, 0xf6, 0xfb, 0x83, 0x3d, 0xbc, 0xf4, 0xa8, 0x90, 0x70, 0x56, 0x91, 0x76, 0xa8, 0x9d, 0x1c, - 0x06, 0xc1, 0xae, 0x9c, 0x3f, 0xc0, 0xa4, 0xa2, 0xf6, 0x68, 0xbe, 0xf9, 0xc5, 0x32, 0x1c, 0x3a, - 0x2c, 0xc4, 0xbe, 0xc5, 0xdd, 0xd9, 0x8b, 0x9a, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x83, 0x7b, - 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0xd1, 0x4a, 0xfb, 0x30, + 0x14, 0xc6, 0xe9, 0xff, 0x3f, 0x91, 0x1d, 0x65, 0x93, 0x30, 0xa1, 0x8c, 0x5d, 0x94, 0xa1, 0xb8, + 0xab, 0x14, 0x44, 0xf0, 0x7e, 0x13, 0xf5, 0xb2, 0x14, 0xaf, 0x04, 0x29, 0xb5, 0x3b, 0x2b, 0x81, + 0x2e, 0x27, 0xa6, 0xe9, 0x13, 0xf9, 0x4e, 0x3e, 0x8f, 0x49, 0xda, 0x4e, 0x29, 0xee, 0xaa, 0x3d, + 0xdf, 0xf9, 0xe5, 0x3b, 0x5f, 0x72, 0xe0, 0xba, 0x24, 0x2a, 0x2b, 0x8c, 0x95, 0x26, 0x43, 0xef, + 0xcd, 0x2e, 0x2e, 0x68, 0xaf, 0x44, 0x85, 0x3a, 0x56, 0x55, 0x53, 0x0a, 0xc9, 0x7d, 0x83, 0x85, + 0x2d, 0xc6, 0x7b, 0x8c, 0xf7, 0xd8, 0x3c, 0x1a, 0x1a, 0x6c, 0xb1, 0x2e, 0xb4, 0x50, 0x86, 0x74, + 0x4b, 0x2f, 0x3f, 0x03, 0x98, 0x6d, 0x68, 0x8b, 0x4f, 0x28, 0x51, 0xe7, 0x56, 0x4f, 0xf1, 0xa3, + 0xc1, 0xda, 0xb0, 0x15, 0x5c, 0xec, 0xac, 0x47, 0x66, 0x28, 0x2b, 0xdb, 0x1e, 0x86, 0x41, 0xf4, 
+ 0x7f, 0x35, 0x4e, 0x27, 0x4e, 0x7f, 0xa1, 0xee, 0x04, 0xb2, 0x05, 0x8c, 0x55, 0xae, 0xf3, 0x3d, + 0x1a, 0xd4, 0xe1, 0xbf, 0x28, 0xb0, 0xc8, 0x8f, 0xc0, 0x36, 0x00, 0x7e, 0x52, 0xe6, 0x4e, 0x85, + 0x53, 0xeb, 0x70, 0x76, 0x7b, 0xc5, 0x87, 0x89, 0x1f, 0x6d, 0xf3, 0xe1, 0x90, 0x2d, 0x71, 0xb2, + 0x35, 0x71, 0x1f, 0xd7, 0x59, 0x7e, 0x05, 0x70, 0x39, 0x48, 0x59, 0x2b, 0x92, 0x35, 0xb2, 0x19, + 0x9c, 0xa0, 0xd6, 0xa4, 0x6d, 0x36, 0x37, 0xb8, 0x2d, 0xd8, 0x33, 0x8c, 0x7e, 0x8d, 0xbb, 0xe3, + 0xc7, 0x1e, 0x88, 0xff, 0x69, 0xea, 0xd3, 0xa4, 0xde, 0x61, 0xfe, 0x06, 0x23, 0x57, 0x31, 0x06, + 0x23, 0x69, 0x6f, 0xd4, 0x8d, 0xf1, 0xff, 0xec, 0x06, 0xa6, 0xc2, 0xe2, 0xda, 0x08, 0x92, 0x99, + 0x22, 0x21, 0x4d, 0x77, 0xfd, 0xc9, 0x41, 0x4e, 0x9c, 0xca, 0x42, 0x38, 0x2d, 0x48, 0x1a, 0xb4, + 0xc0, 0xd4, 0x03, 0x7d, 0xb9, 0xbe, 0x87, 0x85, 0xcd, 0x72, 0x34, 0xdf, 0xfa, 0x3c, 0xf1, 0x8b, + 0xf6, 0x0f, 0x52, 0xbf, 0x8e, 0xdb, 0xb5, 0x67, 0x25, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0x83, + 0x7b, 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile index b1ac45c77..a85cc5655 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile @@ -47,7 +47,6 @@ golden: make -B my_test/test.pb.go sed -i '/return.*fileDescriptor/d' my_test/test.pb.go sed -i '/^var fileDescriptor/,/^}/d' my_test/test.pb.go - sed -i '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go gofmt -w my_test/test.pb.go diff -w my_test/test.pb.go my_test/test.pb.go.golden @@ -58,7 +57,7 @@ testbuild: regenerate regenerate: # Invoke protoc once to generate three independent .pb.go files in the same package. - protoc --go_out=. multi/multi1.proto multi/multi2.proto multi/multi3.proto + protoc --go_out=. multi/multi{1,2,3}.proto #extension_test: extension_test.$O # $(LD) -L. -o $@ $< diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go index 9ec3e1292..997743beb 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -33,9 +33,7 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion1
 
 type HatType int32
 
@@ -284,10 +282,10 @@ func (m *Request_SomeGroup) GetGroupField() int32 {
 }
 
 type Reply struct {
-	Found                        []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
-	CompactKeys                  []int32        `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	Found            []*Reply_Entry            `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+	CompactKeys      []int32                   `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *Reply) Reset() { *m = Reply{} }
@@ -301,6 +299,12 @@ var extRange_Reply = []proto.ExtensionRange{
 func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_Reply
 }
+func (m *Reply) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 func (m *Reply) GetFound() []*Reply_Entry {
 	if m != nil {
@@ -351,9 +355,9 @@ func (m *Reply_Entry) GetXMyFieldName_2() int64 {
 }
 
 type OtherBase struct {
-	Name                         *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	Name             *string                   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *OtherBase) Reset() { *m = OtherBase{} }
@@ -367,6 +371,12 @@ var extRange_OtherBase = []proto.ExtensionRange{
 func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_OtherBase
 }
+func (m *OtherBase) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 func (m *OtherBase) GetName() string {
 	if m != nil && m.Name != nil {
@@ -424,8 +434,8 @@ func (m *OtherReplyExtensions) GetKey() int32 {
 }
 
 type OldReply struct {
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *OldReply) Reset() { *m = OldReply{} }
@@ -433,16 +443,16 @@ func (m *OldReply) String() string { return proto.CompactTextString(m) }
 func (*OldReply) ProtoMessage() {}
 
 func (m *OldReply) Marshal() ([]byte, error) {
-	return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+	return proto.MarshalMessageSet(m.ExtensionMap())
 }
 func (m *OldReply) Unmarshal(buf []byte) error {
-	return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+	return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
 }
 func (m *OldReply) MarshalJSON() ([]byte, error) {
-	return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+	return proto.MarshalMessageSetJSON(m.XXX_extensions)
 }
 func (m *OldReply) UnmarshalJSON(buf []byte) error {
-	return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+	return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
 }
 
 // ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
@@ -456,6 +466,12 @@ var extRange_OldReply = []proto.ExtensionRange{
 func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_OldReply
 }
+func (m *OldReply) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 type Communique struct {
 	MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
index 9ec3e1292..997743beb 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
@@ -33,9 +33,7 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion1
 
 type HatType int32
 
@@ -284,10 +282,10 @@ func (m *Request_SomeGroup) GetGroupField() int32 {
 }
 
 type Reply struct {
-	Found                        []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
-	CompactKeys                  []int32        `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	Found            []*Reply_Entry            `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+	CompactKeys      []int32                   `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *Reply) Reset() { *m = Reply{} }
@@ -301,6 +299,12 @@ var extRange_Reply = []proto.ExtensionRange{
 func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_Reply
 }
+func (m *Reply) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 func (m *Reply) GetFound() []*Reply_Entry {
 	if m != nil {
@@ -351,9 +355,9 @@ func (m *Reply_Entry) GetXMyFieldName_2() int64 {
 }
 
 type OtherBase struct {
-	Name                         *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	Name             *string                   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *OtherBase) Reset() { *m = OtherBase{} }
@@ -367,6 +371,12 @@ var extRange_OtherBase = []proto.ExtensionRange{
 func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_OtherBase
 }
+func (m *OtherBase) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 func (m *OtherBase) GetName() string {
 	if m != nil && m.Name != nil {
@@ -424,8 +434,8 @@ func (m *OtherReplyExtensions) GetKey() int32 {
 }
 
 type OldReply struct {
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
 }
 
 func (m *OldReply) Reset() { *m = OldReply{} }
@@ -433,16 +443,16 @@ func (m *OldReply) String() string { return proto.CompactTextString(m) }
 func (*OldReply) ProtoMessage() {}
 
 func (m *OldReply) Marshal() ([]byte, error) {
-	return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+	return proto.MarshalMessageSet(m.ExtensionMap())
 }
 func (m *OldReply) Unmarshal(buf []byte) error {
-	return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+	return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
 }
 func (m *OldReply) MarshalJSON() ([]byte, error) {
-	return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+	return proto.MarshalMessageSetJSON(m.XXX_extensions)
 }
 func (m *OldReply) UnmarshalJSON(buf []byte) error {
-	return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+	return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
 }
 
 // ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
@@ -456,6 +466,12 @@ var extRange_OldReply = []proto.ExtensionRange{
 func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_OldReply
 }
+func (m *OldReply) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
 
 type Communique struct {
 	MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
index 869b9af5a..c994914e4 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
@@ -44,7 +44,6 @@ message Request {
   repeated int64 key = 2;
   Flavour taste = 3;
   Book book = 4;
-  repeated int64 unpacked = 5 [packed=false];
 }
 
 message Book {
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index f2c6906b9..72490daf4 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -53,16 +53,6 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 //      foo = any.unpack(Foo.class);
 //    }
 //
-//  Example 3: Pack and unpack a message in Python.
-//
-//     foo = Foo(...)
-//     any = Any()
-//     any.Pack(foo)
-//     ...
-//     if any.Is(Foo.DESCRIPTOR):
-//       any.Unpack(foo)
-//       ...
-//
 // The pack methods provided by protobuf library will by default use
 // 'type.googleapis.com/full.type.name' as the type URL and the unpack
 // methods only use the fully qualified type name after the last '/'
@@ -102,10 +92,10 @@ type Any struct {
 	// A URL/resource name whose content describes the type of the
 	// serialized protocol buffer message.
 	//
-	// For URLs which use the scheme `http`, `https`, or no scheme, the
+	// For URLs which use the schema `http`, `https`, or no schema, the
 	// following restrictions and interpretations apply:
 	//
-	// * If no scheme is provided, `https` is assumed.
+	// * If no schema is provided, `https` is assumed.
 	// * The last segment of the URL's path must represent the fully
 	//   qualified name of the type (as in `path/google.protobuf.Duration`).
 	//   The name should be in a canonical form (e.g., leading "." is
@@ -118,7 +108,7 @@ type Any struct {
 	//   on changes to types. (Use versioned type names to manage
 	//   breaking changes.)
 	//
-	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// Schemas other than `http`, `https` (or the empty schema) might be
 	// used with implementation specific semantics.
// TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` @@ -139,17 +129,17 @@ func init() { func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto + // 184 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, - 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, - 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, - 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, - 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, - 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0x04, 0x14, 0xe7, 0x09, 0x82, 0x70, 0x9c, + 0x8a, 0xb8, 0x84, 0x81, 0x96, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0x01, 0x34, 0x2c, 0x00, 0xc4, 0x09, + 0x60, 0x8c, 0x52, 0x25, 0xca, 0x71, 0x0b, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, + 0x62, 0x92, 0x73, 0x87, 0x98, 0x16, 0x00, 0x55, 0xa5, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, + 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x9d, 0xc4, 0x06, 0xd6, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 81dcf46cc..45db6ede3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -65,16 +65,6 @@ option objc_class_prefix = "GPB"; // foo = any.unpack(Foo.class); // } // -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' @@ -114,10 +104,10 @@ message Any { // A URL/resource name whose content describes the type of the // serialized protocol buffer message. // - // For URLs which use the scheme `http`, `https`, or no scheme, the + // For URLs which use the schema `http`, `https`, or no schema, the // following restrictions and interpretations apply: // - // * If no scheme is provided, `https` is assumed. + // * If no schema is provided, `https` is assumed. // * The last segment of the URL's path must represent the fully // qualified name of the type (as in `path/google.protobuf.Duration`). // The name should be in a canonical form (e.g., leading "." is @@ -130,7 +120,7 @@ message Any { // on changes to types. 
(Use versioned type names to manage // breaking changes.) // - // Schemes other than `http`, `https` (or the empty scheme) might be + // Schemas other than `http`, `https` (or the empty schema) might be // used with implementation specific semantics. // string type_url = 1; diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 569748346..ee7d8b8ac 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -98,17 +98,17 @@ func init() { } var fileDescriptor0 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto + // 187 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, - 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, - 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, - 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, - 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, - 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b, 0x10, + 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x0c, 0x74, 0x82, 0x1e, 0x9a, 0x91, 0x4e, 0xbc, 0x30, 0x03, 0x03, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0x5a, 0xc4, 0xbb, 0x77, 0x01, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, + 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0x73, 0x03, 0xa0, 0x4a, 0xf5, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x5a, 0x92, 0xd8, 0xc0, 0x66, 0x18, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go index 46c765a96..d49c09bc0 100644 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -55,15 +55,15 @@ func init() { } var fileDescriptor0 = []byte{ - // 150 bytes of a gzipped FileDescriptorProto + // 148 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, - 0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, - 0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e, - 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, - 0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 
0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, - 0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, - 0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, - 0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00, + 0x05, 0x32, 0x20, 0xa4, 0x1e, 0x58, 0x4e, 0x88, 0x3f, 0x3d, 0x3f, 0x3f, 0x3d, 0x27, 0x55, 0x0f, + 0xa6, 0x52, 0x89, 0x9d, 0x8b, 0xd5, 0x15, 0x24, 0xef, 0x54, 0xc9, 0x25, 0x0c, 0x34, 0x49, 0x0f, + 0x4d, 0xde, 0x89, 0x0b, 0x2c, 0x1b, 0x00, 0xe2, 0x06, 0x30, 0x46, 0xa9, 0x13, 0x69, 0xe7, 0x02, + 0x46, 0xc6, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x0c, 0x0d, 0x80, 0x2a, 0xd5, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, + 0x69, 0x49, 0x62, 0x03, 0x9b, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, 0xf4, 0x0e, + 0xd2, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go index 197042ed5..0b28e475e 100644 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -352,31 +352,31 @@ func init() { } var fileDescriptor0 = []byte{ - // 416 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09, - 0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52, - 0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x8a, 0x47, 0x8f, - 0xfe, 0x42, 0x99, 0x99, 0x24, 0x4a, 0x4b, 0xc1, 0xd3, 0xf4, 0xbd, 0xf9, 0xde, 0x37, 0xef, 0xbd, - 0x06, 0x9e, 0x97, 0x15, 0xff, 0x2c, 0x32, 0x3f, 0x67, 0x9b, 0xa0, 0x64, 0x64, 0x41, 0xcb, 0xa0, - 0x6e, 0x18, 0x67, 0x99, 0x58, 0x05, 0x35, 0xdf, 0xd6, 0x45, 0x1b, 0xb4, 0xbc, 0x11, 0x39, 0xef, - 0x0e, 0x5f, 0xdd, 0xe2, 0x3b, 0x25, 0x63, 0x25, 0x29, 0xfc, 0x9e, 0x9d, 0x7e, 0x47, 0x60, 0xbd, - 0x57, 0x04, 0x0e, 0xc1, 0x5a, 0x55, 0x05, 0x59, 0xb6, 0x13, 0xe4, 0x9a, 0x9e, 0x33, 0x3b, 0xf3, - 0x77, 0x60, 0x5f, 0x83, 0xfe, 0x1b, 0x45, 0x9d, 0x53, 0xde, 0x6c, 0xd3, 0xae, 0xe4, 0xf4, 0x1d, - 0x38, 0xff, 0xa4, 0xf1, 0x09, 0x98, 0xeb, 0x62, 0x3b, 0x41, 0x2e, 0xf2, 0xec, 0x54, 0xfe, 0xc4, - 0x4f, 0x60, 0xfc, 0x65, 0x41, 0x44, 0x31, 0x31, 0x5c, 0xe4, 0x39, 0xb3, 0x7b, 0x7b, 0xf2, 0x1b, - 0x79, 0x9b, 0x6a, 0xe8, 0xa5, 0xf1, 0x02, 0x4d, 0x7f, 0x1b, 0x30, 0x56, 0x49, 0x1c, 0x02, 0x50, - 0x41, 0xc8, 0x5c, 0x0b, 0xa4, 0xf4, 0x78, 0x76, 0xba, 0x27, 0xb8, 0x12, 0x84, 0x28, 0xfe, 0x72, - 0x94, 0xda, 0xb4, 0x0f, 0xf0, 0x19, 0xdc, 0xa6, 0x62, 0x93, 0x15, 0xcd, 0xfc, 0xef, 0xfb, 0xe8, - 0x72, 0x94, 0x3a, 0x3a, 0x3b, 0x40, 0x2d, 0x6f, 0x2a, 0x5a, 0x76, 0x90, 0x29, 0x1b, 0x97, 0x90, - 0xce, 0x6a, 0xe8, 0x11, 0x40, 0xc6, 0x58, 0xdf, 0xc6, 0x91, 0x8b, 0xbc, 0x5b, 0xf2, 0x29, 0x99, - 0xd3, 0xc0, 0x2b, 0x65, 0x11, 0x39, 0xef, 0x90, 0xb1, 0x1a, 0xf5, 0xfe, 0x81, 0x3d, 0x76, 0x7a, - 0x91, 0xf3, 0x61, 0x4a, 0x52, 0xb5, 0x7d, 0xad, 0xa5, 0x6a, 0xf7, 0xa7, 0x8c, 0xab, 0x96, 0x0f, - 0x53, 0x92, 0x3e, 0x88, 0x2c, 0x38, 0x5a, 0x57, 0x74, 0x39, 0x0d, 0xc1, 0x1e, 0x08, 0xec, 0x83, - 0xa5, 0x64, 0xfd, 0x3f, 0x7a, 0x68, 0xe9, 0x1d, 0xf5, 0xf8, 0x01, 0xd8, 0xc3, 0x12, 0xf1, 0x31, - 0xc0, 0xd5, 0x75, 0x1c, 0xcf, 0x6f, 0x5e, 0xc7, 0xd7, 0xe7, 0x27, 0xa3, 0xe8, 0x1b, 0x82, 0xbb, - 0x39, 0xdb, 0xec, 0x2a, 0x22, 0x47, 0x4f, 0x93, 0xc8, 0x38, 0x41, 0x9f, 0x9e, 
0xfe, 0xef, 0x87, - 0x19, 0xea, 0xa3, 0xce, 0x7e, 0x20, 0xf4, 0xd3, 0x30, 0x2f, 0x92, 0xe8, 0x97, 0xf1, 0xf0, 0x42, - 0xcb, 0x93, 0xbe, 0xbf, 0x8f, 0x05, 0x21, 0x6f, 0x29, 0xfb, 0x4a, 0x3f, 0xc8, 0xca, 0xcc, 0x52, - 0xaa, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, + // 412 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0x49, 0x1b, 0xcc, 0x8b, 0xd4, 0x12, 0x41, 0x4b, 0x05, 0x95, 0xf6, 0x52, 0x44, + 0x12, 0xac, 0x08, 0x62, 0xbd, 0x18, 0xa8, 0x15, 0x0c, 0x25, 0x46, 0x5b, 0xc1, 0x4b, 0x69, 0xda, + 0x34, 0x86, 0x4e, 0x67, 0x42, 0x7e, 0x28, 0x3d, 0xfa, 0x5f, 0x78, 0x5c, 0xf6, 0xb8, 0xc7, 0xfd, + 0x0b, 0x77, 0x7e, 0x24, 0xd9, 0xa5, 0xa5, 0xb0, 0xa7, 0x99, 0xf7, 0x9d, 0xcf, 0xfb, 0xce, 0x7b, + 0x6f, 0x06, 0xde, 0x45, 0x71, 0xfe, 0xbb, 0x08, 0xac, 0x35, 0xdd, 0xdb, 0x11, 0xc5, 0x2b, 0x12, + 0xd9, 0x49, 0x4a, 0x73, 0x1a, 0x14, 0x5b, 0x3b, 0xc9, 0x0f, 0x49, 0x98, 0xd9, 0x59, 0x9e, 0x16, + 0xeb, 0xbc, 0x5c, 0x2c, 0x71, 0x6a, 0x3e, 0x8a, 0x28, 0x8d, 0x70, 0x68, 0x55, 0x6c, 0xff, 0x3f, + 0x02, 0xed, 0xbb, 0x20, 0xcc, 0x31, 0x68, 0xdb, 0x38, 0xc4, 0x9b, 0xac, 0x8b, 0x5e, 0xaa, 0x43, + 0x63, 0x34, 0xb0, 0x8e, 0x60, 0x4b, 0x82, 0xd6, 0x67, 0x41, 0x4d, 0x48, 0x9e, 0x1e, 0xfc, 0x32, + 0xa5, 0xf7, 0x0d, 0x8c, 0x3b, 0xb2, 0xd9, 0x01, 0x75, 0x17, 0x1e, 0x98, 0x11, 0x1a, 0xea, 0x3e, + 0xdf, 0x9a, 0xaf, 0xa1, 0xf5, 0x67, 0x85, 0x8b, 0xb0, 0xab, 0x30, 0xcd, 0x18, 0x3d, 0x39, 0x31, + 0x5f, 0xf0, 0x53, 0x5f, 0x42, 0x1f, 0x94, 0xf7, 0xa8, 0x7f, 0xad, 0x40, 0x4b, 0x88, 0xac, 0x32, + 0x20, 0x05, 0xc6, 0x4b, 0x69, 0xc0, 0x4d, 0xdb, 0xa3, 0xde, 0x89, 0xc1, 0x8c, 0x21, 0x82, 0xff, + 0xd2, 0xf0, 0x75, 0x52, 0x05, 0xe6, 0x00, 0x1e, 0x92, 0x62, 0x1f, 0x84, 0xe9, 0xf2, 0xf6, 0x7e, + 0xc4, 0x10, 0x43, 0xaa, 0x35, 0xc4, 0xe6, 0x14, 0x93, 0xa8, 0x84, 0x54, 0x5e, 0x38, 0x87, 0xa4, + 0x2a, 0xa1, 0x17, 0x00, 0x01, 0xa5, 0x55, 0x19, 0x4d, 0x86, 0x3c, 0xe0, 0x57, 0x71, 0x4d, 0x02, + 0x1f, 0x85, 0x0b, 0x1b, 0x51, 0x89, 0xb4, 0x44, 0xab, 0x4f, 0xcf, 0xcc, 0xb1, 0xb4, 0x67, 0xbb, + 0xba, 0x4b, 0x1c, 0x67, 0x55, 0xae, 0x26, 0x72, 0x4f, 0xbb, 0x74, 0x19, 0x52, 0x77, 0x89, 0xab, + 0xc0, 0xd1, 0xa0, 0xb9, 0x8b, 0xc9, 0xa6, 0x3f, 0x06, 0xbd, 0x26, 0x4c, 0x0b, 0x34, 0x61, 0x56, + 0xbd, 0xe8, 0xb9, 0xa1, 0x97, 0xd4, 0xab, 0x67, 0xa0, 0xd7, 0x43, 0x34, 0xdb, 0x00, 0xb3, 0xb9, + 0xeb, 0x2e, 0x17, 0x9f, 0xdc, 0xf9, 0xa4, 0xd3, 0x70, 0xfe, 0x21, 0x78, 0xcc, 0x7e, 0xdb, 0xb1, + 0x85, 0x63, 0xc8, 0x6e, 0x3c, 0x1e, 0x7b, 0xe8, 0xd7, 0x9b, 0xfb, 0x7e, 0xcc, 0xb1, 0x5c, 0x92, + 0xe0, 0x02, 0xa1, 0x4b, 0x45, 0x9d, 0x7a, 0xce, 0x95, 0xf2, 0x7c, 0x2a, 0xcd, 0xbd, 0xaa, 0xbe, + 0x9f, 0x21, 0xc6, 0x5f, 0x09, 0xfd, 0x4b, 0x7e, 0xf0, 0xcc, 0x40, 0x13, 0x56, 0x6f, 0x6f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index ffcc51594..588348c33 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -110,18 +110,17 @@ func init() { } var fileDescriptor0 = []byte{ - // 194 bytes of a gzipped FileDescriptorProto + // 192 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 
0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, - 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, - 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, - 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, - 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, - 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, - 0x00, 0x00, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b, + 0x10, 0x84, 0xe3, 0xd4, 0xc8, 0xc8, 0x25, 0x0c, 0x74, 0x86, 0x1e, 0x9a, 0xa1, 0x4e, 0x7c, 0x70, + 0x23, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, 0xda, 0x24, 0x38, 0x7a, 0x01, 0x23, 0xe3, 0x0f, 0x46, + 0xc6, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x86, 0x07, 0x40, 0x95, + 0xeb, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0xb4, 0x25, 0xb1, 0x81, + 0xcd, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index 5e52a81c7..bfc0a5b02 100644 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -179,7 +179,7 @@ func init() { } var fileDescriptor0 = []byte{ - // 260 bytes of a gzipped FileDescriptorProto + // 258 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, @@ -187,14 +187,14 @@ var fileDescriptor0 = []byte{ 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, - 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87, - 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4, - 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1, - 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18, - 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x7a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75, - 0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0x5b, 0xc0, - 0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, - 0xd1, 0x01, 0x50, 0xd5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, - 0x5d, 0x49, 0x6c, 0x60, 0x63, 0x8c, 0x01, 0x01, 0x00, 0x00, 
0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, - 0x1c, 0x02, 0x00, 0x00, + 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x00, 0x2d, 0x0b, 0xc5, + 0xa5, 0x88, 0x05, 0xd5, 0x20, 0x63, 0x23, 0x2c, 0x6a, 0x58, 0xd1, 0x0c, 0xc2, 0xaa, 0x88, 0x17, + 0xa6, 0x48, 0x91, 0x8b, 0xd3, 0x29, 0x3f, 0x3f, 0x07, 0x8b, 0x12, 0x0e, 0x24, 0x73, 0x82, 0x4b, + 0x8a, 0x32, 0xf3, 0xd2, 0xb1, 0x28, 0xe2, 0x44, 0x72, 0x90, 0x53, 0x65, 0x49, 0x6a, 0x31, 0x16, + 0x35, 0x3c, 0x50, 0x35, 0x4e, 0xf5, 0x5c, 0xc2, 0xc0, 0xd8, 0xd0, 0x43, 0x0b, 0x5d, 0x27, 0xde, + 0x70, 0x68, 0xf0, 0x07, 0x80, 0x44, 0x02, 0x18, 0xa3, 0xb4, 0x88, 0x8f, 0xba, 0x05, 0x8c, 0x8c, + 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x18, 0x1d, + 0x00, 0x55, 0xad, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0xd2, 0x95, + 0xc4, 0x06, 0x36, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, 0x1c, 0x02, + 0x00, 0x00, } diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. 
- -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp deleted file mode 100644 index fc31f5173..000000000 --- a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp +++ /dev/null @@ -1,77 +0,0 @@ -/* -To build the snappytool binary: -g++ main.cpp /usr/lib/libsnappy.a -o snappytool -or, if you have built the C++ snappy library from source: -g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool -after running "make" from your snappy checkout directory. -*/ - -#include -#include -#include -#include - -#include "snappy.h" - -#define N 1000000 - -char dst[N]; -char src[N]; - -int main(int argc, char** argv) { - // Parse args. - if (argc != 2) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - bool decode = strcmp(argv[1], "-d") == 0; - bool encode = strcmp(argv[1], "-e") == 0; - if (decode == encode) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - - // Read all of stdin into src[:s]. - size_t s = 0; - while (1) { - if (s == N) { - fprintf(stderr, "input too large\n"); - return 1; - } - ssize_t n = read(0, src+s, N-s); - if (n == 0) { - break; - } - if (n < 0) { - fprintf(stderr, "read error: %s\n", strerror(errno)); - // TODO: handle EAGAIN, EINTR? - return 1; - } - s += n; - } - - // Encode or decode src[:s] to dst[:d], and write to stdout. 
- size_t d = 0; - if (encode) { - if (N < snappy::MaxCompressedLength(s)) { - fprintf(stderr, "input too large after encoding\n"); - return 1; - } - snappy::RawCompress(src, s, dst, &d); - } else { - if (!snappy::GetUncompressedLength(src, s, &d)) { - fprintf(stderr, "could not get uncompressed length\n"); - return 1; - } - if (N < d) { - fprintf(stderr, "input too large after decoding\n"); - return 1; - } - if (!snappy::RawUncompress(src, s, dst)) { - fprintf(stderr, "input was not valid Snappy-compressed data\n"); - return 1; - } - } - write(1, dst, d); - return 0; -} diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb0353..000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. 
This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). 
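In pure Go, the guarded 16-byte trick that the next instructions implement corresponds roughly to the following sketch. It is illustrative only, not part of the vendored sources; the helper name and its calling convention are invented here, with the semantics taken from the comment above.

// copyLiteralFast copies a literal of `length` bytes from src[s:] to dst[d:]
// using one unconditional 16-byte block copy when that is provably safe, and
// reports whether the fast path applied. Any overrun past `length` bytes is
// overwritten by later iterations of the decode loop. (Illustrative sketch,
// not in the vendored sources.)
func copyLiteralFast(dst, src []byte, d, s, length int) bool {
	if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
		return false // caller falls back to a bounds-checked copy(dst[d:], src[s:s+length])
	}
	copy(dst[d:d+16], src[s:s+16])
	return true
}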
- CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. 
We assume that:
-	// - BX == src[s] & 0x03
-	// - CX == src[s]
-	CMPQ BX, $2
-	JEQ tagCopy2
-	JA tagCopy4
-
-	// case tagCopy1:
-	// s += 2
-	ADDQ $2, SI
-
-	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
-	JA errCorrupt
-
-	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
-	MOVQ CX, DX
-	ANDQ $0xe0, DX
-	SHLQ $3, DX
-	MOVBQZX -1(SI), BX
-	ORQ BX, DX
-
-	// length = 4 + int(src[s-2])>>2&0x7
-	SHRQ $2, CX
-	ANDQ $7, CX
-	ADDQ $4, CX
-
-doCopy:
-	// This is the end of the outer "switch", when we have a copy tag.
-	//
-	// We assume that:
-	// - CX == length && CX > 0
-	// - DX == offset
-
-	// if offset <= 0 { etc }
-	CMPQ DX, $0
-	JLE errCorrupt
-
-	// if d < offset { etc }
-	MOVQ DI, BX
-	SUBQ R8, BX
-	CMPQ BX, DX
-	JLT errCorrupt
-
-	// if length > len(dst)-d { etc }
-	MOVQ R10, BX
-	SUBQ DI, BX
-	CMPQ CX, BX
-	JGT errCorrupt
-
-	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
-	//
-	// Set:
-	// - R14 = len(dst)-d
-	// - R15 = &dst[d-offset]
-	MOVQ R10, R14
-	SUBQ DI, R14
-	MOVQ DI, R15
-	SUBQ DX, R15
-
-	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
-	//
-	// First, try using two 8-byte load/stores, similar to the doLit technique
-	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
-	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
-	// and not one 16-byte load/store, and the first store has to be before the
-	// second load, due to the overlap if offset is in the range [8, 16).
-	//
-	// if length > 16 || offset < 8 || len(dst)-d < 16 {
-	//   goto slowForwardCopy
-	// }
-	// copy 16 bytes
-	// d += length
-	CMPQ CX, $16
-	JGT slowForwardCopy
-	CMPQ DX, $8
-	JLT slowForwardCopy
-	CMPQ R14, $16
-	JLT slowForwardCopy
-	MOVQ 0(R15), AX
-	MOVQ AX, 0(DI)
-	MOVQ 8(R15), BX
-	MOVQ BX, 8(DI)
-	ADDQ CX, DI
-	JMP loop
-
-slowForwardCopy:
-	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
-	// can still try 8-byte load stores, provided we can overrun up to 10 extra
-	// bytes. As above, the overrun will be fixed up by subsequent iterations
-	// of the outermost loop.
-	//
-	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
-	// commentary says:
-	//
-	// ----
-	//
-	// The main part of this loop is a simple copy of eight bytes at a time
-	// until we've copied (at least) the requested amount of bytes. However,
-	// if d and d-offset are less than eight bytes apart (indicating a
-	// repeating pattern of length < 8), we first need to expand the pattern in
-	// order to get the correct results. For instance, if the buffer looks like
-	// this, with the eight-byte <d-offset> and <d> patterns marked as
-	// intervals:
-	//
-	//   abxxxxxxxxxxxx
-	//   [------]          d-offset
-	//     [------]        d
-	//
-	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
-	// once, after which we can move <d> two bytes without moving <d-offset>:
-	//
-	//   ababxxxxxxxxxx
-	//   [------]          d-offset
-	//       [------]      d
-	//
-	// and repeat the exercise until the two no longer overlap.
-	//
-	// This allows us to do very well in the special case of one single byte
-	// repeated many times, without taking a big hit for more general cases.
-	//
-	// The worst case of extra writing past the end of the match occurs when
-	// offset == 1 and length == 1; the last copy will read from byte positions
-	// [0..7] and write to [4..11], whereas it was only supposed to write to
-	// position 1. Thus, ten excess bytes.
- // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f2049b..000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 874968906..000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. 
On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. 
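As a usage sketch (not part of this patch), the buffered framing-format writer is typically driven as follows; Close is what guarantees the final flush:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	var compressed bytes.Buffer
	w := snappy.NewBufferedWriter(&compressed)
	if _, err := w.Write([]byte("hello, snappy framing format")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes any buffered data
		panic(err)
	}
	fmt.Printf("wrote %d compressed bytes\n", compressed.Len())
}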
-func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer than can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. 
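The eight header bytes filled in next follow the framing format's chunk layout: one type byte, a 3-byte little-endian length covering checksum plus body, then the 4-byte checksum. Expressed as a standalone sketch (a hypothetical helper mirroring the writes below, not in the vendored sources):

// putChunkHeader fills an 8-byte framing-format chunk header.
// (Hypothetical helper; the vendored code writes these bytes inline.)
func putChunkHeader(hdr []byte, chunkType uint8, chunkLen int, checksum uint32) {
	hdr[0] = chunkType            // 0x00 compressed, 0x01 uncompressed
	hdr[1] = uint8(chunkLen >> 0) // chunkLen = 4 + len(body), little-endian
	hdr[2] = uint8(chunkLen >> 8)
	hdr[3] = uint8(chunkLen >> 16)
	hdr[4] = uint8(checksum >> 0) // checksum of the uncompressed data, little-endian
	hdr[5] = uint8(checksum >> 8)
	hdr[6] = uint8(checksum >> 16)
	hdr[7] = uint8(checksum >> 24)
}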
- w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 2a56fb504..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) \ No newline at end of file diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". 
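For orientation while reading the assembly below, this is roughly the pure-Go emitLiteral it mirrors, a sketch of the encode_other.go fallback deleted alongside it (tagLiteral is the literal tag's 0x00 low bits):

const tagLiteral = 0x00

// emitLiteral writes a 1-, 2- or 3-byte literal tag (matching the
// oneByte/twoBytes/threeBytes labels below) followed by the literal bytes,
// and returns the number of bytes written. (Sketch for reference.)
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral // cf. SHLB $2 at oneByte
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral // 0xf0, cf. twoBytes
		dst[1] = uint8(n)
		i = 2
	default:
		dst[0] = 61<<2 | tagLiteral // 0xf4, cf. threeBytes
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}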
- -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
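The pure-Go counterpart that the following routine mirrors is compact enough to quote as a sketch: extendMatch returns the largest k such that k <= len(src) and src[i:i+k-j] equals src[j:k].

func extendMatch(src []byte, i, j int) int {
	// Walk both cursors forward while the bytes keep matching.
	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	}
	return j
}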
-TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
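The magic bytes in the two inlined emitters above are not arbitrary: each one is a tag expression from the pure-Go encoder that appears later in this patch, pre-computed into a constant. A small illustrative program (not from the sources) that prints the correspondences:

package main

import "fmt"

// Tag values as defined in snappy.go.
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
)

func main() {
	fmt.Printf("%#x\n", 60<<2|tagLiteral) // 0xf0: two-byte literal header
	fmt.Printf("%#x\n", 61<<2|tagLiteral) // 0xf4: three-byte literal header
	fmt.Printf("%#x\n", 63<<2|tagCopy2)   // 0xfe: length-64 copy, 3 bytes
	fmt.Printf("%#x\n", 59<<2|tagCopy2)   // 0xee: length-60 copy, 3 bytes
}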
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae905e..000000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se.
However, it matches the C++ algorithm, -// and in the asm versions of this code, we can get away with zeroing only -// the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (i.e. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls.
- x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go deleted file mode 100644 index e4496f92e..000000000 --- a/vendor/github.com/golang/snappy/golden_test.go +++ /dev/null @@ -1,1965 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -// extendMatchGoldenTestCases is the i and j arguments, and the returned value, -// for every extendMatch call issued when encoding the -// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the -// extendMatch implementation. -// -// It was generated manually by adding some print statements to the (pure Go) -// extendMatch implementation: -// -// func extendMatch(src []byte, i, j int) int { -// i0, j0 := i, j -// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { -// } -// println("{", i0, ",", j0, ",", j, "},") -// return j -// } -// -// and running "go test -test.run=EncodeGoldenInput -tags=noasm". -var extendMatchGoldenTestCases = []struct { - i, j, want int -}{ - {11, 61, 62}, - {80, 81, 82}, - {86, 87, 101}, - {85, 133, 149}, - {152, 153, 162}, - {133, 168, 193}, - {168, 207, 225}, - {81, 255, 275}, - {278, 279, 283}, - {306, 417, 417}, - {373, 428, 430}, - {389, 444, 447}, - {474, 510, 512}, - {465, 533, 533}, - {47, 547, 547}, - {307, 551, 554}, - {420, 582, 587}, - {309, 604, 604}, - {604, 625, 625}, - {538, 629, 629}, - {328, 640, 640}, - {573, 645, 645}, - {319, 657, 657}, - {30, 664, 664}, - {45, 679, 680}, - {621, 684, 684}, - {376, 700, 700}, - {33, 707, 708}, - {601, 733, 733}, - {334, 744, 745}, - {625, 758, 759}, - {382, 763, 763}, - {550, 769, 771}, - {533, 789, 789}, - {804, 813, 813}, - {342, 841, 842}, - {742, 847, 847}, - {74, 852, 852}, - {810, 864, 864}, - {758, 868, 869}, - {714, 883, 883}, - {582, 889, 891}, - {61, 934, 935}, - {894, 942, 942}, - {939, 949, 949}, - {785, 956, 957}, - {886, 978, 978}, - {792, 998, 998}, - {998, 1005, 1005}, - {572, 1032, 1032}, - {698, 1051, 1053}, - {599, 1067, 1069}, - {1056, 1079, 1079}, - {942, 1089, 1090}, - {831, 1094, 1096}, - {1088, 1100, 1103}, - {732, 1113, 1114}, - {1037, 1118, 1118}, - {872, 1128, 1130}, - {1079, 1140, 1142}, - {332, 1162, 1162}, - {207, 1168, 1186}, - {1189, 1190, 1225}, - {105, 1229, 1230}, - {79, 1256, 1257}, - {1190, 1261, 1283}, - {255, 1306, 1306}, - {1319, 1339, 1358}, - {364, 1370, 1370}, - {955, 1378, 1380}, - {122, 1403, 1403}, - {1325, 1407, 1419}, - {664, 1423, 1424}, - {941, 1461, 1463}, - {867, 1477, 1478}, - {757, 1488, 1489}, - {1140, 1499, 1499}, - {31, 1506, 1506}, - {1487, 1510, 1512}, - {1089, 1520, 1521}, - {1467, 1525, 1529}, - {1394, 1537, 1537}, - {1499, 1541, 1541}, - {367, 1558, 1558}, - {1475, 1564, 1564}, - {1525, 1568, 1571}, - {1541, 1582, 1583}, - {864, 1587, 1588}, - {704, 1597, 1597}, - {336, 1602, 1602}, - {1383, 1613, 1613}, - {1498, 1617, 1618}, - {1051, 1623, 1625}, - {401, 1643, 1645}, - {1072, 1654, 1655}, - {1067, 1667, 1669}, - {699, 1673, 1674}, - {1587, 1683, 1684}, - 
{920, 1696, 1696}, - {1505, 1710, 1710}, - {1550, 1723, 1723}, - {996, 1727, 1727}, - {833, 1733, 1734}, - {1638, 1739, 1740}, - {1654, 1744, 1744}, - {753, 1761, 1761}, - {1548, 1773, 1773}, - {1568, 1777, 1780}, - {1683, 1793, 1794}, - {948, 1801, 1801}, - {1666, 1805, 1808}, - {1502, 1814, 1814}, - {1696, 1822, 1822}, - {502, 1836, 1837}, - {917, 1843, 1843}, - {1733, 1854, 1855}, - {970, 1859, 1859}, - {310, 1863, 1863}, - {657, 1872, 1872}, - {1005, 1876, 1876}, - {1662, 1880, 1880}, - {904, 1892, 1892}, - {1427, 1910, 1910}, - {1772, 1929, 1930}, - {1822, 1937, 1940}, - {1858, 1949, 1950}, - {1602, 1956, 1956}, - {1150, 1962, 1962}, - {1504, 1966, 1967}, - {51, 1971, 1971}, - {1605, 1979, 1979}, - {1458, 1983, 1988}, - {1536, 2001, 2006}, - {1373, 2014, 2018}, - {1494, 2025, 2025}, - {1667, 2029, 2031}, - {1592, 2035, 2035}, - {330, 2045, 2045}, - {1376, 2053, 2053}, - {1991, 2058, 2059}, - {1635, 2065, 2065}, - {1992, 2073, 2074}, - {2014, 2080, 2081}, - {1546, 2085, 2087}, - {59, 2099, 2099}, - {1996, 2106, 2106}, - {1836, 2110, 2110}, - {2068, 2114, 2114}, - {1338, 2122, 2122}, - {1562, 2128, 2130}, - {1934, 2134, 2134}, - {2114, 2141, 2142}, - {977, 2149, 2150}, - {956, 2154, 2155}, - {1407, 2162, 2162}, - {1773, 2166, 2166}, - {883, 2171, 2171}, - {623, 2175, 2178}, - {1520, 2191, 2192}, - {1162, 2200, 2200}, - {912, 2204, 2204}, - {733, 2208, 2208}, - {1777, 2212, 2215}, - {1532, 2219, 2219}, - {718, 2223, 2225}, - {2069, 2229, 2229}, - {2207, 2245, 2246}, - {1139, 2264, 2264}, - {677, 2274, 2274}, - {2099, 2279, 2279}, - {1863, 2283, 2283}, - {1966, 2305, 2306}, - {2279, 2313, 2313}, - {1628, 2319, 2319}, - {755, 2329, 2329}, - {1461, 2334, 2334}, - {2117, 2340, 2340}, - {2313, 2349, 2349}, - {1859, 2353, 2353}, - {1048, 2362, 2362}, - {895, 2366, 2366}, - {2278, 2373, 2373}, - {1884, 2377, 2377}, - {1402, 2387, 2392}, - {700, 2398, 2398}, - {1971, 2402, 2402}, - {2009, 2419, 2419}, - {1441, 2426, 2428}, - {2208, 2432, 2432}, - {2038, 2436, 2436}, - {932, 2443, 2443}, - {1759, 2447, 2448}, - {744, 2452, 2452}, - {1875, 2458, 2458}, - {2405, 2468, 2468}, - {1596, 2472, 2473}, - {1953, 2480, 2482}, - {736, 2487, 2487}, - {1913, 2493, 2493}, - {774, 2497, 2497}, - {1484, 2506, 2508}, - {2432, 2512, 2512}, - {752, 2519, 2519}, - {2497, 2523, 2523}, - {2409, 2528, 2529}, - {2122, 2533, 2533}, - {2396, 2537, 2538}, - {2410, 2547, 2548}, - {1093, 2555, 2560}, - {551, 2564, 2565}, - {2268, 2569, 2569}, - {1362, 2580, 2580}, - {1916, 2584, 2585}, - {994, 2589, 2590}, - {1979, 2596, 2596}, - {1041, 2602, 2602}, - {2104, 2614, 2616}, - {2609, 2621, 2628}, - {2329, 2638, 2638}, - {2211, 2657, 2658}, - {2638, 2662, 2667}, - {2578, 2676, 2679}, - {2153, 2685, 2686}, - {2608, 2696, 2697}, - {598, 2712, 2712}, - {2620, 2719, 2720}, - {1888, 2724, 2728}, - {2709, 2732, 2732}, - {1365, 2739, 2739}, - {784, 2747, 2748}, - {424, 2753, 2753}, - {2204, 2759, 2759}, - {812, 2768, 2769}, - {2455, 2773, 2773}, - {1722, 2781, 2781}, - {1917, 2792, 2792}, - {2705, 2799, 2799}, - {2685, 2806, 2807}, - {2742, 2811, 2811}, - {1370, 2818, 2818}, - {2641, 2830, 2830}, - {2512, 2837, 2837}, - {2457, 2841, 2841}, - {2756, 2845, 2845}, - {2719, 2855, 2855}, - {1423, 2859, 2859}, - {2849, 2863, 2865}, - {1474, 2871, 2871}, - {1161, 2875, 2876}, - {2282, 2880, 2881}, - {2746, 2888, 2888}, - {1783, 2893, 2893}, - {2401, 2899, 2900}, - {2632, 2920, 2923}, - {2422, 2928, 2930}, - {2715, 2939, 2939}, - {2162, 2943, 2943}, - {2859, 2947, 2947}, - {1910, 2951, 2951}, - {1431, 2955, 2956}, - {1439, 2964, 2964}, - {2501, 
2968, 2969}, - {2029, 2973, 2976}, - {689, 2983, 2984}, - {1658, 2988, 2988}, - {1031, 2996, 2996}, - {2149, 3001, 3002}, - {25, 3009, 3013}, - {2964, 3023, 3023}, - {953, 3027, 3028}, - {2359, 3036, 3036}, - {3023, 3049, 3049}, - {2880, 3055, 3056}, - {2973, 3076, 3077}, - {2874, 3090, 3090}, - {2871, 3094, 3094}, - {2532, 3100, 3100}, - {2938, 3107, 3108}, - {350, 3115, 3115}, - {2196, 3119, 3121}, - {1133, 3127, 3129}, - {1797, 3134, 3150}, - {3032, 3158, 3158}, - {3016, 3172, 3172}, - {2533, 3179, 3179}, - {3055, 3187, 3188}, - {1384, 3192, 3193}, - {2799, 3199, 3199}, - {2126, 3203, 3207}, - {2334, 3215, 3215}, - {2105, 3220, 3221}, - {3199, 3229, 3229}, - {2891, 3233, 3233}, - {855, 3240, 3240}, - {1852, 3253, 3256}, - {2140, 3263, 3263}, - {1682, 3268, 3270}, - {3243, 3274, 3274}, - {924, 3279, 3279}, - {2212, 3283, 3283}, - {2596, 3287, 3287}, - {2999, 3291, 3291}, - {2353, 3295, 3295}, - {2480, 3302, 3304}, - {1959, 3308, 3311}, - {3000, 3318, 3318}, - {845, 3330, 3330}, - {2283, 3334, 3334}, - {2519, 3342, 3342}, - {3325, 3346, 3348}, - {2397, 3353, 3354}, - {2763, 3358, 3358}, - {3198, 3363, 3364}, - {3211, 3368, 3372}, - {2950, 3376, 3377}, - {3245, 3388, 3391}, - {2264, 3398, 3398}, - {795, 3403, 3403}, - {3287, 3407, 3407}, - {3358, 3411, 3411}, - {3317, 3415, 3415}, - {3232, 3431, 3431}, - {2128, 3435, 3437}, - {3236, 3441, 3441}, - {3398, 3445, 3446}, - {2814, 3450, 3450}, - {3394, 3466, 3466}, - {2425, 3470, 3470}, - {3330, 3476, 3476}, - {1612, 3480, 3480}, - {1004, 3485, 3486}, - {2732, 3490, 3490}, - {1117, 3494, 3495}, - {629, 3501, 3501}, - {3087, 3514, 3514}, - {684, 3518, 3518}, - {3489, 3522, 3524}, - {1760, 3529, 3529}, - {617, 3537, 3537}, - {3431, 3541, 3541}, - {997, 3547, 3547}, - {882, 3552, 3553}, - {2419, 3558, 3558}, - {610, 3562, 3563}, - {1903, 3567, 3569}, - {3005, 3575, 3575}, - {3076, 3585, 3586}, - {3541, 3590, 3590}, - {3490, 3594, 3594}, - {1899, 3599, 3599}, - {3545, 3606, 3606}, - {3290, 3614, 3615}, - {2056, 3619, 3620}, - {3556, 3625, 3625}, - {3294, 3632, 3633}, - {637, 3643, 3644}, - {3609, 3648, 3650}, - {3175, 3658, 3658}, - {3498, 3665, 3665}, - {1597, 3669, 3669}, - {1983, 3673, 3673}, - {3215, 3682, 3682}, - {3544, 3689, 3689}, - {3694, 3698, 3698}, - {3228, 3715, 3716}, - {2594, 3720, 3722}, - {3573, 3726, 3726}, - {2479, 3732, 3735}, - {3191, 3741, 3742}, - {1113, 3746, 3747}, - {2844, 3751, 3751}, - {3445, 3756, 3757}, - {3755, 3766, 3766}, - {3421, 3775, 3780}, - {3593, 3784, 3786}, - {3263, 3796, 3796}, - {3469, 3806, 3806}, - {2602, 3815, 3815}, - {723, 3819, 3821}, - {1608, 3826, 3826}, - {3334, 3830, 3830}, - {2198, 3835, 3835}, - {2635, 3840, 3840}, - {3702, 3852, 3853}, - {3406, 3858, 3859}, - {3681, 3867, 3870}, - {3407, 3880, 3880}, - {340, 3889, 3889}, - {3772, 3893, 3893}, - {593, 3897, 3897}, - {2563, 3914, 3916}, - {2981, 3929, 3929}, - {1835, 3933, 3934}, - {3906, 3951, 3951}, - {1459, 3958, 3958}, - {3889, 3974, 3974}, - {2188, 3982, 3982}, - {3220, 3986, 3987}, - {3585, 3991, 3993}, - {3712, 3997, 4001}, - {2805, 4007, 4007}, - {1879, 4012, 4013}, - {3618, 4018, 4018}, - {1145, 4031, 4032}, - {3901, 4037, 4037}, - {2772, 4046, 4047}, - {2802, 4053, 4054}, - {3299, 4058, 4058}, - {3725, 4066, 4066}, - {2271, 4070, 4070}, - {385, 4075, 4076}, - {3624, 4089, 4090}, - {3745, 4096, 4098}, - {1563, 4102, 4102}, - {4045, 4106, 4111}, - {3696, 4115, 4119}, - {3376, 4125, 4126}, - {1880, 4130, 4130}, - {2048, 4140, 4141}, - {2724, 4149, 4149}, - {1767, 4156, 4156}, - {2601, 4164, 4164}, - {2757, 4168, 4168}, - {3974, 4172, 
4172}, - {3914, 4178, 4178}, - {516, 4185, 4185}, - {1032, 4189, 4190}, - {3462, 4197, 4198}, - {3805, 4202, 4203}, - {3910, 4207, 4212}, - {3075, 4221, 4221}, - {3756, 4225, 4226}, - {1872, 4236, 4237}, - {3844, 4241, 4241}, - {3991, 4245, 4249}, - {2203, 4258, 4258}, - {3903, 4267, 4268}, - {705, 4272, 4272}, - {1896, 4276, 4276}, - {1955, 4285, 4288}, - {3746, 4302, 4303}, - {2672, 4311, 4311}, - {3969, 4317, 4317}, - {3883, 4322, 4322}, - {1920, 4339, 4340}, - {3527, 4344, 4346}, - {1160, 4358, 4358}, - {3648, 4364, 4366}, - {2711, 4387, 4387}, - {3619, 4391, 4392}, - {1944, 4396, 4396}, - {4369, 4400, 4400}, - {2736, 4404, 4407}, - {2546, 4411, 4412}, - {4390, 4422, 4422}, - {3610, 4426, 4427}, - {4058, 4431, 4431}, - {4374, 4435, 4435}, - {3463, 4445, 4446}, - {1813, 4452, 4452}, - {3669, 4456, 4456}, - {3830, 4460, 4460}, - {421, 4464, 4465}, - {1719, 4471, 4471}, - {3880, 4475, 4475}, - {1834, 4485, 4487}, - {3590, 4491, 4491}, - {442, 4496, 4497}, - {4435, 4501, 4501}, - {3814, 4509, 4509}, - {987, 4513, 4513}, - {4494, 4518, 4521}, - {3218, 4526, 4529}, - {4221, 4537, 4537}, - {2778, 4543, 4545}, - {4422, 4552, 4552}, - {4031, 4558, 4559}, - {4178, 4563, 4563}, - {3726, 4567, 4574}, - {4027, 4578, 4578}, - {4339, 4585, 4587}, - {3796, 4592, 4595}, - {543, 4600, 4613}, - {2855, 4620, 4621}, - {2795, 4627, 4627}, - {3440, 4631, 4632}, - {4279, 4636, 4639}, - {4245, 4643, 4645}, - {4516, 4649, 4650}, - {3133, 4654, 4654}, - {4042, 4658, 4659}, - {3422, 4663, 4663}, - {4046, 4667, 4668}, - {4267, 4672, 4672}, - {4004, 4676, 4677}, - {2490, 4682, 4682}, - {2451, 4697, 4697}, - {3027, 4705, 4705}, - {4028, 4717, 4717}, - {4460, 4721, 4721}, - {2471, 4725, 4727}, - {3090, 4735, 4735}, - {3192, 4739, 4740}, - {3835, 4760, 4760}, - {4540, 4764, 4764}, - {4007, 4772, 4774}, - {619, 4784, 4784}, - {3561, 4789, 4791}, - {3367, 4805, 4805}, - {4490, 4810, 4811}, - {2402, 4815, 4815}, - {3352, 4819, 4822}, - {2773, 4828, 4828}, - {4552, 4832, 4832}, - {2522, 4840, 4841}, - {316, 4847, 4852}, - {4715, 4858, 4858}, - {2959, 4862, 4862}, - {4858, 4868, 4869}, - {2134, 4873, 4873}, - {578, 4878, 4878}, - {4189, 4889, 4890}, - {2229, 4894, 4894}, - {4501, 4898, 4898}, - {2297, 4903, 4903}, - {2933, 4909, 4909}, - {3008, 4913, 4913}, - {3153, 4917, 4917}, - {4819, 4921, 4921}, - {4921, 4932, 4933}, - {4920, 4944, 4945}, - {4814, 4954, 4955}, - {576, 4966, 4966}, - {1854, 4970, 4971}, - {1374, 4975, 4976}, - {3307, 4980, 4980}, - {974, 4984, 4988}, - {4721, 4992, 4992}, - {4898, 4996, 4996}, - {4475, 5006, 5006}, - {3819, 5012, 5012}, - {1948, 5019, 5021}, - {4954, 5027, 5029}, - {3740, 5038, 5040}, - {4763, 5044, 5045}, - {1936, 5051, 5051}, - {4844, 5055, 5060}, - {4215, 5069, 5072}, - {1146, 5076, 5076}, - {3845, 5082, 5082}, - {4865, 5090, 5090}, - {4624, 5094, 5094}, - {4815, 5098, 5098}, - {5006, 5105, 5105}, - {4980, 5109, 5109}, - {4795, 5113, 5115}, - {5043, 5119, 5121}, - {4782, 5129, 5129}, - {3826, 5139, 5139}, - {3876, 5156, 5156}, - {3111, 5167, 5171}, - {1470, 5177, 5177}, - {4431, 5181, 5181}, - {546, 5189, 5189}, - {4225, 5193, 5193}, - {1672, 5199, 5201}, - {4207, 5205, 5209}, - {4220, 5216, 5217}, - {4658, 5224, 5225}, - {3295, 5235, 5235}, - {2436, 5239, 5239}, - {2349, 5246, 5246}, - {2175, 5250, 5250}, - {5180, 5257, 5258}, - {3161, 5263, 5263}, - {5105, 5272, 5272}, - {3552, 5282, 5282}, - {4944, 5299, 5300}, - {4130, 5312, 5313}, - {902, 5323, 5323}, - {913, 5327, 5327}, - {2987, 5333, 5334}, - {5150, 5344, 5344}, - {5249, 5348, 5348}, - {1965, 5358, 5359}, - {5330, 5364, 
5364}, - {2012, 5373, 5377}, - {712, 5384, 5386}, - {5235, 5390, 5390}, - {5044, 5398, 5399}, - {564, 5406, 5406}, - {39, 5410, 5410}, - {4642, 5422, 5425}, - {4421, 5437, 5438}, - {2347, 5449, 5449}, - {5333, 5453, 5454}, - {4136, 5458, 5459}, - {3793, 5468, 5468}, - {2243, 5480, 5480}, - {4889, 5492, 5493}, - {4295, 5504, 5504}, - {2785, 5511, 5511}, - {2377, 5518, 5518}, - {3662, 5525, 5525}, - {5097, 5529, 5530}, - {4781, 5537, 5538}, - {4697, 5547, 5548}, - {436, 5552, 5553}, - {5542, 5558, 5558}, - {3692, 5562, 5562}, - {2696, 5568, 5569}, - {4620, 5578, 5578}, - {2898, 5590, 5590}, - {5557, 5596, 5618}, - {2797, 5623, 5625}, - {2792, 5629, 5629}, - {5243, 5633, 5633}, - {5348, 5637, 5637}, - {5547, 5643, 5643}, - {4296, 5654, 5655}, - {5568, 5662, 5662}, - {3001, 5670, 5671}, - {3794, 5679, 5679}, - {4006, 5685, 5686}, - {4969, 5690, 5692}, - {687, 5704, 5704}, - {4563, 5708, 5708}, - {1723, 5738, 5738}, - {649, 5742, 5742}, - {5163, 5748, 5755}, - {3907, 5759, 5759}, - {3074, 5764, 5764}, - {5326, 5771, 5771}, - {2951, 5776, 5776}, - {5181, 5780, 5780}, - {2614, 5785, 5788}, - {4709, 5794, 5794}, - {2784, 5799, 5799}, - {5518, 5803, 5803}, - {4155, 5812, 5815}, - {921, 5819, 5819}, - {5224, 5823, 5824}, - {2853, 5830, 5836}, - {5776, 5840, 5840}, - {2955, 5844, 5845}, - {5745, 5853, 5853}, - {3291, 5857, 5857}, - {2988, 5861, 5861}, - {2647, 5865, 5865}, - {5398, 5869, 5870}, - {1085, 5874, 5875}, - {4906, 5881, 5881}, - {802, 5886, 5886}, - {5119, 5890, 5893}, - {5802, 5899, 5900}, - {3415, 5904, 5904}, - {5629, 5908, 5908}, - {3714, 5912, 5914}, - {5558, 5921, 5921}, - {2710, 5927, 5928}, - {1094, 5932, 5934}, - {2653, 5940, 5941}, - {4735, 5954, 5954}, - {5861, 5958, 5958}, - {1040, 5971, 5971}, - {5514, 5977, 5977}, - {5048, 5981, 5982}, - {5953, 5992, 5993}, - {3751, 5997, 5997}, - {4991, 6001, 6002}, - {5885, 6006, 6007}, - {5529, 6011, 6012}, - {4974, 6019, 6020}, - {5857, 6024, 6024}, - {3483, 6032, 6032}, - {3594, 6036, 6036}, - {1997, 6040, 6040}, - {5997, 6044, 6047}, - {5197, 6051, 6051}, - {1764, 6055, 6055}, - {6050, 6059, 6059}, - {5239, 6063, 6063}, - {5049, 6067, 6067}, - {5957, 6073, 6074}, - {1022, 6078, 6078}, - {3414, 6083, 6084}, - {3809, 6090, 6090}, - {4562, 6095, 6096}, - {5878, 6104, 6104}, - {594, 6108, 6109}, - {3353, 6115, 6116}, - {4992, 6120, 6121}, - {2424, 6125, 6125}, - {4484, 6130, 6130}, - {3900, 6134, 6135}, - {5793, 6139, 6141}, - {3562, 6145, 6145}, - {1438, 6152, 6153}, - {6058, 6157, 6158}, - {4411, 6162, 6163}, - {4590, 6167, 6171}, - {4748, 6175, 6175}, - {5517, 6183, 6184}, - {6095, 6191, 6192}, - {1471, 6203, 6203}, - {2643, 6209, 6210}, - {450, 6220, 6220}, - {5266, 6226, 6226}, - {2576, 6233, 6233}, - {2607, 6239, 6240}, - {5164, 6244, 6251}, - {6054, 6255, 6255}, - {1789, 6260, 6261}, - {5250, 6265, 6265}, - {6062, 6273, 6278}, - {5990, 6282, 6282}, - {3283, 6286, 6286}, - {5436, 6290, 6290}, - {6059, 6294, 6294}, - {5668, 6298, 6300}, - {3072, 6324, 6329}, - {3132, 6338, 6339}, - {3246, 6343, 6344}, - {28, 6348, 6349}, - {1503, 6353, 6355}, - {6067, 6359, 6359}, - {3384, 6364, 6364}, - {545, 6375, 6376}, - {5803, 6380, 6380}, - {5522, 6384, 6385}, - {5908, 6389, 6389}, - {2796, 6393, 6396}, - {4831, 6403, 6404}, - {6388, 6412, 6412}, - {6005, 6417, 6420}, - {4450, 6430, 6430}, - {4050, 6435, 6435}, - {5372, 6441, 6441}, - {4378, 6447, 6447}, - {6199, 6452, 6452}, - {3026, 6456, 6456}, - {2642, 6460, 6462}, - {6392, 6470, 6470}, - {6459, 6474, 6474}, - {2829, 6487, 6488}, - {2942, 6499, 6504}, - {5069, 6508, 6511}, - {5341, 6515, 
6516}, - {5853, 6521, 6525}, - {6104, 6531, 6531}, - {5759, 6535, 6538}, - {4672, 6542, 6543}, - {2443, 6550, 6550}, - {5109, 6554, 6554}, - {6494, 6558, 6560}, - {6006, 6570, 6572}, - {6424, 6576, 6580}, - {4693, 6591, 6592}, - {6439, 6596, 6597}, - {3179, 6601, 6601}, - {5299, 6606, 6607}, - {4148, 6612, 6613}, - {3774, 6617, 6617}, - {3537, 6623, 6624}, - {4975, 6628, 6629}, - {3848, 6636, 6636}, - {856, 6640, 6640}, - {5724, 6645, 6645}, - {6632, 6651, 6651}, - {4630, 6656, 6658}, - {1440, 6662, 6662}, - {4281, 6666, 6667}, - {4302, 6671, 6672}, - {2589, 6676, 6677}, - {5647, 6681, 6687}, - {6082, 6691, 6693}, - {6144, 6698, 6698}, - {6103, 6709, 6710}, - {3710, 6714, 6714}, - {4253, 6718, 6721}, - {2467, 6730, 6730}, - {4778, 6734, 6734}, - {6528, 6738, 6738}, - {4358, 6747, 6747}, - {5889, 6753, 6753}, - {5193, 6757, 6757}, - {5797, 6761, 6761}, - {3858, 6765, 6766}, - {5951, 6776, 6776}, - {6487, 6781, 6782}, - {3282, 6786, 6787}, - {4667, 6797, 6799}, - {1927, 6803, 6806}, - {6583, 6810, 6810}, - {4937, 6814, 6814}, - {6099, 6824, 6824}, - {4415, 6835, 6836}, - {6332, 6840, 6841}, - {5160, 6850, 6850}, - {4764, 6854, 6854}, - {6814, 6858, 6859}, - {3018, 6864, 6864}, - {6293, 6868, 6869}, - {6359, 6877, 6877}, - {3047, 6884, 6886}, - {5262, 6890, 6891}, - {5471, 6900, 6900}, - {3268, 6910, 6912}, - {1047, 6916, 6916}, - {5904, 6923, 6923}, - {5798, 6933, 6938}, - {4149, 6942, 6942}, - {1821, 6946, 6946}, - {3599, 6952, 6952}, - {6470, 6957, 6957}, - {5562, 6961, 6961}, - {6268, 6965, 6967}, - {6389, 6971, 6971}, - {6596, 6975, 6976}, - {6553, 6980, 6981}, - {6576, 6985, 6989}, - {1375, 6993, 6993}, - {652, 6998, 6998}, - {4876, 7002, 7003}, - {5768, 7011, 7013}, - {3973, 7017, 7017}, - {6802, 7025, 7025}, - {6955, 7034, 7036}, - {6974, 7040, 7040}, - {5944, 7044, 7044}, - {6992, 7048, 7054}, - {6872, 7059, 7059}, - {2943, 7063, 7063}, - {6923, 7067, 7067}, - {5094, 7071, 7071}, - {4873, 7075, 7075}, - {5819, 7079, 7079}, - {5945, 7085, 7085}, - {1540, 7090, 7091}, - {2090, 7095, 7095}, - {5024, 7104, 7105}, - {6900, 7109, 7109}, - {6024, 7113, 7114}, - {6000, 7118, 7120}, - {2187, 7124, 7125}, - {6760, 7129, 7130}, - {5898, 7134, 7136}, - {7032, 7144, 7144}, - {4271, 7148, 7148}, - {3706, 7152, 7152}, - {6970, 7156, 7157}, - {7088, 7161, 7163}, - {2718, 7168, 7169}, - {5674, 7175, 7175}, - {4631, 7182, 7182}, - {7070, 7188, 7189}, - {6220, 7196, 7196}, - {3458, 7201, 7202}, - {2041, 7211, 7212}, - {1454, 7216, 7216}, - {5199, 7225, 7227}, - {3529, 7234, 7234}, - {6890, 7238, 7238}, - {3815, 7242, 7243}, - {5490, 7250, 7253}, - {6554, 7257, 7263}, - {5890, 7267, 7269}, - {6877, 7273, 7273}, - {4877, 7277, 7277}, - {2502, 7285, 7285}, - {1483, 7289, 7295}, - {7210, 7304, 7308}, - {6845, 7313, 7316}, - {7219, 7320, 7320}, - {7001, 7325, 7329}, - {6853, 7333, 7334}, - {6120, 7338, 7338}, - {6606, 7342, 7343}, - {7020, 7348, 7350}, - {3509, 7354, 7354}, - {7133, 7359, 7363}, - {3434, 7371, 7374}, - {2787, 7384, 7384}, - {7044, 7388, 7388}, - {6960, 7394, 7395}, - {6676, 7399, 7400}, - {7161, 7404, 7404}, - {7285, 7417, 7418}, - {4558, 7425, 7426}, - {4828, 7430, 7430}, - {6063, 7436, 7436}, - {3597, 7442, 7442}, - {914, 7446, 7446}, - {7320, 7452, 7454}, - {7267, 7458, 7460}, - {5076, 7464, 7464}, - {7430, 7468, 7469}, - {6273, 7473, 7474}, - {7440, 7478, 7487}, - {7348, 7491, 7494}, - {1021, 7510, 7510}, - {7473, 7515, 7515}, - {2823, 7519, 7519}, - {6264, 7527, 7527}, - {7302, 7531, 7531}, - {7089, 7535, 7535}, - {7342, 7540, 7541}, - {3688, 7547, 7551}, - {3054, 7558, 7560}, - {4177, 
7566, 7567}, - {6691, 7574, 7575}, - {7156, 7585, 7586}, - {7147, 7590, 7592}, - {7407, 7598, 7598}, - {7403, 7602, 7603}, - {6868, 7607, 7607}, - {6636, 7611, 7611}, - {4805, 7617, 7617}, - {5779, 7623, 7623}, - {7063, 7627, 7627}, - {5079, 7632, 7632}, - {7377, 7637, 7637}, - {7337, 7641, 7642}, - {6738, 7655, 7655}, - {7338, 7659, 7659}, - {6541, 7669, 7671}, - {595, 7675, 7675}, - {7658, 7679, 7680}, - {7647, 7685, 7686}, - {2477, 7690, 7690}, - {5823, 7694, 7694}, - {4156, 7699, 7699}, - {5931, 7703, 7706}, - {6854, 7712, 7712}, - {4931, 7718, 7718}, - {6979, 7722, 7722}, - {5085, 7727, 7727}, - {6965, 7732, 7732}, - {7201, 7736, 7737}, - {3639, 7741, 7743}, - {7534, 7749, 7749}, - {4292, 7753, 7753}, - {3427, 7759, 7763}, - {7273, 7767, 7767}, - {940, 7778, 7778}, - {4838, 7782, 7785}, - {4216, 7790, 7792}, - {922, 7800, 7801}, - {7256, 7810, 7811}, - {7789, 7815, 7819}, - {7225, 7823, 7825}, - {7531, 7829, 7829}, - {6997, 7833, 7833}, - {7757, 7837, 7838}, - {4129, 7842, 7842}, - {7333, 7848, 7849}, - {6776, 7855, 7855}, - {7527, 7859, 7859}, - {4370, 7863, 7863}, - {4512, 7868, 7868}, - {5679, 7880, 7880}, - {3162, 7884, 7885}, - {3933, 7892, 7894}, - {7804, 7899, 7902}, - {6363, 7906, 7907}, - {7848, 7911, 7912}, - {5584, 7917, 7921}, - {874, 7926, 7926}, - {3342, 7930, 7930}, - {4507, 7935, 7937}, - {3672, 7943, 7944}, - {7911, 7948, 7949}, - {6402, 7956, 7956}, - {7940, 7960, 7960}, - {7113, 7964, 7964}, - {1073, 7968, 7968}, - {7740, 7974, 7974}, - {7601, 7978, 7982}, - {6797, 7987, 7988}, - {3528, 7994, 7995}, - {5483, 7999, 7999}, - {5717, 8011, 8011}, - {5480, 8017, 8017}, - {7770, 8023, 8030}, - {2452, 8034, 8034}, - {5282, 8047, 8047}, - {7967, 8051, 8051}, - {1128, 8058, 8066}, - {6348, 8070, 8070}, - {8055, 8077, 8077}, - {7925, 8081, 8086}, - {6810, 8090, 8090}, - {5051, 8101, 8101}, - {4696, 8109, 8110}, - {5129, 8119, 8119}, - {4449, 8123, 8123}, - {7222, 8127, 8127}, - {4649, 8131, 8134}, - {7994, 8138, 8138}, - {5954, 8148, 8148}, - {475, 8152, 8153}, - {7906, 8157, 8157}, - {7458, 8164, 8166}, - {7632, 8171, 8173}, - {3874, 8177, 8183}, - {4391, 8187, 8187}, - {561, 8191, 8191}, - {2417, 8195, 8195}, - {2357, 8204, 8204}, - {2269, 8216, 8218}, - {3968, 8222, 8222}, - {2200, 8226, 8227}, - {3453, 8247, 8247}, - {2439, 8251, 8252}, - {7175, 8257, 8257}, - {976, 8262, 8264}, - {4953, 8273, 8273}, - {4219, 8278, 8278}, - {6, 8285, 8291}, - {5703, 8295, 8296}, - {5272, 8300, 8300}, - {8037, 8304, 8304}, - {8186, 8314, 8314}, - {8304, 8318, 8318}, - {8051, 8326, 8326}, - {8318, 8330, 8330}, - {2671, 8334, 8335}, - {2662, 8339, 8339}, - {8081, 8349, 8350}, - {3328, 8356, 8356}, - {2879, 8360, 8362}, - {8050, 8370, 8371}, - {8330, 8375, 8376}, - {8375, 8386, 8386}, - {4961, 8390, 8390}, - {1017, 8403, 8405}, - {3533, 8416, 8416}, - {4555, 8422, 8422}, - {6445, 8426, 8426}, - {8169, 8432, 8432}, - {990, 8436, 8436}, - {4102, 8440, 8440}, - {7398, 8444, 8446}, - {3480, 8450, 8450}, - {6324, 8462, 8462}, - {7948, 8466, 8467}, - {5950, 8471, 8471}, - {5189, 8476, 8476}, - {4026, 8490, 8490}, - {8374, 8494, 8495}, - {4682, 8501, 8501}, - {7387, 8506, 8506}, - {8164, 8510, 8515}, - {4079, 8524, 8524}, - {8360, 8529, 8531}, - {7446, 8540, 8543}, - {7971, 8547, 8548}, - {4311, 8552, 8552}, - {5204, 8556, 8557}, - {7968, 8562, 8562}, - {7847, 8571, 8573}, - {8547, 8577, 8577}, - {5320, 8581, 8581}, - {8556, 8585, 8586}, - {8504, 8590, 8590}, - {7669, 8602, 8604}, - {5874, 8608, 8609}, - {5828, 8613, 8613}, - {7998, 8617, 8617}, - {8519, 8625, 8625}, - {7250, 8637, 8637}, - {426, 
8641, 8641}, - {8436, 8645, 8645}, - {5986, 8649, 8656}, - {8157, 8660, 8660}, - {7182, 8665, 8665}, - {8421, 8675, 8675}, - {8509, 8681, 8681}, - {5137, 8688, 8689}, - {8625, 8694, 8695}, - {5228, 8701, 8702}, - {6661, 8714, 8714}, - {1010, 8719, 8719}, - {6648, 8723, 8723}, - {3500, 8728, 8728}, - {2442, 8735, 8735}, - {8494, 8740, 8741}, - {8171, 8753, 8755}, - {7242, 8763, 8764}, - {4739, 8768, 8769}, - {7079, 8773, 8773}, - {8386, 8777, 8777}, - {8624, 8781, 8787}, - {661, 8791, 8794}, - {8631, 8801, 8801}, - {7753, 8805, 8805}, - {4783, 8809, 8810}, - {1673, 8814, 8815}, - {6623, 8819, 8819}, - {4404, 8823, 8823}, - {8089, 8827, 8828}, - {8773, 8832, 8832}, - {5394, 8836, 8836}, - {6231, 8841, 8843}, - {1015, 8852, 8853}, - {6873, 8857, 8857}, - {6289, 8865, 8865}, - {8577, 8869, 8869}, - {8114, 8873, 8875}, - {8534, 8883, 8883}, - {3007, 8887, 8888}, - {8827, 8892, 8893}, - {4788, 8897, 8900}, - {5698, 8906, 8907}, - {7690, 8911, 8911}, - {6643, 8919, 8919}, - {7206, 8923, 8924}, - {7866, 8929, 8931}, - {8880, 8942, 8942}, - {8630, 8951, 8952}, - {6027, 8958, 8958}, - {7749, 8966, 8967}, - {4932, 8972, 8973}, - {8892, 8980, 8981}, - {634, 9003, 9003}, - {8109, 9007, 9008}, - {8777, 9012, 9012}, - {3981, 9016, 9017}, - {5723, 9025, 9025}, - {7662, 9034, 9038}, - {8955, 9042, 9042}, - {8070, 9060, 9062}, - {8910, 9066, 9066}, - {5363, 9070, 9071}, - {7699, 9075, 9076}, - {8991, 9081, 9081}, - {6850, 9085, 9085}, - {5811, 9092, 9094}, - {9079, 9098, 9102}, - {6456, 9106, 9106}, - {2259, 9111, 9111}, - {4752, 9116, 9116}, - {9060, 9120, 9123}, - {8090, 9127, 9127}, - {5305, 9131, 9132}, - {8623, 9137, 9137}, - {7417, 9141, 9141}, - {6564, 9148, 9149}, - {9126, 9157, 9158}, - {4285, 9169, 9170}, - {8698, 9174, 9174}, - {8869, 9178, 9178}, - {2572, 9182, 9183}, - {6482, 9188, 9190}, - {9181, 9201, 9201}, - {2968, 9208, 9209}, - {2506, 9213, 9215}, - {9127, 9219, 9219}, - {7910, 9225, 9227}, - {5422, 9235, 9239}, - {8813, 9244, 9246}, - {9178, 9250, 9250}, - {8748, 9255, 9255}, - {7354, 9265, 9265}, - {7767, 9269, 9269}, - {7710, 9281, 9283}, - {8826, 9288, 9290}, - {861, 9295, 9295}, - {4482, 9301, 9301}, - {9264, 9305, 9306}, - {8805, 9310, 9310}, - {4995, 9314, 9314}, - {6730, 9318, 9318}, - {7457, 9328, 9328}, - {2547, 9335, 9336}, - {6298, 9340, 9343}, - {9305, 9353, 9354}, - {9269, 9358, 9358}, - {6338, 9370, 9370}, - {7289, 9376, 9379}, - {5780, 9383, 9383}, - {7607, 9387, 9387}, - {2065, 9392, 9392}, - {7238, 9396, 9396}, - {8856, 9400, 9400}, - {8069, 9412, 9413}, - {611, 9420, 9420}, - {7071, 9424, 9424}, - {3089, 9430, 9431}, - {7117, 9435, 9438}, - {1976, 9445, 9445}, - {6640, 9449, 9449}, - {5488, 9453, 9453}, - {8739, 9457, 9459}, - {5958, 9466, 9466}, - {7985, 9470, 9470}, - {8735, 9475, 9475}, - {5009, 9479, 9479}, - {8073, 9483, 9484}, - {2328, 9490, 9491}, - {9250, 9495, 9495}, - {4043, 9502, 9502}, - {7712, 9506, 9506}, - {9012, 9510, 9510}, - {9028, 9514, 9515}, - {2190, 9521, 9524}, - {9029, 9528, 9528}, - {9519, 9532, 9532}, - {9495, 9536, 9536}, - {8527, 9540, 9540}, - {2137, 9550, 9550}, - {8419, 9557, 9557}, - {9383, 9561, 9562}, - {8970, 9575, 9578}, - {8911, 9582, 9582}, - {7828, 9595, 9596}, - {6180, 9600, 9600}, - {8738, 9604, 9607}, - {7540, 9611, 9612}, - {9599, 9616, 9618}, - {9187, 9623, 9623}, - {9294, 9628, 9629}, - {4536, 9639, 9639}, - {3867, 9643, 9643}, - {6305, 9648, 9648}, - {1617, 9654, 9657}, - {5762, 9666, 9666}, - {8314, 9670, 9670}, - {9666, 9674, 9675}, - {9506, 9679, 9679}, - {9669, 9685, 9686}, - {9683, 9690, 9690}, - {8763, 9697, 9698}, - 
{7468, 9702, 9702}, - {460, 9707, 9707}, - {3115, 9712, 9712}, - {9424, 9716, 9717}, - {7359, 9721, 9724}, - {7547, 9728, 9729}, - {7151, 9733, 9738}, - {7627, 9742, 9742}, - {2822, 9747, 9747}, - {8247, 9751, 9753}, - {9550, 9758, 9758}, - {7585, 9762, 9763}, - {1002, 9767, 9767}, - {7168, 9772, 9773}, - {6941, 9777, 9780}, - {9728, 9784, 9786}, - {9770, 9792, 9796}, - {6411, 9801, 9802}, - {3689, 9806, 9808}, - {9575, 9814, 9816}, - {7025, 9820, 9821}, - {2776, 9826, 9826}, - {9806, 9830, 9830}, - {9820, 9834, 9835}, - {9800, 9839, 9847}, - {9834, 9851, 9852}, - {9829, 9856, 9862}, - {1400, 9866, 9866}, - {3197, 9870, 9871}, - {9851, 9875, 9876}, - {9742, 9883, 9884}, - {3362, 9888, 9889}, - {9883, 9893, 9893}, - {5711, 9899, 9910}, - {7806, 9915, 9915}, - {9120, 9919, 9919}, - {9715, 9925, 9934}, - {2580, 9938, 9938}, - {4907, 9942, 9944}, - {6239, 9953, 9954}, - {6961, 9963, 9963}, - {5295, 9967, 9968}, - {1915, 9972, 9973}, - {3426, 9983, 9985}, - {9875, 9994, 9995}, - {6942, 9999, 9999}, - {6621, 10005, 10005}, - {7589, 10010, 10012}, - {9286, 10020, 10020}, - {838, 10024, 10024}, - {9980, 10028, 10031}, - {9994, 10035, 10041}, - {2702, 10048, 10051}, - {2621, 10059, 10059}, - {10054, 10065, 10065}, - {8612, 10073, 10074}, - {7033, 10078, 10078}, - {916, 10082, 10082}, - {10035, 10086, 10087}, - {8613, 10097, 10097}, - {9919, 10107, 10108}, - {6133, 10114, 10115}, - {10059, 10119, 10119}, - {10065, 10126, 10127}, - {7732, 10131, 10131}, - {7155, 10135, 10136}, - {6728, 10140, 10140}, - {6162, 10144, 10145}, - {4724, 10150, 10150}, - {1665, 10154, 10154}, - {10126, 10163, 10163}, - {9783, 10168, 10168}, - {1715, 10172, 10173}, - {7152, 10177, 10182}, - {8760, 10187, 10187}, - {7829, 10191, 10191}, - {9679, 10196, 10196}, - {9369, 10201, 10201}, - {2928, 10206, 10208}, - {6951, 10214, 10217}, - {5633, 10221, 10221}, - {7199, 10225, 10225}, - {10118, 10230, 10231}, - {9999, 10235, 10236}, - {10045, 10240, 10249}, - {5565, 10256, 10256}, - {9866, 10261, 10261}, - {10163, 10268, 10268}, - {9869, 10272, 10272}, - {9789, 10276, 10283}, - {10235, 10287, 10288}, - {10214, 10298, 10299}, - {6971, 10303, 10303}, - {3346, 10307, 10307}, - {10185, 10311, 10312}, - {9993, 10318, 10320}, - {2779, 10332, 10334}, - {1726, 10338, 10338}, - {741, 10354, 10360}, - {10230, 10372, 10373}, - {10260, 10384, 10385}, - {10131, 10389, 10398}, - {6946, 10406, 10409}, - {10158, 10413, 10420}, - {10123, 10424, 10424}, - {6157, 10428, 10429}, - {4518, 10434, 10434}, - {9893, 10438, 10438}, - {9865, 10442, 10446}, - {7558, 10454, 10454}, - {10434, 10460, 10460}, - {10064, 10466, 10468}, - {2703, 10472, 10474}, - {9751, 10478, 10479}, - {6714, 10485, 10485}, - {8020, 10490, 10490}, - {10303, 10494, 10494}, - {3521, 10499, 10500}, - {9281, 10513, 10515}, - {6028, 10519, 10523}, - {9387, 10527, 10527}, - {7614, 10531, 10531}, - {3611, 10536, 10536}, - {9162, 10540, 10540}, - {10081, 10546, 10547}, - {10034, 10560, 10562}, - {6726, 10567, 10571}, - {8237, 10575, 10575}, - {10438, 10579, 10583}, - {10140, 10587, 10587}, - {5784, 10592, 10592}, - {9819, 10597, 10600}, - {10567, 10604, 10608}, - {9335, 10613, 10613}, - {8300, 10617, 10617}, - {10575, 10621, 10621}, - {9678, 10625, 10626}, - {9962, 10632, 10633}, - {10535, 10637, 10638}, - {8199, 10642, 10642}, - {10372, 10647, 10648}, - {10637, 10656, 10657}, - {10579, 10667, 10668}, - {10465, 10677, 10680}, - {6702, 10684, 10685}, - {10073, 10691, 10692}, - {4505, 10696, 10697}, - {9042, 10701, 10701}, - {6460, 10705, 10706}, - {10010, 10714, 10716}, - {10656, 10720, 
10722}, - {7282, 10727, 10729}, - {2327, 10733, 10733}, - {2491, 10740, 10741}, - {10704, 10748, 10750}, - {6465, 10754, 10754}, - {10647, 10758, 10759}, - {10424, 10763, 10763}, - {10748, 10776, 10776}, - {10546, 10780, 10781}, - {10758, 10785, 10786}, - {10287, 10790, 10797}, - {10785, 10801, 10807}, - {10240, 10811, 10826}, - {9509, 10830, 10830}, - {2579, 10836, 10838}, - {9801, 10843, 10845}, - {7555, 10849, 10850}, - {10776, 10860, 10865}, - {8023, 10869, 10869}, - {10046, 10876, 10884}, - {10253, 10888, 10892}, - {9941, 10897, 10897}, - {7898, 10901, 10905}, - {6725, 10909, 10913}, - {10757, 10921, 10923}, - {10160, 10931, 10931}, - {10916, 10935, 10942}, - {10261, 10946, 10946}, - {10318, 10952, 10954}, - {5911, 10959, 10961}, - {10801, 10965, 10966}, - {10946, 10970, 10977}, - {10592, 10982, 10984}, - {9913, 10988, 10990}, - {8510, 10994, 10996}, - {9419, 11000, 11001}, - {6765, 11006, 11007}, - {10725, 11011, 11011}, - {5537, 11017, 11019}, - {9208, 11024, 11025}, - {5850, 11030, 11030}, - {9610, 11034, 11036}, - {8846, 11041, 11047}, - {9697, 11051, 11051}, - {1622, 11055, 11058}, - {2370, 11062, 11062}, - {8393, 11067, 11067}, - {9756, 11071, 11071}, - {10172, 11076, 11076}, - {27, 11081, 11081}, - {7357, 11087, 11092}, - {8151, 11104, 11106}, - {6115, 11110, 11110}, - {10667, 11114, 11115}, - {11099, 11121, 11123}, - {10705, 11127, 11127}, - {8938, 11131, 11131}, - {11114, 11135, 11136}, - {1390, 11140, 11141}, - {10964, 11146, 11148}, - {11140, 11152, 11155}, - {9813, 11159, 11166}, - {624, 11171, 11172}, - {3118, 11177, 11179}, - {11029, 11184, 11186}, - {10186, 11190, 11190}, - {10306, 11196, 11196}, - {8665, 11201, 11201}, - {7382, 11205, 11205}, - {1100, 11210, 11210}, - {2337, 11216, 11217}, - {1609, 11221, 11223}, - {5763, 11228, 11229}, - {5220, 11233, 11233}, - {11061, 11241, 11241}, - {10617, 11246, 11246}, - {11190, 11250, 11251}, - {10144, 11255, 11256}, - {11232, 11260, 11260}, - {857, 11264, 11265}, - {10994, 11269, 11271}, - {3879, 11280, 11281}, - {11184, 11287, 11289}, - {9611, 11293, 11295}, - {11250, 11299, 11299}, - {4495, 11304, 11304}, - {7574, 11308, 11309}, - {9814, 11315, 11317}, - {1713, 11321, 11324}, - {1905, 11328, 11328}, - {8745, 11335, 11340}, - {8883, 11351, 11351}, - {8119, 11358, 11358}, - {1842, 11363, 11364}, - {11237, 11368, 11368}, - {8814, 11373, 11374}, - {5684, 11378, 11378}, - {11011, 11382, 11382}, - {6520, 11389, 11389}, - {11183, 11393, 11396}, - {1790, 11404, 11404}, - {9536, 11408, 11408}, - {11298, 11418, 11419}, - {3929, 11425, 11425}, - {5588, 11429, 11429}, - {8476, 11436, 11436}, - {4096, 11440, 11442}, - {11084, 11446, 11454}, - {10603, 11458, 11463}, - {7332, 11472, 11474}, - {7611, 11483, 11486}, - {4836, 11490, 11491}, - {10024, 11495, 11495}, - {4917, 11501, 11506}, - {6486, 11510, 11512}, - {11269, 11516, 11518}, - {3603, 11522, 11525}, - {11126, 11535, 11535}, - {11418, 11539, 11541}, - {11408, 11545, 11545}, - {9021, 11549, 11552}, - {6745, 11557, 11557}, - {5118, 11561, 11564}, - {7590, 11568, 11569}, - {4426, 11573, 11578}, - {9790, 11582, 11583}, - {6447, 11587, 11587}, - {10229, 11591, 11594}, - {10457, 11598, 11598}, - {10168, 11604, 11604}, - {10543, 11608, 11608}, - {7404, 11612, 11612}, - {11127, 11616, 11616}, - {3337, 11620, 11620}, - {11501, 11624, 11628}, - {4543, 11633, 11635}, - {8449, 11642, 11642}, - {4943, 11646, 11648}, - {10526, 11652, 11654}, - {11620, 11659, 11659}, - {8927, 11664, 11669}, - {532, 11673, 11673}, - {10513, 11677, 11679}, - {10428, 11683, 11683}, - {10999, 11689, 11690}, - {9469, 
11695, 11695}, - {3606, 11699, 11699}, - {9560, 11708, 11709}, - {1564, 11714, 11714}, - {10527, 11718, 11718}, - {3071, 11723, 11726}, - {11590, 11731, 11732}, - {6605, 11737, 11737}, - {11624, 11741, 11745}, - {7822, 11749, 11752}, - {5269, 11757, 11758}, - {1339, 11767, 11767}, - {1363, 11771, 11773}, - {3704, 11777, 11777}, - {10952, 11781, 11783}, - {6764, 11793, 11795}, - {8675, 11800, 11800}, - {9963, 11804, 11804}, - {11573, 11808, 11809}, - {9548, 11813, 11813}, - {11591, 11817, 11818}, - {11446, 11822, 11822}, - {9224, 11828, 11828}, - {3158, 11836, 11836}, - {10830, 11840, 11840}, - {7234, 11846, 11846}, - {11299, 11850, 11850}, - {11544, 11854, 11855}, - {11498, 11859, 11859}, - {10993, 11865, 11868}, - {9720, 11872, 11878}, - {10489, 11882, 11890}, - {11712, 11898, 11904}, - {11516, 11908, 11910}, - {11568, 11914, 11915}, - {10177, 11919, 11924}, - {11363, 11928, 11929}, - {10494, 11933, 11933}, - {9870, 11937, 11938}, - {9427, 11942, 11942}, - {11481, 11949, 11949}, - {6030, 11955, 11957}, - {11718, 11961, 11961}, - {10531, 11965, 11983}, - {5126, 11987, 11987}, - {7515, 11991, 11991}, - {10646, 11996, 11997}, - {2947, 12001, 12001}, - {9582, 12009, 12010}, - {6202, 12017, 12018}, - {11714, 12022, 12022}, - {9235, 12033, 12037}, - {9721, 12041, 12044}, - {11932, 12051, 12052}, - {12040, 12056, 12056}, - {12051, 12060, 12060}, - {11601, 12066, 12066}, - {8426, 12070, 12070}, - {4053, 12077, 12077}, - {4262, 12081, 12081}, - {9761, 12086, 12088}, - {11582, 12092, 12093}, - {10965, 12097, 12098}, - {11803, 12103, 12104}, - {11933, 12108, 12109}, - {10688, 12117, 12117}, - {12107, 12125, 12126}, - {6774, 12130, 12132}, - {6286, 12137, 12137}, - {9543, 12141, 12141}, - {12097, 12145, 12146}, - {10790, 12150, 12150}, - {10125, 12154, 12156}, - {12125, 12164, 12164}, - {12064, 12168, 12172}, - {10811, 12178, 12188}, - {12092, 12192, 12193}, - {10058, 12197, 12198}, - {11611, 12211, 12212}, - {3459, 12216, 12216}, - {10291, 12225, 12228}, - {12191, 12232, 12234}, - {12145, 12238, 12238}, - {12001, 12242, 12250}, - {3840, 12255, 12255}, - {12216, 12259, 12259}, - {674, 12272, 12272}, - {12141, 12276, 12276}, - {10766, 12280, 12280}, - {11545, 12284, 12284}, - {6496, 12290, 12290}, - {11381, 12294, 12295}, - {603, 12302, 12303}, - {12276, 12308, 12308}, - {11850, 12313, 12314}, - {565, 12319, 12319}, - {9351, 12324, 12324}, - {11822, 12328, 12328}, - {2691, 12333, 12334}, - {11840, 12338, 12338}, - {11070, 12343, 12343}, - {9510, 12347, 12347}, - {11024, 12352, 12353}, - {7173, 12359, 12359}, - {517, 12363, 12363}, - {6311, 12367, 12368}, - {11367, 12372, 12373}, - {12008, 12377, 12377}, - {11372, 12382, 12384}, - {11358, 12391, 12392}, - {11382, 12396, 12396}, - {6882, 12400, 12401}, - {11246, 12405, 12405}, - {8359, 12409, 12412}, - {10154, 12418, 12418}, - {12016, 12425, 12426}, - {8972, 12434, 12435}, - {10478, 12439, 12440}, - {12395, 12449, 12449}, - {11612, 12454, 12454}, - {12347, 12458, 12458}, - {10700, 12466, 12467}, - {3637, 12471, 12476}, - {1042, 12480, 12481}, - {6747, 12488, 12488}, - {12396, 12492, 12493}, - {9420, 12497, 12497}, - {11285, 12501, 12510}, - {4470, 12515, 12515}, - {9374, 12519, 12519}, - {11293, 12528, 12528}, - {2058, 12534, 12535}, - {6521, 12539, 12539}, - {12492, 12543, 12543}, - {3043, 12547, 12547}, - {2982, 12551, 12553}, - {11030, 12557, 12563}, - {7636, 12568, 12568}, - {9639, 12572, 12572}, - {12543, 12576, 12576}, - {5989, 12580, 12583}, - {11051, 12587, 12587}, - {1061, 12592, 12594}, - {12313, 12599, 12601}, - {11846, 12605, 12605}, - 
{12576, 12609, 12609}, - {11040, 12618, 12625}, - {12479, 12629, 12629}, - {6903, 12633, 12633}, - {12322, 12639, 12639}, - {12253, 12643, 12645}, - {5594, 12651, 12651}, - {12522, 12655, 12655}, - {11703, 12659, 12659}, - {1377, 12665, 12665}, - {8022, 12669, 12669}, - {12280, 12674, 12674}, - {9023, 12680, 12681}, - {12328, 12685, 12685}, - {3085, 12689, 12693}, - {4700, 12698, 12698}, - {10224, 12702, 12702}, - {8781, 12706, 12706}, - {1651, 12710, 12710}, - {12458, 12714, 12714}, - {12005, 12718, 12721}, - {11908, 12725, 12726}, - {8202, 12733, 12733}, - {11708, 12739, 12740}, - {12599, 12744, 12745}, - {12284, 12749, 12749}, - {5285, 12756, 12756}, - {12055, 12775, 12777}, - {6919, 12782, 12782}, - {12242, 12786, 12786}, - {12009, 12790, 12790}, - {9628, 12794, 12796}, - {11354, 12801, 12802}, - {10225, 12806, 12807}, - {579, 12813, 12813}, - {8935, 12817, 12822}, - {8753, 12827, 12829}, - {11006, 12835, 12835}, - {858, 12841, 12845}, - {476, 12849, 12849}, - {7667, 12854, 12854}, - {12760, 12860, 12871}, - {11677, 12875, 12877}, - {12714, 12881, 12881}, - {12731, 12885, 12890}, - {7108, 12894, 12896}, - {1165, 12900, 12900}, - {4021, 12906, 12906}, - {10829, 12910, 12911}, - {12331, 12915, 12915}, - {8887, 12919, 12921}, - {11639, 12925, 12925}, - {7964, 12929, 12929}, - {12528, 12937, 12937}, - {8148, 12941, 12941}, - {12770, 12948, 12950}, - {12609, 12954, 12954}, - {12685, 12958, 12958}, - {2803, 12962, 12962}, - {9561, 12966, 12966}, - {6671, 12972, 12973}, - {12056, 12977, 12977}, - {6380, 12981, 12981}, - {12048, 12985, 12985}, - {11961, 12989, 12993}, - {3368, 12997, 12999}, - {6634, 13004, 13004}, - {6775, 13009, 13010}, - {12136, 13014, 13019}, - {10341, 13023, 13023}, - {13002, 13027, 13027}, - {10587, 13031, 13031}, - {10307, 13035, 13035}, - {12736, 13039, 13039}, - {12744, 13043, 13044}, - {6175, 13048, 13048}, - {9702, 13053, 13054}, - {662, 13059, 13061}, - {12718, 13065, 13068}, - {12893, 13072, 13075}, - {8299, 13086, 13091}, - {12604, 13095, 13096}, - {12848, 13100, 13101}, - {12749, 13105, 13105}, - {12526, 13109, 13114}, - {9173, 13122, 13122}, - {12769, 13128, 13128}, - {13038, 13132, 13132}, - {12725, 13136, 13137}, - {12639, 13146, 13146}, - {9711, 13150, 13151}, - {12137, 13155, 13155}, - {13039, 13159, 13159}, - {4681, 13163, 13164}, - {12954, 13168, 13168}, - {13158, 13175, 13176}, - {13105, 13180, 13180}, - {10754, 13184, 13184}, - {13167, 13188, 13188}, - {12658, 13192, 13192}, - {4294, 13199, 13200}, - {11682, 13204, 13205}, - {11695, 13209, 13209}, - {11076, 13214, 13214}, - {12232, 13218, 13218}, - {9399, 13223, 13224}, - {12880, 13228, 13229}, - {13048, 13234, 13234}, - {9701, 13238, 13239}, - {13209, 13243, 13243}, - {3658, 13248, 13248}, - {3698, 13252, 13254}, - {12237, 13260, 13260}, - {8872, 13266, 13266}, - {12957, 13272, 13273}, - {1393, 13281, 13281}, - {2013, 13285, 13288}, - {4244, 13296, 13299}, - {9428, 13303, 13303}, - {12702, 13307, 13307}, - {13078, 13311, 13311}, - {6071, 13315, 13315}, - {3061, 13319, 13319}, - {2051, 13324, 13324}, - {11560, 13328, 13331}, - {6584, 13336, 13336}, - {8482, 13340, 13340}, - {5331, 13344, 13344}, - {4171, 13348, 13348}, - {8501, 13352, 13352}, - {9219, 13356, 13356}, - {9473, 13360, 13363}, - {12881, 13367, 13367}, - {13065, 13371, 13375}, - {2979, 13379, 13384}, - {1518, 13388, 13388}, - {11177, 13392, 13392}, - {9457, 13398, 13398}, - {12293, 13407, 13410}, - {3697, 13414, 13417}, - {10338, 13425, 13425}, - {13367, 13429, 13429}, - {11074, 13433, 13437}, - {4201, 13441, 13443}, - {1812, 13447, 13448}, 
- {13360, 13452, 13456}, - {13188, 13463, 13463}, - {9732, 13470, 13470}, - {11332, 13477, 13477}, - {9918, 13487, 13487}, - {6337, 13497, 13497}, - {13429, 13501, 13501}, - {11413, 13505, 13505}, - {4685, 13512, 13513}, - {13136, 13517, 13519}, - {7416, 13528, 13530}, - {12929, 13534, 13534}, - {11110, 13539, 13539}, - {11521, 13543, 13543}, - {12825, 13553, 13553}, - {13447, 13557, 13558}, - {12299, 13562, 13563}, - {9003, 13570, 13570}, - {12500, 13577, 13577}, - {13501, 13581, 13581}, - {9392, 13586, 13586}, - {12454, 13590, 13590}, - {6189, 13595, 13595}, - {13053, 13599, 13599}, - {11881, 13604, 13604}, - {13159, 13608, 13608}, - {4894, 13612, 13612}, - {13221, 13621, 13621}, - {8950, 13625, 13625}, - {13533, 13629, 13629}, - {9633, 13633, 13633}, - {7892, 13637, 13639}, - {13581, 13643, 13643}, - {13616, 13647, 13649}, - {12794, 13653, 13654}, - {8919, 13659, 13659}, - {9674, 13663, 13663}, - {13577, 13668, 13668}, - {12966, 13672, 13672}, - {12659, 13676, 13683}, - {6124, 13688, 13688}, - {9225, 13693, 13695}, - {11833, 13702, 13702}, - {12904, 13709, 13717}, - {13647, 13721, 13722}, - {11687, 13726, 13727}, - {12434, 13731, 13732}, - {12689, 13736, 13742}, - {13168, 13746, 13746}, - {6151, 13751, 13752}, - {11821, 13756, 13757}, - {6467, 13764, 13764}, - {5730, 13769, 13769}, - {5136, 13780, 13780}, - {724, 13784, 13785}, - {13517, 13789, 13791}, - {640, 13795, 13796}, - {7721, 13800, 13802}, - {11121, 13806, 13807}, - {5791, 13811, 13815}, - {12894, 13819, 13819}, - {11100, 13824, 13824}, - {7011, 13830, 13830}, - {7129, 13834, 13837}, - {13833, 13841, 13841}, - {11276, 13847, 13847}, - {13621, 13853, 13853}, - {13589, 13862, 13863}, - {12989, 13867, 13867}, - {12789, 13871, 13871}, - {1239, 13875, 13875}, - {4675, 13879, 13881}, - {4686, 13885, 13885}, - {707, 13889, 13889}, - {5449, 13897, 13898}, - {13867, 13902, 13903}, - {10613, 13908, 13908}, - {13789, 13912, 13914}, - {4451, 13918, 13919}, - {9200, 13924, 13924}, - {2011, 13930, 13930}, - {11433, 13934, 13936}, - {4695, 13942, 13943}, - {9435, 13948, 13951}, - {13688, 13955, 13957}, - {11694, 13961, 13962}, - {5712, 13966, 13966}, - {5991, 13970, 13972}, - {13477, 13976, 13976}, - {10213, 13987, 13987}, - {11839, 13991, 13993}, - {12272, 13997, 13997}, - {6206, 14001, 14001}, - {13179, 14006, 14007}, - {2939, 14011, 14011}, - {12972, 14016, 14017}, - {13918, 14021, 14022}, - {7436, 14026, 14027}, - {7678, 14032, 14034}, - {13586, 14040, 14040}, - {13347, 14044, 14044}, - {13109, 14048, 14051}, - {9244, 14055, 14057}, - {13315, 14061, 14061}, - {13276, 14067, 14067}, - {11435, 14073, 14074}, - {13853, 14078, 14078}, - {13452, 14082, 14082}, - {14044, 14087, 14087}, - {4440, 14091, 14095}, - {4479, 14100, 14103}, - {9395, 14107, 14109}, - {6834, 14119, 14119}, - {10458, 14123, 14124}, - {1429, 14129, 14129}, - {8443, 14135, 14135}, - {10365, 14140, 14140}, - {5267, 14145, 14145}, - {11834, 14151, 14153}, -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index 0cf5e379c..000000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. 
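Stepping back briefly to the golden table that closed just above, before snappy.go's package comment continues: as the comment at the head of that table says, it exists to benchmark extendMatch. The sketch below shows one way such a table can be consumed. It is an illustration under assumptions, not the deleted benchmark itself: it presumes src holds the contents of testdata/Mark.Twain-Tom.Sawyer.txt and that extendMatch and extendMatchGoldenTestCases are in scope.

package snappy

import "testing"

// benchmarkExtendMatch replays every golden (i, j, want) triple against
// extendMatch and reports an error on any mismatch.
func benchmarkExtendMatch(b *testing.B, src []byte) {
	b.SetBytes(int64(len(src)))
	for n := 0; n < b.N; n++ {
		for _, tc := range extendMatchGoldenTestCases {
			if got := extendMatch(src, tc.i, tc.j); got != tc.want {
				b.Fatalf("extendMatch(src, %d, %d) = %d, want %d", tc.i, tc.j, got, tc.want)
			}
		}
	}
}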
-// -// The C++ snappy implementation is at https://github.com/google/snappy -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index 2712710df..000000000 --- a/vendor/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
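The block format spelled out in the deleted package comment above is easiest to verify on a concrete block. A minimal sketch, not part of this patch: it hand-assembles one block under that tag scheme and checks it with the package's exported Decode; the byte values are illustrative, chosen to mirror the deleted TestDecode cases further down (e.g. "\x0cabcd" for a 4-byte literal).

    package main

    import (
        "fmt"

        "github.com/golang/snappy"
    )

    func main() {
        // 0x0c            varint-encoded decodedLen = 12.
        // 0x0c            tagLiteral with m = 3: the next 1+3 = 4 bytes are literal.
        // 'a' 'b' 'c' 'd' the literal bytes.
        // 0x11, 0x04      tagCopy1: length = 4 + (low 3 bits of m) = 8, offset = 4,
        //                 so 8 bytes are copied from 4 bytes back: "abcdabcd".
        block := []byte{0x0c, 0x0c, 'a', 'b', 'c', 'd', 0x11, 0x04}
        got, err := snappy.Decode(nil, block)
        fmt.Printf("%q %v\n", got, err) // "abcdabcdabcd" <nil>
    }

The same arithmetic appears in the deleted test vectors below, e.g. "\x08" + "\x0cabcd" + "\x01\x04" decoding to "abcdabcd" for a length-4, offset-4 copy.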
- -package snappy - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") - benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") -) - -// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by -// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on -// this GOARCH. There is more than one valid encoding of any given input, and -// there is more than one good algorithm along the frontier of trading off -// throughput for output size. Nonetheless, we presume that the C++ encoder's -// algorithm is a good one and has been tested on a wide range of inputs, so -// matching that exactly should mean that the Go encoder's algorithm is also -// good, without needing to gather our own corpus of test data. -// -// The exact algorithm used by the C++ code is potentially endian dependent, as -// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes -// at a time. The Go implementation is endian agnostic, in that its output is -// the same (as little-endian C++ code), regardless of the CPU's endianness. -// -// Thus, when comparing Go's output to C++ output generated beforehand, such as -// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- -// endian system, we can run that test regardless of the runtime.GOARCH value. -// -// When comparing Go's output to dynamically generated C++ output, i.e. the -// result of fork/exec'ing a C++ program, we can run that test only on -// little-endian systems, because the C++ output might be different on -// big-endian systems. The runtime package doesn't export endianness per se, -// but we can restrict this match-C++ test to common little-endian systems. 
-const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" - -func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { - got := maxEncodedLenOfMaxBlockSize - want := MaxEncodedLen(maxBlockSize) - if got != want { - t.Fatalf("got %d, want %d", got, want) - } -} - -func cmp(a, b []byte) error { - if bytes.Equal(a, b) { - return nil - } - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if err := cmp(d, b); err != nil { - return fmt.Errorf("roundtrip mismatch: %v", err) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(1)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Intn(256)) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - testCases := []struct { - desc string - input string - }{{ - "invalid varint, final byte has continuation bit set", - "\xff", - }, { - "invalid varint, value overflows uint64", - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", - }, { - // https://github.com/google/snappy/blob/master/format_description.txt - // says that "the stream starts with the uncompressed length [as a - // varint] (up to a maximum of 2^32 - 1)". 
- "valid varint (as uint64), but value overflows uint32", - "\x80\x80\x80\x80\x10", - }} - - for _, tc := range testCases { - input := []byte(tc.input) - if _, err := DecodedLen(input); err != ErrCorrupt { - t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) - } - if _, err := Decode(nil, input); err != ErrCorrupt { - t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) - } - } -} - -func TestDecode(t *testing.T) { - lit40Bytes := make([]byte, 40) - for i := range lit40Bytes { - lit40Bytes[i] = byte(i) - } - lit40 := string(lit40Bytes) - - testCases := []struct { - desc string - input string - want string - wantErr error - }{{ - `decodedLen=0; valid input`, - "\x00", - "", - nil, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, - "\x03" + "\x08\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, - "\x02" + "\x08\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, - "\x03" + "\x08\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, - "\x28" + "\x9c" + lit40, - lit40, - nil, - }, { - `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, - "\x01" + "\xf0", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, - "\x03" + "\xf0\x02\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, - "\x01" + "\xf4\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, - "\x03" + "\xf4\x02\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, - "\x01" + "\xf8\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, - "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, - "\x01" + "\xfc\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, - "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, - "\x04" + "\xfc\x02\x00\x00\x00\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, - "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, - "\x04" + "\x01", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x02\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x03\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, - "\x04" + "\x0cabcd", - "abcd", - nil, - }, { - `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, - "\x0d" + "\x0cabcd" + "\x15\x04", - "abcdabcdabcda", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, - "\x08" + "\x0cabcd" + "\x01\x04", - "abcdabcd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, - "\x08" + "\x0cabcd" + "\x01\x02", - "abcdcdcd", - nil, - }, { - 
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, - "\x08" + "\x0cabcd" + "\x01\x01", - "abcddddd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, - "\x08" + "\x0cabcd" + "\x01\x00", - "", - ErrCorrupt, - }, { - `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, - "\x09" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, - "\x08" + "\x0cabcd" + "\x01\x05", - "", - ErrCorrupt, - }, { - `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, - "\x07" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x06\x03\x00", - "abcdbc", - nil, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", - "abcdbc", - nil, - }} - - const ( - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to dBuf - // to check that Decode does not write bytes past the end of - // dBuf[:dLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. - notPresentBase = 0xa0 - notPresentLen = 37 - ) - - var dBuf [100]byte -loop: - for i, tc := range testCases { - input := []byte(tc.input) - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) - continue loop - } - } - - dLen, n := binary.Uvarint(input) - if n <= 0 { - t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) - continue - } - if dLen > uint64(len(dBuf)) { - t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) - continue - } - - for j := range dBuf { - dBuf[j] = byte(notPresentBase + j%notPresentLen) - } - g, gotErr := Decode(dBuf[:], input) - if got := string(g); got != tc.want || gotErr != tc.wantErr { - t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", - i, tc.desc, got, gotErr, tc.want, tc.wantErr) - continue - } - for j, x := range dBuf { - if uint64(j) < dLen { - continue - } - if w := byte(notPresentBase + j%notPresentLen); x != w { - t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", - i, tc.desc, j, x, w, dBuf) - continue loop - } - } - } -} - -func TestDecodeCopy4(t *testing.T) { - dots := strings.Repeat(".", 65536) - - input := strings.Join([]string{ - "\x89\x80\x04", // decodedLen = 65545. - "\x0cpqrs", // 4-byte literal "pqrs". - "\xf4\xff\xff" + dots, // 65536-byte literal dots. - "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. - }, "") - - gotBytes, err := Decode(nil, []byte(input)) - if err != nil { - t.Fatal(err) - } - got := string(gotBytes) - want := "pqrs" + dots + "pqrs." - if len(got) != len(want) { - t.Fatalf("got %d bytes, want %d", len(got), len(want)) - } - if got != want { - for i := 0; i < len(got); i++ { - if g, w := got[i], want[i]; g != w { - t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) - } - } - } -} - -// TestDecodeLengthOffset tests decoding an encoding of the form literal + -// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". -func TestDecodeLengthOffset(t *testing.T) { - const ( - prefix = "abcdefghijklmnopqr" - suffix = "ABCDEFGHIJKLMNOPQR" - - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to - // gotBuf to check that Decode does not write bytes past the end of - // gotBuf[:totalLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. - notPresentBase = 0xa0 - notPresentLen = 37 - ) - var gotBuf, wantBuf, inputBuf [128]byte - for length := 1; length <= 18; length++ { - for offset := 1; offset <= 18; offset++ { - loop: - for suffixLen := 0; suffixLen <= 18; suffixLen++ { - totalLen := len(prefix) + length + suffixLen - - inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) - inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], prefix) - inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) - inputBuf[inputLen+1] = byte(offset) - inputBuf[inputLen+2] = 0x00 - inputLen += 3 - if suffixLen > 0 { - inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) - } - input := inputBuf[:inputLen] - - for i := range gotBuf { - gotBuf[i] = byte(notPresentBase + i%notPresentLen) - } - got, err := Decode(gotBuf[:], input) - if err != nil { - t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) - continue - } - - wantLen := 0 - wantLen += copy(wantBuf[wantLen:], prefix) - for i := 0; i < length; i++ { - wantBuf[wantLen] = wantBuf[wantLen-offset] - wantLen++ - } - wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) - want := wantBuf[:wantLen] - - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", - length, offset, suffixLen, x, input) - continue loop - } - } - for i, x := range gotBuf { - if i < totalLen { - continue - } - if w := byte(notPresentBase + i%notPresentLen); x != w { - t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ - "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", - length, offset, suffixLen, totalLen, i, x, w, gotBuf) - continue loop - } - } - for _, x := range want { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", - length, offset, suffixLen, x, want) - continue loop - } - } - - if !bytes.Equal(got, want) { - t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", - length, offset, suffixLen, input, got, want) - continue - } - } - } - } -} - -const ( - goldenText = "Mark.Twain-Tom.Sawyer.txt" - goldenCompressed = goldenText + ".rawsnappy" -) - -func TestDecodeGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got, err := Decode(nil, src) - if err != nil { - t.Fatalf("Decode: %v", err) - } - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestEncodeGoldenInput(t *testing.T) { - tDir := 
filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got := Encode(nil, src) - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestExtendMatchGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - for i, tc := range extendMatchGoldenTestCases { - got := extendMatch(src, tc.i, tc.j) - if got != tc.want { - t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", - i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) - } - } -} - -func TestExtendMatch(t *testing.T) { - // ref is a simple, reference implementation of extendMatch. - ref := func(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j - } - - nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} - for yIndex := 40; yIndex > 30; yIndex-- { - xxx := bytes.Repeat([]byte("x"), 40) - if yIndex < len(xxx) { - xxx[yIndex] = 'y' - } - for _, i := range nums { - for _, j := range nums { - if i >= j { - continue - } - got := extendMatch(xxx, i, j) - want := ref(xxx, i, j) - if got != want { - t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) - } - } - } - } -} - -const snappytoolCmdName = "cmd/snappytool/snappytool" - -func skipTestSameEncodingAsCpp() (msg string) { - if !goEncoderShouldMatchCppEncoder { - return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) - } - if _, err := os.Stat(snappytoolCmdName); err != nil { - return fmt.Sprintf("could not find snappytool: %v", err) - } - return "" -} - -func runTestSameEncodingAsCpp(src []byte) error { - got := Encode(nil, src) - - cmd := exec.Command(snappytoolCmdName, "-e") - cmd.Stdin = bytes.NewReader(src) - want, err := cmd.Output() - if err != nil { - return fmt.Errorf("could not run snappytool: %v", err) - } - return cmp(got, want) -} - -func TestSameEncodingAsCppShortCopies(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - src := bytes.Repeat([]byte{'a'}, 20) - for i := 0; i <= len(src); i++ { - if err := runTestSameEncodingAsCpp(src[:i]); err != nil { - t.Errorf("i=%d: %v", i, err) - } - } -} - -func TestSameEncodingAsCppLongFiles(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - bDir := filepath.FromSlash(*benchdataDir) - failed := false - for i, tf := range testFiles { - if err := downloadBenchmarkFiles(t, tf.filename); err != nil { - t.Fatalf("failed to download testdata: %s", err) - } - data := readFile(t, filepath.Join(bDir, tf.filename)) - if n := tf.sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if err := runTestSameEncodingAsCpp(data); err != nil { - t.Errorf("i=%d: %v", i, err) - failed = true - } - } - if failed { - t.Errorf("was the snappytool program built against the C++ snappy library version " + - "d53de187 or later, committed on 2016-04-05? See " + - "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") - } -} - -// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm -// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
-func TestSlowForwardCopyOverrun(t *testing.T) { - const base = 100 - - for length := 1; length < 18; length++ { - for offset := 1; offset < 18; offset++ { - highWaterMark := base - d := base - l := length - o := offset - - // makeOffsetAtLeast8 - for o < 8 { - if end := d + 8; highWaterMark < end { - highWaterMark = end - } - l -= o - d += o - o += o - } - - // fixUpSlowForwardCopy - a := d - d += l - - // finishSlowForwardCopy - for l > 0 { - if end := a + 8; highWaterMark < end { - highWaterMark = end - } - a += 8 - l -= 8 - } - - dWant := base + length - overrun := highWaterMark - dWant - if d != dWant || overrun < 0 || 10 < overrun { - t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", - length, offset, d, overrun, dWant) - } - } - } -} - -// TestEncodeNoiseThenRepeats encodes input for which the first half is very -// incompressible and the second half is very compressible. The encoded form's -// length should be closer to 50% of the original length than 100%. -func TestEncodeNoiseThenRepeats(t *testing.T) { - for _, origLen := range []int{256 * 1024, 2048 * 1024} { - src := make([]byte, origLen) - rng := rand.New(rand.NewSource(1)) - firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] - for i := range firstHalf { - firstHalf[i] = uint8(rng.Intn(256)) - } - for i := range secondHalf { - secondHalf[i] = uint8(i >> 8) - } - dst := Encode(nil, src) - if got, want := len(dst), origLen*3/4; got >= want { - t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) - } - } -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxBlockSize (64k). - src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestWriterGoldenOutput(t *testing.T) { - buf := new(bytes.Buffer) - w := NewBufferedWriter(buf) - defer w.Close() - w.Write([]byte("abcd")) // Not compressible. - w.Flush() - w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. - w.Flush() - // The next chunk is also compressible, but a naive, greedy encoding of the - // overall length 67 copy as a length 64 copy (the longest expressible as a - // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte - // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 - // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 - // (of length 60) and a 2-byte tagCopy1 (of length 7). - w.Write(bytes.Repeat([]byte{'B'}, 68)) - w.Write([]byte("efC")) // Not compressible. - w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. - w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. - w.Write([]byte("g")) // Not compressible. - w.Flush() - - got := buf.String() - want := strings.Join([]string{ - magicChunk, - "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). - "\x68\x10\xe6\xb6", // Checksum. 
- "\x61\x62\x63\x64", // Uncompressed payload: "abcd". - "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). - "\x5f\xeb\xf2\x10", // Checksum. - "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. - "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. - "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). - "\x30\x85\x69\xeb", // Checksum. - "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. - "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". - "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. - "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. - "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". - "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. - "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. - "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". - }, "") - if got != want { - t.Fatalf("\ngot: % x\nwant: % x", got, want) - } -} - -func TestEmitLiteral(t *testing.T) { - testCases := []struct { - length int - want string - }{ - {1, "\x00"}, - {2, "\x04"}, - {59, "\xe8"}, - {60, "\xec"}, - {61, "\xf0\x3c"}, - {62, "\xf0\x3d"}, - {254, "\xf0\xfd"}, - {255, "\xf0\xfe"}, - {256, "\xf0\xff"}, - {257, "\xf4\x00\x01"}, - {65534, "\xf4\xfd\xff"}, - {65535, "\xf4\xfe\xff"}, - {65536, "\xf4\xff\xff"}, - } - - dst := make([]byte, 70000) - nines := bytes.Repeat([]byte{0x99}, 65536) - for _, tc := range testCases { - lit := nines[:tc.length] - n := emitLiteral(dst, lit) - if !bytes.HasSuffix(dst[:n], lit) { - t.Errorf("length=%d: did not end with that many literal bytes", tc.length) - continue - } - got := string(dst[:n-tc.length]) - if got != tc.want { - t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) - continue - } - } -} - -func TestEmitCopy(t *testing.T) { - testCases := []struct { - offset int - length int - want string - }{ - {8, 04, "\x01\x08"}, - {8, 11, "\x1d\x08"}, - {8, 12, "\x2e\x08\x00"}, - {8, 13, "\x32\x08\x00"}, - {8, 59, "\xea\x08\x00"}, - {8, 60, "\xee\x08\x00"}, - {8, 61, "\xf2\x08\x00"}, - {8, 62, "\xf6\x08\x00"}, - {8, 63, "\xfa\x08\x00"}, - {8, 64, "\xfe\x08\x00"}, - {8, 65, "\xee\x08\x00\x05\x08"}, - {8, 66, "\xee\x08\x00\x09\x08"}, - {8, 67, "\xee\x08\x00\x0d\x08"}, - {8, 68, "\xfe\x08\x00\x01\x08"}, - {8, 69, "\xfe\x08\x00\x05\x08"}, - {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, - - {256, 04, "\x21\x00"}, - {256, 11, "\x3d\x00"}, - {256, 12, "\x2e\x00\x01"}, - {256, 13, "\x32\x00\x01"}, - {256, 59, "\xea\x00\x01"}, - {256, 60, "\xee\x00\x01"}, - {256, 61, "\xf2\x00\x01"}, - {256, 62, "\xf6\x00\x01"}, - {256, 63, "\xfa\x00\x01"}, - {256, 64, "\xfe\x00\x01"}, - {256, 65, "\xee\x00\x01\x25\x00"}, - {256, 66, "\xee\x00\x01\x29\x00"}, - {256, 67, "\xee\x00\x01\x2d\x00"}, - {256, 68, "\xfe\x00\x01\x21\x00"}, - {256, 69, "\xfe\x00\x01\x25\x00"}, - {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, - - {2048, 04, "\x0e\x00\x08"}, - {2048, 11, "\x2a\x00\x08"}, - {2048, 12, "\x2e\x00\x08"}, - {2048, 13, "\x32\x00\x08"}, - {2048, 59, "\xea\x00\x08"}, - {2048, 60, "\xee\x00\x08"}, - {2048, 61, "\xf2\x00\x08"}, - {2048, 62, "\xf6\x00\x08"}, - {2048, 63, "\xfa\x00\x08"}, - {2048, 64, "\xfe\x00\x08"}, - {2048, 65, "\xee\x00\x08\x12\x00\x08"}, 
- {2048, 66, "\xee\x00\x08\x16\x00\x08"}, - {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, - {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, - {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, - {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, - } - - dst := make([]byte, 1024) - for _, tc := range testCases { - n := emitCopy(dst, tc.offset, tc.length) - got := string(dst[:n]) - if got != tc.want { - t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) - } - } -} - -func TestNewBufferedWriter(t *testing.T) { - // Test all 32 possible sub-sequences of these 5 input slices. - // - // Their lengths sum to 400,000, which is over 6 times the Writer ibuf - // capacity: 6 * maxBlockSize is 393,216. - inputs := [][]byte{ - bytes.Repeat([]byte{'a'}, 40000), - bytes.Repeat([]byte{'b'}, 150000), - bytes.Repeat([]byte{'c'}, 60000), - bytes.Repeat([]byte{'d'}, 120000), - bytes.Repeat([]byte{'e'}, 30000), - } -loop: - for i := 0; i < 1<<uint(len(inputs)); i++ { [...] } -} - - [...] - -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } -func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } -func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -func BenchmarkRandomEncode(b *testing.B) { - rng := rand.New(rand.NewSource(1)) - data := make([]byte, 1<<20) - for i := range data { - data[i] = uint8(rng.Intn(256)) - } - benchEncode(b, data) -} - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string - sizeLimit int -}{ - {"html", "html", 0}, - {"urls", "urls.10K", 0}, - {"jpg", "fireworks.jpeg", 0}, - {"jpg_200", "fireworks.jpeg", 200}, - {"pdf", "paper-100k.pdf", 0}, - {"html4", "html_x_4", 0}, - {"txt1", "alice29.txt", 0}, - {"txt2", "asyoulik.txt", 0}, - {"txt3", "lcet10.txt", 0}, - {"txt4", "plrabn12.txt", 0}, - {"pb", "geo.protodata", 0}, - {"gaviota", "kppkn.gtb", 0}, -} - -const ( - // The benchmark data files are at this canonical URL.
- benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" -) - -func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { - bDir := filepath.FromSlash(*benchdataDir) - filename := filepath.Join(bDir, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create %s: %s", bDir, err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := benchURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, i int, decode bool) { - if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - bDir := filepath.FromSlash(*benchdataDir) - data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) - if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } - -func BenchmarkExtendMatch(b *testing.B) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - b.Fatalf("ReadFile: %v", err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, tc := range extendMatchGoldenTestCases { - extendMatch(src, tc.i, tc.j) - } - } -} diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt deleted file mode 100644 index 86a18750b..000000000 --- a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt +++ /dev/null @@ -1,396 +0,0 @@ -Produced by David Widger. The previous edition was updated by Jose -Menendez. - - - - - - THE ADVENTURES OF TOM SAWYER - BY - MARK TWAIN - (Samuel Langhorne Clemens) - - - - - P R E F A C E - -MOST of the adventures recorded in this book really occurred; one or -two were experiences of my own, the rest those of boys who were -schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but -not from an individual--he is a combination of the characteristics of -three boys whom I knew, and therefore belongs to the composite order of -architecture. - -The odd superstitions touched upon were all prevalent among children -and slaves in the West at the period of this story--that is to say, -thirty or forty years ago. - -Although my book is intended mainly for the entertainment of boys and -girls, I hope it will not be shunned by men and women on that account, -for part of my plan has been to try to pleasantly remind adults of what -they once were themselves, and of how they felt and thought and talked, -and what queer enterprises they sometimes engaged in. - - THE AUTHOR. - -HARTFORD, 1876. - - - - T O M S A W Y E R - - - -CHAPTER I - -"TOM!" - -No answer. - -"TOM!" - -No answer. - -"What's gone with that boy, I wonder? You TOM!" - -No answer. 
- -The old lady pulled her spectacles down and looked over them about the -room; then she put them up and looked out under them. She seldom or -never looked THROUGH them for so small a thing as a boy; they were her -state pair, the pride of her heart, and were built for "style," not -service--she could have seen through a pair of stove-lids just as well. -She looked perplexed for a moment, and then said, not fiercely, but -still loud enough for the furniture to hear: - -"Well, I lay if I get hold of you I'll--" - -She did not finish, for by this time she was bending down and punching -under the bed with the broom, and so she needed breath to punctuate the -punches with. She resurrected nothing but the cat. - -"I never did see the beat of that boy!" - -She went to the open door and stood in it and looked out among the -tomato vines and "jimpson" weeds that constituted the garden. No Tom. -So she lifted up her voice at an angle calculated for distance and -shouted: - -"Y-o-u-u TOM!" - -There was a slight noise behind her and she turned just in time to -seize a small boy by the slack of his roundabout and arrest his flight. - -"There! I might 'a' thought of that closet. What you been doing in -there?" - -"Nothing." - -"Nothing! Look at your hands. And look at your mouth. What IS that -truck?" - -"I don't know, aunt." - -"Well, I know. It's jam--that's what it is. Forty times I've said if -you didn't let that jam alone I'd skin you. Hand me that switch." - -The switch hovered in the air--the peril was desperate-- - -"My! Look behind you, aunt!" - -The old lady whirled round, and snatched her skirts out of danger. The -lad fled on the instant, scrambled up the high board-fence, and -disappeared over it. - -His aunt Polly stood surprised a moment, and then broke into a gentle -laugh. - -"Hang the boy, can't I never learn anything? Ain't he played me tricks -enough like that for me to be looking out for him by this time? But old -fools is the biggest fools there is. Can't learn an old dog new tricks, -as the saying is. But my goodness, he never plays them alike, two days, -and how is a body to know what's coming? He 'pears to know just how -long he can torment me before I get my dander up, and he knows if he -can make out to put me off for a minute or make me laugh, it's all down -again and I can't hit him a lick. I ain't doing my duty by that boy, -and that's the Lord's truth, goodness knows. Spare the rod and spile -the child, as the Good Book says. I'm a laying up sin and suffering for -us both, I know. He's full of the Old Scratch, but laws-a-me! he's my -own dead sister's boy, poor thing, and I ain't got the heart to lash -him, somehow. Every time I let him off, my conscience does hurt me so, -and every time I hit him my old heart most breaks. Well-a-well, man -that is born of woman is of few days and full of trouble, as the -Scripture says, and I reckon it's so. He'll play hookey this evening, * -and [* Southwestern for "afternoon"] I'll just be obleeged to make him -work, to-morrow, to punish him. It's mighty hard to make him work -Saturdays, when all the boys is having holiday, but he hates work more -than he hates anything else, and I've GOT to do some of my duty by him, -or I'll be the ruination of the child." - -Tom did play hookey, and he had a very good time. 
He got back home -barely in season to help Jim, the small colored boy, saw next-day's -wood and split the kindlings before supper--at least he was there in -time to tell his adventures to Jim while Jim did three-fourths of the -work. Tom's younger brother (or rather half-brother) Sid was already -through with his part of the work (picking up chips), for he was a -quiet boy, and had no adventurous, troublesome ways. - -While Tom was eating his supper, and stealing sugar as opportunity -offered, Aunt Polly asked him questions that were full of guile, and -very deep--for she wanted to trap him into damaging revealments. Like -many other simple-hearted souls, it was her pet vanity to believe she -was endowed with a talent for dark and mysterious diplomacy, and she -loved to contemplate her most transparent devices as marvels of low -cunning. Said she: - -"Tom, it was middling warm in school, warn't it?" - -"Yes'm." - -"Powerful warm, warn't it?" - -"Yes'm." - -"Didn't you want to go in a-swimming, Tom?" - -A bit of a scare shot through Tom--a touch of uncomfortable suspicion. -He searched Aunt Polly's face, but it told him nothing. So he said: - -"No'm--well, not very much." - -The old lady reached out her hand and felt Tom's shirt, and said: - -"But you ain't too warm now, though." And it flattered her to reflect -that she had discovered that the shirt was dry without anybody knowing -that that was what she had in her mind. But in spite of her, Tom knew -where the wind lay, now. So he forestalled what might be the next move: - -"Some of us pumped on our heads--mine's damp yet. See?" - -Aunt Polly was vexed to think she had overlooked that bit of -circumstantial evidence, and missed a trick. Then she had a new -inspiration: - -"Tom, you didn't have to undo your shirt collar where I sewed it, to -pump on your head, did you? Unbutton your jacket!" - -The trouble vanished out of Tom's face. He opened his jacket. His -shirt collar was securely sewed. - -"Bother! Well, go 'long with you. I'd made sure you'd played hookey -and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a -singed cat, as the saying is--better'n you look. THIS time." - -She was half sorry her sagacity had miscarried, and half glad that Tom -had stumbled into obedient conduct for once. - -But Sidney said: - -"Well, now, if I didn't think you sewed his collar with white thread, -but it's black." - -"Why, I did sew it with white! Tom!" - -But Tom did not wait for the rest. As he went out at the door he said: - -"Siddy, I'll lick you for that." - -In a safe place Tom examined two large needles which were thrust into -the lapels of his jacket, and had thread bound about them--one needle -carried white thread and the other black. He said: - -"She'd never noticed if it hadn't been for Sid. Confound it! sometimes -she sews it with white, and sometimes she sews it with black. I wish to -geeminy she'd stick to one or t'other--I can't keep the run of 'em. But -I bet you I'll lam Sid for that. I'll learn him!" - -He was not the Model Boy of the village. He knew the model boy very -well though--and loathed him. - -Within two minutes, or even less, he had forgotten all his troubles. -Not because his troubles were one whit less heavy and bitter to him -than a man's are to a man, but because a new and powerful interest bore -them down and drove them out of his mind for the time--just as men's -misfortunes are forgotten in the excitement of new enterprises. 
This -new interest was a valued novelty in whistling, which he had just -acquired from a negro, and he was suffering to practise it undisturbed. -It consisted in a peculiar bird-like turn, a sort of liquid warble, -produced by touching the tongue to the roof of the mouth at short -intervals in the midst of the music--the reader probably remembers how -to do it, if he has ever been a boy. Diligence and attention soon gave -him the knack of it, and he strode down the street with his mouth full -of harmony and his soul full of gratitude. He felt much as an -astronomer feels who has discovered a new planet--no doubt, as far as -strong, deep, unalloyed pleasure is concerned, the advantage was with -the boy, not the astronomer. - -The summer evenings were long. It was not dark, yet. Presently Tom -checked his whistle. A stranger was before him--a boy a shade larger -than himself. A new-comer of any age or either sex was an impressive -curiosity in the poor little shabby village of St. Petersburg. This boy -was well dressed, too--well dressed on a week-day. This was simply -astounding. His cap was a dainty thing, his close-buttoned blue cloth -roundabout was new and natty, and so were his pantaloons. He had shoes -on--and it was only Friday. He even wore a necktie, a bright bit of -ribbon. He had a citified air about him that ate into Tom's vitals. The -more Tom stared at the splendid marvel, the higher he turned up his -nose at his finery and the shabbier and shabbier his own outfit seemed -to him to grow. Neither boy spoke. If one moved, the other moved--but -only sidewise, in a circle; they kept face to face and eye to eye all -the time. Finally Tom said: - -"I can lick you!" - -"I'd like to see you try it." - -"Well, I can do it." - -"No you can't, either." - -"Yes I can." - -"No you can't." - -"I can." - -"You can't." - -"Can!" - -"Can't!" - -An uncomfortable pause. Then Tom said: - -"What's your name?" - -"'Tisn't any of your business, maybe." - -"Well I 'low I'll MAKE it my business." - -"Well why don't you?" - -"If you say much, I will." - -"Much--much--MUCH. There now." - -"Oh, you think you're mighty smart, DON'T you? I could lick you with -one hand tied behind me, if I wanted to." - -"Well why don't you DO it? You SAY you can do it." - -"Well I WILL, if you fool with me." - -"Oh yes--I've seen whole families in the same fix." - -"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" - -"You can lump that hat if you don't like it. I dare you to knock it -off--and anybody that'll take a dare will suck eggs." - -"You're a liar!" - -"You're another." - -"You're a fighting liar and dasn't take it up." - -"Aw--take a walk!" - -"Say--if you give me much more of your sass I'll take and bounce a -rock off'n your head." - -"Oh, of COURSE you will." - -"Well I WILL." - -"Well why don't you DO it then? What do you keep SAYING you will for? -Why don't you DO it? It's because you're afraid." - -"I AIN'T afraid." - -"You are." - -"I ain't." - -"You are." - -Another pause, and more eying and sidling around each other. Presently -they were shoulder to shoulder. Tom said: - -"Get away from here!" - -"Go away yourself!" - -"I won't." - -"I won't either." - -So they stood, each with a foot placed at an angle as a brace, and -both shoving with might and main, and glowering at each other with -hate. But neither could get an advantage. After struggling till both -were hot and flushed, each relaxed his strain with watchful caution, -and Tom said: - -"You're a coward and a pup. 
I'll tell my big brother on you, and he -can thrash you with his little finger, and I'll make him do it, too." - -"What do I care for your big brother? I've got a brother that's bigger -than he is--and what's more, he can throw him over that fence, too." -[Both brothers were imaginary.] - -"That's a lie." - -"YOUR saying so don't make it so." - -Tom drew a line in the dust with his big toe, and said: - -"I dare you to step over that, and I'll lick you till you can't stand -up. Anybody that'll take a dare will steal sheep." - -The new boy stepped over promptly, and said: - -"Now you said you'd do it, now let's see you do it." - -"Don't you crowd me now; you better look out." - -"Well, you SAID you'd do it--why don't you do it?" - -"By jingo! for two cents I WILL do it." - -The new boy took two broad coppers out of his pocket and held them out -with derision. Tom struck them to the ground. In an instant both boys -were rolling and tumbling in the dirt, gripped together like cats; and -for the space of a minute they tugged and tore at each other's hair and -clothes, punched and scratched each other's nose, and covered -themselves with dust and glory. Presently the confusion took form, and -through the fog of battle Tom appeared, seated astride the new boy, and -pounding him with his fists. "Holler 'nuff!" said he. - -The boy only struggled to free himself. He was crying--mainly from rage. - -"Holler 'nuff!"--and the pounding went on. - -At last the stranger got out a smothered "'Nuff!" and Tom let him up -and said: - -"Now that'll learn you. Better look out who you're fooling with next -time." - -The new boy went off brushing the dust from his clothes, sobbing, -snuffling, and occasionally looking back and shaking his head and -threatening what he would do to Tom the "next time he caught him out." -To which Tom responded with jeers, and started off in high feather, and -as soon as his back was turned the new boy snatched up a stone, threw -it and hit him between the shoulders and then turned tail and ran like -an antelope. Tom chased the traitor home, and thus found out where he -lived. He then held a position at the gate for some time, daring the -enemy to come outside, but the enemy only made faces at him through the -window and declined. At last the enemy's mother appeared, and called -Tom a bad, vicious, vulgar child, and ordered him away. So he went -away; but he said he "'lowed" to "lay" for that boy. - -He got home pretty late that night, and when he climbed cautiously in -at the window, he uncovered an ambuscade, in the person of his aunt; -and when she saw the state his clothes were in her resolution to turn -his Saturday holiday into captivity at hard labor became adamantine in -its firmness. 
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy deleted file mode 100644 index 9c56d985888e48a9967e187523f0e3326330aeca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9871 [9871 bytes of base85-encoded binary patch data omitted]
diff --git a/vendor/github.com/google/go-github/AUTHORS b/vendor/github.com/google/go-github/AUTHORS --- a/vendor/github.com/google/go-github/AUTHORS +++ b/vendor/github.com/google/go-github/AUTHORS @@ ... @@ Amey Sakhadeo Andreas Garnæs Andrew Ryabchun +Andy Lindeman Anshuman Bhartiya Arıl Bozoluk Austin Dizzy @@ -66,6 +67,7 @@ John Engelman Juan Basso Julien Rostand Justin Abrahms +Keita Urashima Konrad Malawski Krzysztof Kowalczyk kyokomi @@ -109,6 +111,7 @@ Trey Tacon ttacon Victor Castell Victor Vrantchan +Will Maier William Bailey Yann Malet Yannick Utard diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go index 0fd850e76..3cf37f461 100644 --- a/vendor/github.com/google/go-github/github/activity_events.go +++ b/vendor/github.com/google/go-github/github/activity_events.go @@ -69,6 +69,8 @@ func (e *Event) Payload() (payload interface{}) { payload = &PublicEvent{} case "PullRequestEvent": payload = &PullRequestEvent{} + case "PullRequestReviewEvent": + payload = &PullRequestReviewEvent{} case "PullRequestReviewCommentEvent": payload = &PullRequestReviewCommentEvent{} case "PushEvent": @@ -140,7 +142,7 @@ func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOpti // ListIssueEventsForRepository lists issue events for a repository.
// // GitHub API docs: http://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository -func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) { +func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) u, err := addOptions(u, opt) if err != nil { @@ -152,7 +154,7 @@ func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt * return nil, nil, err } - events := new([]*Event) + events := new([]*IssueEvent) resp, err := s.client.Do(req, events) if err != nil { return nil, resp, err diff --git a/vendor/github.com/google/go-github/github/activity_events_test.go b/vendor/github.com/google/go-github/github/activity_events_test.go index f8ffea71a..2bb414b1e 100644 --- a/vendor/github.com/google/go-github/github/activity_events_test.go +++ b/vendor/github.com/google/go-github/github/activity_events_test.go @@ -75,7 +75,7 @@ func TestActivityService_ListIssueEventsForRepository(t *testing.T) { testFormValues(t, r, values{ "page": "2", }) - fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) }) opt := &ListOptions{Page: 2} @@ -84,7 +84,7 @@ func TestActivityService_ListIssueEventsForRepository(t *testing.T) { t.Errorf("Activities.ListIssueEventsForRepository returned error: %v", err) } - want := []*Event{{ID: String("1")}, {ID: String("2")}} + want := []*IssueEvent{{ID: Int(1)}, {ID: Int(2)}} if !reflect.DeepEqual(events, want) { t.Errorf("Activities.ListIssueEventsForRepository returned %+v, want %+v", events, want) } diff --git a/vendor/github.com/google/go-github/github/admin.go b/vendor/github.com/google/go-github/github/admin.go new file mode 100644 index 000000000..fccc4c6b3 --- /dev/null +++ b/vendor/github.com/google/go-github/github/admin.go @@ -0,0 +1,100 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import "fmt" + +// AdminService handles communication with the admin related methods of the +// GitHub API. These API routes are normally only accessible for GitHub +// Enterprise installations. +// +// GitHub API docs: https://developer.github.com/v3/enterprise/ +type AdminService service + +// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP +// group. +type TeamLDAPMapping struct { + ID *int `json:"id,omitempty"` + LDAPDN *string `json:"ldap_dn,omitempty"` + URL *string `json:"url,omitempty"` + Name *string `json:"name,omitempty"` + Slug *string `json:"slug,omitempty"` + Description *string `json:"description,omitempty"` + Privacy *string `json:"privacy,omitempty"` + Permission *string `json:"permission,omitempty"` + + MembersURL *string `json:"members_url,omitempty"` + RepositoriesURL *string `json:"repositories_url,omitempty"` +} + +func (m TeamLDAPMapping) String() string { + return Stringify(m) +} + +// UserLDAPMapping represents the mapping between a GitHub user and an LDAP +// user. 
+type UserLDAPMapping struct { + ID *int `json:"id,omitempty"` + LDAPDN *string `json:"ldap_dn,omitempty"` + Login *string `json:"login,omitempty"` + AvatarURL *string `json:"avatar_url,omitempty"` + GravatarID *string `json:"gravatar_id,omitempty"` + Type *string `json:"type,omitempty"` + SiteAdmin *bool `json:"site_admin,omitempty"` + + URL *string `json:"url,omitempty"` + EventsURL *string `json:"events_url,omitempty"` + FollowingURL *string `json:"following_url,omitempty"` + FollowersURL *string `json:"followers_url,omitempty"` + GistsURL *string `json:"gists_url,omitempty"` + OrganizationsURL *string `json:"organizations_url,omitempty"` + ReceivedEventsURL *string `json:"received_events_url,omitempty"` + ReposURL *string `json:"repos_url,omitempty"` + StarredURL *string `json:"starred_url,omitempty"` + SubscriptionsURL *string `json:"subscriptions_url,omitempty"` +} + +func (m UserLDAPMapping) String() string { + return Stringify(m) +} + +// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user. +// +// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user +func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) { + u := fmt.Sprintf("admin/ldap/users/%v/mapping", user) + req, err := s.client.NewRequest("PATCH", u, mapping) + if err != nil { + return nil, nil, err + } + + m := new(UserLDAPMapping) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, err +} + +// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group. +// +// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team +func (s *AdminService) UpdateTeamLDAPMapping(team int, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) { + u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team) + req, err := s.client.NewRequest("PATCH", u, mapping) + if err != nil { + return nil, nil, err + } + + m := new(TeamLDAPMapping) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, err +} diff --git a/vendor/github.com/google/go-github/github/admin_test.go b/vendor/github.com/google/go-github/github/admin_test.go new file mode 100644 index 000000000..f4f2e5a22 --- /dev/null +++ b/vendor/github.com/google/go-github/github/admin_test.go @@ -0,0 +1,80 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAdminService_UpdateUserLDAPMapping(t *testing.T) { + setup() + defer teardown() + + input := &UserLDAPMapping{ + LDAPDN: String("uid=asdf,ou=users,dc=github,dc=com"), + } + + mux.HandleFunc("/admin/ldap/users/u/mapping", func(w http.ResponseWriter, r *http.Request) { + v := new(UserLDAPMapping) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1,"ldap_dn":"uid=asdf,ou=users,dc=github,dc=com"}`) + }) + + mapping, _, err := client.Admin.UpdateUserLDAPMapping("u", input) + if err != nil { + t.Errorf("Admin.UpdateUserLDAPMapping returned error: %v", err) + } + + want := &UserLDAPMapping{ + ID: Int(1), + LDAPDN: String("uid=asdf,ou=users,dc=github,dc=com"), + } + if !reflect.DeepEqual(mapping, want) { + t.Errorf("Admin.UpdateUserLDAPMapping returned %+v, want %+v", mapping, want) + } +} + +func TestAdminService_UpdateTeamLDAPMapping(t *testing.T) { + setup() + defer teardown() + + input := &TeamLDAPMapping{ + LDAPDN: String("cn=Enterprise Ops,ou=teams,dc=github,dc=com"), + } + + mux.HandleFunc("/admin/ldap/teams/1/mapping", func(w http.ResponseWriter, r *http.Request) { + v := new(TeamLDAPMapping) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1,"ldap_dn":"cn=Enterprise Ops,ou=teams,dc=github,dc=com"}`) + }) + + mapping, _, err := client.Admin.UpdateTeamLDAPMapping(1, input) + if err != nil { + t.Errorf("Admin.UpdateTeamLDAPMapping returned error: %v", err) + } + + want := &TeamLDAPMapping{ + ID: Int(1), + LDAPDN: String("cn=Enterprise Ops,ou=teams,dc=github,dc=com"), + } + if !reflect.DeepEqual(mapping, want) { + t.Errorf("Admin.UpdateTeamLDAPMapping returned %+v, want %+v", mapping, want) + } +} diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go index 3abe1efc2..6a09208e4 100644 --- a/vendor/github.com/google/go-github/github/event_types.go +++ b/vendor/github.com/google/go-github/github/event_types.go @@ -327,6 +327,26 @@ type PullRequestEvent struct { Sender *User `json:"sender,omitempty"` } +// PullRequestReviewEvent is triggered when a review is submitted on a pull +// request. +// The Webhook event name is "pull_request_review". +// +// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent +type PullRequestReviewEvent struct { + // Action is always "submitted". + Action *string `json:"action,omitempty"` + Review *PullRequestReview `json:"review,omitempty"` + PullRequest *PullRequest `json:"pull_request,omitempty"` + + // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Sender *User `json:"sender,omitempty"` + + // The following field is only present when the webhook is triggered on + // a repository belonging to an organization. + Organization *Organization `json:"organization,omitempty"` +} + // PullRequestReviewCommentEvent is triggered when a comment is created on a // portion of the unified diff of a pull request. // The Webhook event name is "pull_request_review_comment". 
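Together with the `Payload()` case added above and the `pull_request_review` entry added to the webhook type map further down in this patch, `PullRequestReviewEvent` becomes reachable through `ParseWebHook`. A minimal receiver sketch against this vendored go-github (the route, handler name, and secret are illustrative only, not part of the patch):

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/github"
)

func reviewHook(w http.ResponseWriter, r *http.Request) {
	// ValidatePayload checks the X-Hub-Signature header against a shared secret.
	payload, err := github.ValidatePayload(r, []byte("hook-secret")) // placeholder secret
	if err != nil {
		http.Error(w, "bad signature", http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unknown event type", http.StatusBadRequest)
		return
	}
	// Note the pointer type in the assertion, matching the corrected
	// ParseWebHook doc example later in this patch.
	if e, ok := event.(*github.PullRequestReviewEvent); ok {
		if e.Review != nil && e.Review.State != nil {
			log.Printf("pull request review submitted: state=%s", *e.Review.State)
		}
	}
}

func main() {
	http.HandleFunc("/webhook", reviewHook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```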
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go index 258f9e215..d96fa85c8 100644 --- a/vendor/github.com/google/go-github/github/github.go +++ b/vendor/github.com/google/go-github/github/github.go @@ -117,6 +117,7 @@ type Client struct { // Services used for talking to different parts of the GitHub API. Activity *ActivityService + Admin *AdminService Authorizations *AuthorizationsService Gists *GistsService Git *GitService @@ -189,6 +190,7 @@ func NewClient(httpClient *http.Client) *Client { c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL} c.common.client = c c.Activity = (*ActivityService)(&c.common) + c.Admin = (*AdminService)(&c.common) c.Authorizations = (*AuthorizationsService)(&c.common) c.Gists = (*GistsService)(&c.common) c.Git = (*GitService)(&c.common) diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go index 810e9fd6f..070665490 100644 --- a/vendor/github.com/google/go-github/github/messages.go +++ b/vendor/github.com/google/go-github/github/messages.go @@ -55,6 +55,7 @@ var ( "milestone": "MilestoneEvent", "page_build": "PageBuildEvent", "public": "PublicEvent", + "pull_request_review": "PullRequestReviewEvent", "pull_request_review_comment": "PullRequestReviewCommentEvent", "pull_request": "PullRequestEvent", "push": "PushEvent", @@ -170,9 +171,9 @@ func WebHookType(r *http.Request) string { // event, err := github.ParseWebHook(github.WebHookType(r), payload) // if err != nil { ... } // switch event := event.(type) { -// case CommitCommentEvent: +// case *github.CommitCommentEvent: // processCommitCommentEvent(event) -// case CreateEvent: +// case *github.CreateEvent: // processCreateEvent(event) // ... // } diff --git a/vendor/github.com/google/go-github/github/messages_test.go b/vendor/github.com/google/go-github/github/messages_test.go index 327adf5ab..a9af4ef5d 100644 --- a/vendor/github.com/google/go-github/github/messages_test.go +++ b/vendor/github.com/google/go-github/github/messages_test.go @@ -152,6 +152,10 @@ func TestParseWebHook(t *testing.T) { payload: &PullRequestEvent{}, messageType: "pull_request", }, + { + payload: &PullRequestReviewEvent{}, + messageType: "pull_request_review", + }, { payload: &PullRequestReviewCommentEvent{}, messageType: "pull_request_review_comment", diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go new file mode 100644 index 000000000..ae3cdd4af --- /dev/null +++ b/vendor/github.com/google/go-github/github/pulls_reviews.go @@ -0,0 +1,19 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import "time" + +// PullRequestReview represents a review of a pull request. +type PullRequestReview struct { + ID *int `json:"id,omitempty"` + User *User `json:"user,omitempty"` + Body *string `json:"body,omitempty"` + SubmittedAt *time.Time `json:"submitted_at,omitempty"` + + // State can be "approved", "rejected", or "commented". 
+ State *string `json:"state,omitempty"` +} diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go index ff846735b..3df0b69aa 100644 --- a/vendor/github.com/google/go-github/github/repos.go +++ b/vendor/github.com/google/go-github/github/repos.go @@ -501,29 +501,54 @@ func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptio // Branch represents a repository branch type Branch struct { - Name *string `json:"name,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Protection *Protection `json:"protection,omitempty"` + Name *string `json:"name,omitempty"` + Commit *Commit `json:"commit,omitempty"` + Protected *bool `json:"protected,omitempty"` } -// Protection represents a repository branch's protection +// Protection represents a repository branch's protection. type Protection struct { - Enabled *bool `json:"enabled,omitempty"` - RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks,omitempty"` + RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` + Restrictions *BranchRestrictions `json:"restrictions"` } -// RequiredStatusChecks represents the protection status of a individual branch +// ProtectionRequest represents a request to create/edit a branch's protection. +type ProtectionRequest struct { + RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` + Restrictions *BranchRestrictionsRequest `json:"restrictions"` +} + +// RequiredStatusChecks represents the protection status of a individual branch. type RequiredStatusChecks struct { - // Who required status checks apply to. - // Possible values are: - // off - // non_admins - // everyone - EnforcementLevel *string `json:"enforcement_level,omitempty"` - // The list of status checks which are required + // Enforce required status checks for repository administrators. + IncludeAdmins *bool `json:"include_admins,omitempty"` + // Require branches to be up to date before merging. + Strict *bool `json:"strict,omitempty"` + // The list of status checks to require in order to merge into this + // branch. Contexts *[]string `json:"contexts,omitempty"` } +// BranchRestrictions represents the restriction that only certain users or +// teams may push to a branch. +type BranchRestrictions struct { + // The list of user logins with push access. + Users []*User `json:"users,omitempty"` + // The list of team slugs with push access. + Teams []*Team `json:"teams,omitempty"` +} + +// BranchRestrictionsRequest represents the request to create/edit the +// restriction that only certain users or teams may push to a branch. It is +// separate from BranchRestrictions above because the request structure is +// different from the response structure. +type BranchRestrictionsRequest struct { + // The list of user logins with push access. + Users *[]string `json:"users,omitempty"` + // The list of team slugs with push access. + Teams *[]string `json:"teams,omitempty"` +} + // ListBranches lists branches for the specified repository. 
// // GitHub API docs: http://developer.github.com/v3/repos/#list-branches @@ -539,6 +564,7 @@ func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListO return nil, nil, err } + // TODO: remove custom Accept header when this API fully launches req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) branches := new([]*Branch) @@ -560,6 +586,7 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R return nil, nil, err } + // TODO: remove custom Accept header when this API fully launches req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) b := new(Branch) @@ -571,25 +598,64 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R return b, resp, err } -// EditBranch edits the branch (currently only Branch Protection) +// GetBranchProtection gets the protection of a given branch. // -// GitHub API docs: https://developer.github.com/v3/repos/#enabling-and-disabling-branch-protection -func (s *RepositoriesService) EditBranch(owner, repo, branchName string, branch *Branch) (*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branchName) - req, err := s.client.NewRequest("PATCH", u, branch) +// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-branch-protection +func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*Protection, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) + req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } + // TODO: remove custom Accept header when this API fully launches req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) - b := new(Branch) - resp, err := s.client.Do(req, b) + p := new(Protection) + resp, err := s.client.Do(req, p) if err != nil { return nil, resp, err } - return b, resp, err + return p, resp, err +} + +// UpdateBranchProtection updates the protection of a given branch. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-branch-protection +func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) + req, err := s.client.NewRequest("PUT", u, preq) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + p := new(Protection) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, err +} + +// RemoveBranchProtection removes the protection of a given branch. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-branch-protection +func (s *RepositoriesService) RemoveBranchProtection(owner, repo, branch string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + return s.client.Do(req, nil) } // License gets the contents of a repository's license if one is detected. 
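The branch-editing entry point `EditBranch` is removed; protection is now read and written through the dedicated `/protection` endpoint methods above. A sketch of how a caller might drive them (owner, repo, branch, and the status-check context are placeholders, and a real client would be authenticated, e.g. via oauth2):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// Unauthenticated client for illustration only; the protection
	// endpoints require an authenticated client in practice.
	client := github.NewClient(nil)

	preq := &github.ProtectionRequest{
		RequiredStatusChecks: &github.RequiredStatusChecks{
			IncludeAdmins: github.Bool(true),
			Strict:        github.Bool(true),
			Contexts:      &[]string{"continuous-integration"},
		},
		// Restrictions is declared without omitempty, so leaving it nil
		// sends "restrictions": null and keeps push access unrestricted.
	}

	protection, _, err := client.Repositories.UpdateBranchProtection("owner", "repo", "master", preq)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(github.Stringify(protection))

	// Reading and clearing protection use the same endpoint:
	//   client.Repositories.GetBranchProtection("owner", "repo", "master")
	//   client.Repositories.RemoveBranchProtection("owner", "repo", "master")
}
```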
diff --git a/vendor/github.com/google/go-github/github/repos_test.go b/vendor/github.com/google/go-github/github/repos_test.go index 209227c0f..6da0684f2 100644 --- a/vendor/github.com/google/go-github/github/repos_test.go +++ b/vendor/github.com/google/go-github/github/repos_test.go @@ -447,7 +447,7 @@ func TestRepositoriesService_GetBranch(t *testing.T) { mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) - fmt.Fprint(w, `{"name":"n", "commit":{"sha":"s"}, "protection": {"enabled": true, "required_status_checks": {"enforcement_level": "everyone","contexts": []}}}`) + fmt.Fprint(w, `{"name":"n", "commit":{"sha":"s"}, "protected":true}`) }) branch, _, err := client.Repositories.GetBranch("o", "r", "b") @@ -456,15 +456,9 @@ func TestRepositoriesService_GetBranch(t *testing.T) { } want := &Branch{ - Name: String("n"), - Commit: &Commit{SHA: String("s")}, - Protection: &Protection{ - Enabled: Bool(true), - RequiredStatusChecks: &RequiredStatusChecks{ - EnforcementLevel: String("everyone"), - Contexts: &[]string{}, - }, - }, + Name: String("n"), + Commit: &Commit{SHA: String("s")}, + Protected: Bool(true), } if !reflect.DeepEqual(branch, want) { @@ -472,39 +466,110 @@ func TestRepositoriesService_GetBranch(t *testing.T) { } } -func TestRepositoriesService_EditBranch(t *testing.T) { +func TestRepositoriesService_GetBranchProtection(t *testing.T) { setup() defer teardown() - input := &Branch{ - Protection: &Protection{ - Enabled: Bool(true), - RequiredStatusChecks: &RequiredStatusChecks{ - EnforcementLevel: String("everyone"), - Contexts: &[]string{"continous-integration"}, + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprintf(w, `{"required_status_checks":{"include_admins":true,"strict":true,"contexts":["continuous-integration"]},"restrictions":{"users":[{"id":1,"login":"u"}],"teams":[{"id":2,"slug":"t"}]}}`) + }) + + protection, _, err := client.Repositories.GetBranchProtection("o", "r", "b") + if err != nil { + t.Errorf("Repositories.GetBranchProtection returned error: %v", err) + } + + want := &Protection{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: Bool(true), + Strict: Bool(true), + Contexts: &[]string{"continuous-integration"}, + }, + Restrictions: &BranchRestrictions{ + Users: []*User{ + {Login: String("u"), ID: Int(1)}, + }, + Teams: []*Team{ + {Slug: String("t"), ID: Int(2)}, }, }, } + if !reflect.DeepEqual(protection, want) { + t.Errorf("Repositories.GetBranchProtection returned %+v, want %+v", protection, want) + } +} - mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) { - v := new(Branch) +func TestRepositoriesService_UpdateBranchProtection(t *testing.T) { + setup() + defer teardown() + + input := &ProtectionRequest{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: Bool(true), + Strict: Bool(true), + Contexts: &[]string{"continuous-integration"}, + }, + Restrictions: &BranchRestrictionsRequest{ + Users: &[]string{"u"}, + Teams: &[]string{"t"}, + }, + } + + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) json.NewDecoder(r.Body).Decode(v) - testMethod(t, r, "PATCH") + testMethod(t, r, "PUT") if 
!reflect.DeepEqual(v, input) { t.Errorf("Request body = %+v, want %+v", v, input) } testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) - fmt.Fprint(w, `{"protection": {"enabled": true, "required_status_checks": {"enforcement_level": "everyone", "contexts": ["continous-integration"]}}}`) + fmt.Fprintf(w, `{"required_status_checks":{"include_admins":true,"strict":true,"contexts":["continuous-integration"]},"restrictions":{"users":[{"id":1,"login":"u"}],"teams":[{"id":2,"slug":"t"}]}}`) }) - branch, _, err := client.Repositories.EditBranch("o", "r", "b", input) + protection, _, err := client.Repositories.UpdateBranchProtection("o", "r", "b", input) if err != nil { - t.Errorf("Repositories.EditBranch returned error: %v", err) + t.Errorf("Repositories.UpdateBranchProtection returned error: %v", err) + } + + want := &Protection{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: Bool(true), + Strict: Bool(true), + Contexts: &[]string{"continuous-integration"}, + }, + Restrictions: &BranchRestrictions{ + Users: []*User{ + {Login: String("u"), ID: Int(1)}, + }, + Teams: []*Team{ + {Slug: String("t"), ID: Int(2)}, + }, + }, } + if !reflect.DeepEqual(protection, want) { + t.Errorf("Repositories.UpdateBranchProtection returned %+v, want %+v", protection, want) + } +} + +func TestRepositoriesService_RemoveBranchProtection(t *testing.T) { + setup() + defer teardown() - if !reflect.DeepEqual(branch, input) { - t.Errorf("Repositories.EditBranch returned %+v, want %+v", branch, input) + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Repositories.RemoveBranchProtection("o", "r", "b") + if err != nil { + t.Errorf("Repositories.RemoveBranchProtection returned error: %v", err) } } diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go index b3428bbf6..11d5adbee 100644 --- a/vendor/github.com/hashicorp/hcl/decoder_test.go +++ b/vendor/github.com/hashicorp/hcl/decoder_test.go @@ -82,9 +82,14 @@ func TestDecode_interface(t *testing.T) { }, { "multiline_literal.hcl", + true, + nil, + }, + { + "multiline_literal_with_hil.hcl", false, - map[string]interface{}{"multiline_literal": testhelper.Unix2dos(`hello - world`)}, + map[string]interface{}{"multiline_literal_with_hil": testhelper.Unix2dos(`${hello + world}`)}, }, { "multiline_no_marker.hcl", diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index 54a6493fb..476ed04da 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -256,7 +256,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: - fmt.Println("illegal") + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } default: return keys, &PosError{ Pos: p.tok.Pos, diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.golden index a1e994997..3d10c741d 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.golden +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.golden @@ -1,7 +1,7 @@ resource "null_resource" 
"some_command" { provisioner "local-exec" { - command = "echo ' + command = "${echo ' some newlines -and additonal output'" +and additonal output'}" } } diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.input index a1e994997..3d10c741d 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.input +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.input @@ -1,7 +1,7 @@ resource "null_resource" "some_command" { provisioner "local-exec" { - command = "echo ' + command = "${echo ' some newlines -and additonal output'" +and additonal output'}" } } diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index d387794bc..69662367f 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -480,7 +480,7 @@ func (s *Scanner) scanString() { // read character after quote ch := s.next() - if ch < 0 || ch == eof { + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go index e92f650e5..4f2c9cbe0 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -476,6 +476,36 @@ EOF } +func TestScan_crlf(t *testing.T) { + complexHCL := "foo {\r\n bar = \"baz\"\r\n}\r\n" + + literals := []struct { + tokenType token.Type + literal string + }{ + {token.IDENT, `foo`}, + {token.LBRACE, `{`}, + {token.IDENT, `bar`}, + {token.ASSIGN, `=`}, + {token.STRING, `"baz"`}, + {token.RBRACE, `}`}, + {token.EOF, ``}, + } + + s := New([]byte(complexHCL)) + for _, l := range literals { + tok := s.Scan() + if l.tokenType != tok.Type { + t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) + } + + if l.literal != tok.Text { + t.Errorf("got:\n%+v\n%s\n want:\n%+v\n%s\n", []byte(tok.String()), tok, []byte(l.literal), l.literal) + } + } + +} + func TestError(t *testing.T) { testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) @@ -494,7 +524,8 @@ func TestError(t *testing.T) { testError(t, `"`, "1:2", "literal not terminated", token.STRING) testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) - testError(t, `"abc`+"\n", "2:1", "literal not terminated", token.STRING) + testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) + testError(t, `"${abc`+"\n", "2:1", "literal not terminated", token.STRING) testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT) testError(t, `/foo`, "1:1", "expected '/' for comment", token.COMMENT) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go index d5787693f..5f981eaa2 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -27,6 +27,9 @@ func Unquote(s string) (t string, err error) { if quote != '"' { return "", ErrSyntax } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } // Is it trivial? Avoid allocation. 
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { @@ -84,6 +87,10 @@ func Unquote(s string) (t string, err error) { } } + if s[0] == '\n' { + return "", ErrSyntax + } + c, multibyte, ss, err := unquoteChar(s, quote) if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go index 9de8c9f2d..65be375d9 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -40,6 +40,7 @@ var unquotetests = []unQuoteTest{ {`"echo ${var.region}${element(split(",",var.zones),0)}"`, `echo ${var.region}${element(split(",",var.zones),0)}`}, {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`}, + {`"${\n}"`, `${\n}`}, } var misquoted = []string{ @@ -65,9 +66,12 @@ var misquoted = []string{ "`\"", `"\'"`, `'\"'`, + "\"\n\"", + "\"\\n\n\"", "'\n'", `"${"`, `"${foo{}"`, + "\"${foo}\n\"", } func TestUnquote(t *testing.T) { diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline_literal_with_hil.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline_literal_with_hil.hcl new file mode 100644 index 000000000..b55a361ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline_literal_with_hil.hcl @@ -0,0 +1,2 @@ +multiline_literal_with_hil = "${hello + world}" \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go b/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go index c689f73b7..827ac6f1e 100644 --- a/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go +++ b/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go @@ -12,4 +12,4 @@ func Unix2dos(unix string) string { } return strings.Replace(unix, "\n", "\r\n", -1) -} \ No newline at end of file +} diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/klauspost/crc32/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml deleted file mode 100644 index de64ae491..000000000 --- a/vendor/github.com/klauspost/crc32/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip - -script: - - go test -v . - - go test -v -race . diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE deleted file mode 100644 index 4fd5963e3..000000000 --- a/vendor/github.com/klauspost/crc32/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2015 Klaus Post - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md deleted file mode 100644 index 029625d36..000000000 --- a/vendor/github.com/klauspost/crc32/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# crc32 -CRC32 hash with x64 optimizations - -This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. - -[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) - -# usage - -Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. - -Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. - -# changes -* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match. -* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. - - -# performance - -For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back. - - -For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: -``` -benchmark old ns/op new ns/op delta -BenchmarkCrc32KB 99955 10258 -89.74% - -benchmark old MB/s new MB/s speedup -BenchmarkCrc32KB 327.83 3194.20 9.74x -``` - -For other tables and "CLMUL" capable machines the performance is the same as the standard library. - -Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. - -``` -Std: Standard Go 1.5 library -Crc: Indicates IEEE type CRC. -40B: Size of each slice encoded. -NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). -Castagnoli: Castagnoli CRC type. 
- -BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s -BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) -BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) - -BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s -BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) -BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) - -BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) -BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) -BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) - -BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) -BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) -BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) - -BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s -BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) -BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) -BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s -BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) -BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) -BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s -BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm) -BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8) -BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s -BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm) -BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8) -BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm) -``` - -The IEEE assembler optimizations has been submitted and will be part of the Go 1.6 standard library. - -However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7. - -# license - -Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go deleted file mode 100644 index 8aa91b17e..000000000 --- a/vendor/github.com/klauspost/crc32/crc32.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, -// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for -// information. -// -// Polynomials are represented in LSB-first form also known as reversed representation. -// -// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials -// for information. -package crc32 - -import ( - "hash" - "sync" -) - -// The size of a CRC-32 checksum in bytes. -const Size = 4 - -// Predefined polynomials. -const ( - // IEEE is by far and away the most common CRC-32 polynomial. - // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... - IEEE = 0xedb88320 - - // Castagnoli's polynomial, used in iSCSI. - // Has better error detection characteristics than IEEE. - // http://dx.doi.org/10.1109/26.231911 - Castagnoli = 0x82f63b78 - - // Koopman's polynomial. - // Also has better error detection characteristics than IEEE. 
- // http://dx.doi.org/10.1109/DSN.2002.1028931 - Koopman = 0xeb31d82e -) - -// Table is a 256-word table representing the polynomial for efficient processing. -type Table [256]uint32 - -// This file makes use of functions implemented in architecture-specific files. -// The interface that they implement is as follows: -// -// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE -// // algorithm is available. -// archAvailableIEEE() bool -// -// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. -// // It can only be called if archAvailableIEEE() returns true. -// archInitIEEE() -// -// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if -// // archInitIEEE() was previously called. -// archUpdateIEEE(crc uint32, p []byte) uint32 -// -// // archAvailableCastagnoli reports whether an architecture-specific -// // CRC32-C algorithm is available. -// archAvailableCastagnoli() bool -// -// // archInitCastagnoli initializes the architecture-specific CRC32-C -// // algorithm. It can only be called if archAvailableCastagnoli() returns -// // true. -// archInitCastagnoli() -// -// // archUpdateCastagnoli updates the given CRC32-C. It can only be called -// // if archInitCastagnoli() was previously called. -// archUpdateCastagnoli(crc uint32, p []byte) uint32 - -// castagnoliTable points to a lazily initialized Table for the Castagnoli -// polynomial. MakeTable will always return this value when asked to make a -// Castagnoli table so we can compare against it to find when the caller is -// using this polynomial. -var castagnoliTable *Table -var castagnoliTable8 *slicing8Table -var castagnoliArchImpl bool -var updateCastagnoli func(crc uint32, p []byte) uint32 -var castagnoliOnce sync.Once - -func castagnoliInit() { - castagnoliTable = simpleMakeTable(Castagnoli) - castagnoliArchImpl = archAvailableCastagnoli() - - if castagnoliArchImpl { - archInitCastagnoli() - updateCastagnoli = archUpdateCastagnoli - } else { - // Initialize the slicing-by-8 table. - castagnoliTable8 = slicingMakeTable(Castagnoli) - updateCastagnoli = func(crc uint32, p []byte) uint32 { - return slicingUpdate(crc, castagnoliTable8, p) - } - } -} - -// IEEETable is the table for the IEEE polynomial. -var IEEETable = simpleMakeTable(IEEE) - -// ieeeTable8 is the slicing8Table for IEEE -var ieeeTable8 *slicing8Table -var ieeeArchImpl bool -var updateIEEE func(crc uint32, p []byte) uint32 -var ieeeOnce sync.Once - -func ieeeInit() { - ieeeArchImpl = archAvailableIEEE() - - if ieeeArchImpl { - archInitIEEE() - updateIEEE = archUpdateIEEE - } else { - // Initialize the slicing-by-8 table. - ieeeTable8 = slicingMakeTable(IEEE) - updateIEEE = func(crc uint32, p []byte) uint32 { - return slicingUpdate(crc, ieeeTable8, p) - } - } -} - -// MakeTable returns a Table constructed from the specified polynomial. -// The contents of this Table must not be modified. -func MakeTable(poly uint32) *Table { - switch poly { - case IEEE: - ieeeOnce.Do(ieeeInit) - return IEEETable - case Castagnoli: - castagnoliOnce.Do(castagnoliInit) - return castagnoliTable - } - return simpleMakeTable(poly) -} - -// digest represents the partial evaluation of a checksum. -type digest struct { - crc uint32 - tab *Table -} - -// New creates a new hash.Hash32 computing the CRC-32 checksum -// using the polynomial represented by the Table. -// Its Sum method will lay the value out in big-endian byte order. 
-func New(tab *Table) hash.Hash32 { - if tab == IEEETable { - ieeeOnce.Do(ieeeInit) - } - return &digest{0, tab} -} - -// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum -// using the IEEE polynomial. -// Its Sum method will lay the value out in big-endian byte order. -func NewIEEE() hash.Hash32 { return New(IEEETable) } - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -// Update returns the result of adding the bytes in p to the crc. -func Update(crc uint32, tab *Table, p []byte) uint32 { - switch tab { - case castagnoliTable: - return updateCastagnoli(crc, p) - case IEEETable: - // Unfortunately, because IEEETable is exported, IEEE may be used without a - // call to MakeTable. We have to make sure it gets initialized in that case. - ieeeOnce.Do(ieeeInit) - return updateIEEE(crc, p) - default: - return simpleUpdate(crc, tab, p) - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - switch d.tab { - case castagnoliTable: - d.crc = updateCastagnoli(d.crc, p) - case IEEETable: - // We only create digest objects through New() which takes care of - // initialization in this case. - d.crc = updateIEEE(d.crc, p) - default: - d.crc = simpleUpdate(d.crc, d.tab, p) - } - return len(p), nil -} - -func (d *digest) Sum32() uint32 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// Checksum returns the CRC-32 checksum of data -// using the polynomial represented by the Table. -func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } - -// ChecksumIEEE returns the CRC-32 checksum of data -// using the IEEE polynomial. -func ChecksumIEEE(data []byte) uint32 { - ieeeOnce.Do(ieeeInit) - return updateIEEE(0, data) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go deleted file mode 100644 index af2a0b844..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!gccgo - -// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a -// description of the interface that each architecture-specific file -// implements. - -package crc32 - -import "unsafe" - -// This file contains the code to call the SSE 4.2 version of the Castagnoli -// and IEEE CRC. - -// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use -// CPUID to test for SSE 4.1, 4.2 and CLMUL support. -func haveSSE41() bool -func haveSSE42() bool -func haveCLMUL() bool - -// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42(crc uint32, p []byte) uint32 - -// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42Triple( - crcA, crcB, crcC uint32, - a, b, c []byte, - rounds uint32, -) (retA uint32, retB uint32, retC uint32) - -// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ -// instruction as well as SSE 4.1. 
-//go:noescape -func ieeeCLMUL(crc uint32, p []byte) uint32 - -var sse42 = haveSSE42() -var useFastIEEE = haveCLMUL() && haveSSE41() - -const castagnoliK1 = 168 -const castagnoliK2 = 1344 - -type sse42Table [4]Table - -var castagnoliSSE42TableK1 *sse42Table -var castagnoliSSE42TableK2 *sse42Table - -func archAvailableCastagnoli() bool { - return sse42 -} - -func archInitCastagnoli() { - if !sse42 { - panic("arch-specific Castagnoli not available") - } - castagnoliSSE42TableK1 = new(sse42Table) - castagnoliSSE42TableK2 = new(sse42Table) - // See description in updateCastagnoli. - // t[0][i] = CRC(i000, O) - // t[1][i] = CRC(0i00, O) - // t[2][i] = CRC(00i0, O) - // t[3][i] = CRC(000i, O) - // where O is a sequence of K zeros. - var tmp [castagnoliK2]byte - for b := 0; b < 4; b++ { - for i := 0; i < 256; i++ { - val := uint32(i) << uint32(b*8) - castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) - castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) - } - } -} - -// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the -// table given) with the given initial crc value. This corresponds to -// CRC(crc, O) in the description in updateCastagnoli. -func castagnoliShift(table *sse42Table, crc uint32) uint32 { - return table[3][crc>>24] ^ - table[2][(crc>>16)&0xFF] ^ - table[1][(crc>>8)&0xFF] ^ - table[0][crc&0xFF] -} - -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !sse42 { - panic("not available") - } - - // This method is inspired from the algorithm in Intel's white paper: - // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" - // The same strategy of splitting the buffer in three is used but the - // combining calculation is different; the complete derivation is explained - // below. - // - // -- The basic idea -- - // - // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a - // time. In recent Intel architectures the instruction takes 3 cycles; - // however the processor can pipeline up to three instructions if they - // don't depend on each other. - // - // Roughly this means that we can process three buffers in about the same - // time we can process one buffer. - // - // The idea is then to split the buffer in three, CRC the three pieces - // separately and then combine the results. - // - // Combining the results requires precomputed tables, so we must choose a - // fixed buffer length to optimize. The longer the length, the faster; but - // only buffers longer than this length will use the optimization. We choose - // two cutoffs and compute tables for both: - // - one around 512: 168*3=504 - // - one around 4KB: 1344*3=4032 - // - // -- The nitty gritty -- - // - // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with - // initial non-inverted CRC I). This function has the following properties: - // (a) CRC(I, AB) = CRC(CRC(I, A), B) - // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) - // - // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of - // K bytes each, where K is a fixed constant. Let O be the sequence of K zero - // bytes. - // - // CRC(I, ABC) = CRC(I, ABO xor C) - // = CRC(I, ABO) xor CRC(0, C) - // = CRC(CRC(I, AB), O) xor CRC(0, C) - // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) - // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) - // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) - // - // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), - // and CRC(0, C) efficiently. 
We just need to find a way to quickly compute - // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these - // values; since we can't have a 32-bit table, we break it up into four - // 8-bit tables: - // - // CRC(uvwx, O) = CRC(u000, O) xor - // CRC(0v00, O) xor - // CRC(00w0, O) xor - // CRC(000x, O) - // - // We can compute tables corresponding to the four terms for all 8-bit - // values. - - crc = ^crc - - // If a buffer is long enough to use the optimization, process the first few - // bytes to align the buffer to an 8 byte boundary (if necessary). - if len(p) >= castagnoliK1*3 { - delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) - if delta != 0 { - delta = 8 - delta - crc = castagnoliSSE42(crc, p[:delta]) - p = p[delta:] - } - } - - // Process 3*K2 at a time. - for len(p) >= castagnoliK2*3 { - // Compute CRC(I, A), CRC(0, B), and CRC(0, C). - crcA, crcB, crcC := castagnoliSSE42Triple( - crc, 0, 0, - p, p[castagnoliK2:], p[castagnoliK2*2:], - castagnoliK2/24) - - // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) - crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB - // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) - crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC - p = p[castagnoliK2*3:] - } - - // Process 3*K1 at a time. - for len(p) >= castagnoliK1*3 { - // Compute CRC(I, A), CRC(0, B), and CRC(0, C). - crcA, crcB, crcC := castagnoliSSE42Triple( - crc, 0, 0, - p, p[castagnoliK1:], p[castagnoliK1*2:], - castagnoliK1/24) - - // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) - crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB - // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) - crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC - p = p[castagnoliK1*3:] - } - - // Use the simple implementation for what's left. - crc = castagnoliSSE42(crc, p) - return ^crc -} - -func archAvailableIEEE() bool { - return useFastIEEE -} - -var archIeeeTable8 *slicing8Table - -func archInitIEEE() { - if !useFastIEEE { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archIeeeTable8 = slicingMakeTable(IEEE) -} - -func archUpdateIEEE(crc uint32, p []byte) uint32 { - if !useFastIEEE { - panic("not available") - } - - if len(p) >= 64 { - left := len(p) & 15 - do := len(p) - left - crc = ^ieeeCLMUL(^crc, p[:do]) - p = p[do:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archIeeeTable8, p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s deleted file mode 100644 index e8a7941ce..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64.s +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#define NOSPLIT 4 -#define RODATA 8 - -// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. -// -// func castagnoliSSE42(crc uint32, p []byte) uint32 -TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 - MOVL crc+0(FP), AX // CRC value - MOVQ p+8(FP), SI // data pointer - MOVQ p_len+16(FP), CX // len(p) - - // If there are fewer than 8 bytes to process, skip alignment. - CMPQ CX, $8 - JL less_than_8 - - MOVQ SI, BX - ANDQ $7, BX - JZ aligned - - // Process the first few bytes to 8-byte align the input. - - // BX = 8 - BX. We need to process this many bytes to align. 
- SUBQ $1, BX - XORQ $7, BX - - BTQ $0, BX - JNC align_2 - - CRC32B (SI), AX - DECQ CX - INCQ SI - -align_2: - BTQ $1, BX - JNC align_4 - - // CRC32W (SI), AX - BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - - SUBQ $2, CX - ADDQ $2, SI - -align_4: - BTQ $2, BX - JNC aligned - - // CRC32L (SI), AX - BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - - SUBQ $4, CX - ADDQ $4, SI - -aligned: - // The input is now 8-byte aligned and we can process 8-byte chunks. - CMPQ CX, $8 - JL less_than_8 - - CRC32Q (SI), AX - ADDQ $8, SI - SUBQ $8, CX - JMP aligned - -less_than_8: - // We may have some bytes left over; process 4 bytes, then 2, then 1. - BTQ $2, CX - JNC less_than_4 - - // CRC32L (SI), AX - BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - ADDQ $4, SI - -less_than_4: - BTQ $1, CX - JNC less_than_2 - - // CRC32W (SI), AX - BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - ADDQ $2, SI - -less_than_2: - BTQ $0, CX - JNC done - - CRC32B (SI), AX - -done: - MOVL AX, ret+32(FP) - RET - -// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) -// bytes from each buffer. -// -// func castagnoliSSE42Triple( -// crc1, crc2, crc3 uint32, -// a, b, c []byte, -// rounds uint32, -// ) (retA uint32, retB uint32, retC uint32) -TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 - MOVL crcA+0(FP), AX - MOVL crcB+4(FP), CX - MOVL crcC+8(FP), DX - - MOVQ a+16(FP), R8 // data pointer - MOVQ b+40(FP), R9 // data pointer - MOVQ c+64(FP), R10 // data pointer - - MOVL rounds+88(FP), R11 - -loop: - CRC32Q (R8), AX - CRC32Q (R9), CX - CRC32Q (R10), DX - - CRC32Q 8(R8), AX - CRC32Q 8(R9), CX - CRC32Q 8(R10), DX - - CRC32Q 16(R8), AX - CRC32Q 16(R9), CX - CRC32Q 16(R10), DX - - ADDQ $24, R8 - ADDQ $24, R9 - ADDQ $24, R10 - - DECQ R11 - JNZ loop - - MOVL AX, retA+96(FP) - MOVL CX, retB+100(FP) - MOVL DX, retC+104(FP) - RET - -// func haveSSE42() bool -TEXT ·haveSSE42(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $20, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// func haveCLMUL() bool -TEXT ·haveCLMUL(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $1, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// func haveSSE41() bool -TEXT ·haveSSE41(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $19, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// CRC32 polynomial data -// -// These constants are lifted from the -// Linux kernel, since they avoid the costly -// PSHUFB 16 byte reversal proposed in the -// original Intel paper. -DATA r2r1kp<>+0(SB)/8, $0x154442bd4 -DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 -DATA r4r3kp<>+0(SB)/8, $0x1751997d0 -DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e -DATA rupolykp<>+0(SB)/8, $0x1db710641 -DATA rupolykp<>+8(SB)/8, $0x1f7011641 -DATA r5kp<>+0(SB)/8, $0x163cd6124 - -GLOBL r2r1kp<>(SB), RODATA, $16 -GLOBL r4r3kp<>(SB), RODATA, $16 -GLOBL rupolykp<>(SB), RODATA, $16 -GLOBL r5kp<>(SB), RODATA, $8 - -// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf -// len(p) must be at least 64, and must be a multiple of 16. 
- -// func ieeeCLMUL(crc uint32, p []byte) uint32 -TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 - MOVL crc+0(FP), X0 // Initial CRC value - MOVQ p+8(FP), SI // data pointer - MOVQ p_len+16(FP), CX // len(p) - - MOVOU (SI), X1 - MOVOU 16(SI), X2 - MOVOU 32(SI), X3 - MOVOU 48(SI), X4 - PXOR X0, X1 - ADDQ $64, SI // buf+=64 - SUBQ $64, CX // len-=64 - CMPQ CX, $64 // Less than 64 bytes left - JB remain64 - - MOVOA r2r1kp<>+0(SB), X0 - -loopback64: - MOVOA X1, X5 - MOVOA X2, X6 - MOVOA X3, X7 - MOVOA X4, X8 - - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0, X0, X2 - PCLMULQDQ $0, X0, X3 - PCLMULQDQ $0, X0, X4 - - // Load next early - MOVOU (SI), X11 - MOVOU 16(SI), X12 - MOVOU 32(SI), X13 - MOVOU 48(SI), X14 - - PCLMULQDQ $0x11, X0, X5 - PCLMULQDQ $0x11, X0, X6 - PCLMULQDQ $0x11, X0, X7 - PCLMULQDQ $0x11, X0, X8 - - PXOR X5, X1 - PXOR X6, X2 - PXOR X7, X3 - PXOR X8, X4 - - PXOR X11, X1 - PXOR X12, X2 - PXOR X13, X3 - PXOR X14, X4 - - ADDQ $0x40, DI - ADDQ $64, SI // buf+=64 - SUBQ $64, CX // len-=64 - CMPQ CX, $64 // Less than 64 bytes left? - JGE loopback64 - - // Fold result into a single register (X1) -remain64: - MOVOA r4r3kp<>+0(SB), X0 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X2, X1 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X3, X1 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X4, X1 - - // If there is less than 16 bytes left we are done - CMPQ CX, $16 - JB finish - - // Encode 16 bytes -remain16: - MOVOU (SI), X10 - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X10, X1 - SUBQ $16, CX - ADDQ $16, SI - CMPQ CX, $16 - JGE remain16 - -finish: - // Fold final result into 32 bits and return it - PCMPEQB X3, X3 - PCLMULQDQ $1, X1, X0 - PSRLDQ $8, X1 - PXOR X0, X1 - - MOVOA X1, X2 - MOVQ r5kp<>+0(SB), X0 - - // Creates 32 bit mask. Note that we don't care about upper half. - PSRLQ $32, X3 - - PSRLDQ $4, X2 - PAND X3, X1 - PCLMULQDQ $0, X0, X1 - PXOR X2, X1 - - MOVOA rupolykp<>+0(SB), X0 - - MOVOA X1, X2 - PAND X3, X1 - PCLMULQDQ $0x10, X0, X1 - PAND X3, X1 - PCLMULQDQ $0, X0, X1 - PXOR X2, X1 - - // PEXTRD $1, X1, AX (SSE 4.1) - BYTE $0x66; BYTE $0x0f; BYTE $0x3a - BYTE $0x16; BYTE $0xc8; BYTE $0x01 - MOVL AX, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go deleted file mode 100644 index 3222b06a5..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!gccgo - -package crc32 - -// This file contains the code to call the SSE 4.2 version of the Castagnoli -// CRC. - -// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 -// support. -func haveSSE42() bool - -// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42(crc uint32, p []byte) uint32 - -var sse42 = haveSSE42() - -func archAvailableCastagnoli() bool { - return sse42 -} - -func archInitCastagnoli() { - if !sse42 { - panic("not available") - } - // No initialization necessary. 
-} - -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !sse42 { - panic("not available") - } - return castagnoliSSE42(crc, p) -} - -func archAvailableIEEE() bool { return false } -func archInitIEEE() { panic("not available") } -func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s deleted file mode 100644 index a578d685c..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#define NOSPLIT 4 -#define RODATA 8 - -// func castagnoliSSE42(crc uint32, p []byte) uint32 -TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 - MOVL crc+0(FP), AX // CRC value - MOVL p+4(FP), SI // data pointer - MOVL p_len+8(FP), CX // len(p) - - NOTL AX - - // If there's less than 8 bytes to process, we do it byte-by-byte. - CMPQ CX, $8 - JL cleanup - - // Process individual bytes until the input is 8-byte aligned. -startup: - MOVQ SI, BX - ANDQ $7, BX - JZ aligned - - CRC32B (SI), AX - DECQ CX - INCQ SI - JMP startup - -aligned: - // The input is now 8-byte aligned and we can process 8-byte chunks. - CMPQ CX, $8 - JL cleanup - - CRC32Q (SI), AX - ADDQ $8, SI - SUBQ $8, CX - JMP aligned - -cleanup: - // We may have some bytes left over that we process one at a time. - CMPQ CX, $0 - JE done - - CRC32B (SI), AX - INCQ SI - DECQ CX - JMP cleanup - -done: - NOTL AX - MOVL AX, ret+16(FP) - RET - -// func haveSSE42() bool -TEXT ·haveSSE42(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $20, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go deleted file mode 100644 index abacbb663..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_generic.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains CRC32 algorithms that are not specific to any architecture -// and don't use hardware acceleration. -// -// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. -// -// The slicing-by-8 algorithm is a faster implementation that uses a bigger -// table (8*256*4 bytes). - -package crc32 - -// simpleMakeTable allocates and constructs a Table for the specified -// polynomial. The table is suitable for use with the simple algorithm -// (simpleUpdate). -func simpleMakeTable(poly uint32) *Table { - t := new(Table) - simplePopulateTable(poly, t) - return t -} - -// simplePopulateTable constructs a Table for the specified polynomial, suitable -// for use with simpleUpdate. -func simplePopulateTable(poly uint32, t *Table) { - for i := 0; i < 256; i++ { - crc := uint32(i) - for j := 0; j < 8; j++ { - if crc&1 == 1 { - crc = (crc >> 1) ^ poly - } else { - crc >>= 1 - } - } - t[i] = crc - } -} - -// simpleUpdate uses the simple algorithm to update the CRC, given a table that -// was previously computed using simpleMakeTable. -func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { - crc = ^crc - for _, v := range p { - crc = tab[byte(crc)^v] ^ (crc >> 8) - } - return ^crc -} - -// Use slicing-by-8 when payload >= this value. 
-const slicing8Cutoff = 16 - -// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. -type slicing8Table [8]Table - -// slicingMakeTable constructs a slicing8Table for the specified polynomial. The -// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). -func slicingMakeTable(poly uint32) *slicing8Table { - t := new(slicing8Table) - simplePopulateTable(poly, &t[0]) - for i := 0; i < 256; i++ { - crc := t[0][i] - for j := 1; j < 8; j++ { - crc = t[0][crc&0xFF] ^ (crc >> 8) - t[j][i] = crc - } - } - return t -} - -// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a -// table that was previously computed using slicingMakeTable. -func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { - if len(p) >= slicing8Cutoff { - crc = ^crc - for len(p) > 8 { - crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 - crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ - tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ - tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] - p = p[8:] - } - crc = ^crc - } - if len(p) == 0 { - return crc - } - return simpleUpdate(crc, &tab[0], p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go deleted file mode 100644 index cc960764b..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_otherarch.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!amd64p32,!s390x - -package crc32 - -func archAvailableIEEE() bool { return false } -func archInitIEEE() { panic("not available") } -func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } - -func archAvailableCastagnoli() bool { return false } -func archInitCastagnoli() { panic("not available") } -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go deleted file mode 100644 index ce96f0328..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_s390x.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x - -package crc32 - -const ( - vxMinLen = 64 - vxAlignMask = 15 // align to 16 bytes -) - -// hasVectorFacility reports whether the machine has the z/Architecture -// vector facility installed and enabled. -func hasVectorFacility() bool - -var hasVX = hasVectorFacility() - -// vectorizedCastagnoli implements CRC32 using vector instructions. -// It is defined in crc32_s390x.s. -//go:noescape -func vectorizedCastagnoli(crc uint32, p []byte) uint32 - -// vectorizedIEEE implements CRC32 using vector instructions. -// It is defined in crc32_s390x.s. -//go:noescape -func vectorizedIEEE(crc uint32, p []byte) uint32 - -func archAvailableCastagnoli() bool { - return hasVX -} - -var archCastagnoliTable8 *slicing8Table - -func archInitCastagnoli() { - if !hasVX { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archCastagnoliTable8 = slicingMakeTable(Castagnoli) -} - -// archUpdateCastagnoli calculates the checksum of p using -// vectorizedCastagnoli. 
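None of these architecture hooks (archAvailable*/archInit*/archUpdate*) are meant to be called by users; the package consults them internally and otherwise exposes the same surface as the standard library's hash/crc32. A minimal usage sketch, assuming the package is imported as a drop-in replacement for hash/crc32:

```go
package main

import (
    "fmt"

    crc32 "github.com/klauspost/crc32" // API-compatible with hash/crc32
)

func main() {
    // MakeTable lets the package pick a hardware-accelerated path
    // (SSE4.2, CLMUL or the s390x vector facility) when one is available,
    // falling back to the generic slicing-by-8/simple implementations.
    tab := crc32.MakeTable(crc32.Castagnoli)
    fmt.Printf("%08x\n", crc32.Checksum([]byte("hello world"), tab))

    // Incremental hashing through hash.Hash32 yields the same result.
    h := crc32.New(tab)
    h.Write([]byte("hello "))
    h.Write([]byte("world"))
    fmt.Printf("%08x\n", h.Sum32())
}
```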
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !hasVX { - panic("not available") - } - // Use vectorized function if data length is above threshold. - if len(p) >= vxMinLen { - aligned := len(p) & ^vxAlignMask - crc = vectorizedCastagnoli(crc, p[:aligned]) - p = p[aligned:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archCastagnoliTable8, p) -} - -func archAvailableIEEE() bool { - return hasVX -} - -var archIeeeTable8 *slicing8Table - -func archInitIEEE() { - if !hasVX { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archIeeeTable8 = slicingMakeTable(IEEE) -} - -// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. -func archUpdateIEEE(crc uint32, p []byte) uint32 { - if !hasVX { - panic("not available") - } - // Use vectorized function if data length is above threshold. - if len(p) >= vxMinLen { - aligned := len(p) & ^vxAlignMask - crc = vectorizedIEEE(crc, p[:aligned]) - p = p[aligned:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archIeeeTable8, p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s deleted file mode 100644 index e980ca29d..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_s390x.s +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x - -#include "textflag.h" - -// Vector register range containing CRC-32 constants - -#define CONST_PERM_LE2BE V9 -#define CONST_R2R1 V10 -#define CONST_R4R3 V11 -#define CONST_R5 V12 -#define CONST_RU_POLY V13 -#define CONST_CRC_POLY V14 - -// The CRC-32 constant block contains reduction constants to fold and -// process particular chunks of the input data stream in parallel. -// -// Note that the constant definitions below are extended in order to compute -// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. -// The rightmost doubleword can be 0 to prevent contribution to the result or -// can be multiplied by 1 to perform an XOR without the need for a separate -// VECTOR EXCLUSIVE OR instruction. 
-// -// The polynomials used are bit-reflected: -// -// IEEE: P'(x) = 0x0edb88320 -// Castagnoli: P'(x) = 0x082f63b78 - -// IEEE polynomial constants -DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask -DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 -DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 -DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 -DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 -DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 -DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 -DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' -DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 - -GLOBL ·crcleconskp(SB), RODATA, $144 - -// Castagonli Polynomial constants -DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask -DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 -DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 -DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 -DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 -DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 -DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 -DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' -DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 - -GLOBL ·crccleconskp(SB), RODATA, $144 - -// func hasVectorFacility() bool -TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 - MOVD $x-24(SP), R1 - XC $24, 0(R1), 0(R1) // clear the storage - MOVD $2, R0 // R0 is the number of double words stored -1 - WORD $0xB2B01000 // STFLE 0(R1) - XOR R0, R0 // reset the value of R0 - MOVBZ z-8(SP), R1 - AND $0x40, R1 - BEQ novector - -vectorinstalled: - // check if the vector instruction has been enabled - VLEIB $0, $0xF, V16 - VLGVB $0, V16, R1 - CMPBNE R1, $0xF, novector - MOVB $1, ret+0(FP) // have vx - RET - -novector: - MOVB $0, ret+0(FP) // no vx - RET - -// The CRC-32 function(s) use these calling conventions: -// -// Parameters: -// -// R2: Initial CRC value, typically ~0; and final CRC (return) value. -// R3: Input buffer pointer, performance might be improved if the -// buffer is on a doubleword boundary. -// R4: Length of the buffer, must be 64 bytes or greater. -// -// Register usage: -// -// R5: CRC-32 constant pool base pointer. -// V0: Initial CRC value and intermediate constants and results. -// V1..V4: Data for CRC computation. -// V5..V8: Next data chunks that are fetched from the input buffer. -// -// V9..V14: CRC-32 constants. 
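The VECTOR GALOIS FIELD MULTIPLY instructions used below multiply doubleword polynomials over GF(2). For intuition, a carry-less multiply is ordinary long multiplication with XOR in place of addition; a rough pure-Go model of the 64x64 -> 128 bit operation follows (illustration only — this is not how the vector unit is driven):

```go
// clmul returns the 128-bit carry-less (GF(2)) product of a and b:
// each set bit i of b contributes a<<i, and the contributions are combined
// with XOR instead of ADD, so no carries propagate between bit positions.
func clmul(a, b uint64) (hi, lo uint64) {
    for i := uint(0); i < 64; i++ {
        if b&(1<<i) != 0 {
            lo ^= a << i
            hi ^= a >> (64 - i) // Go defines a>>64 as 0, so i==0 adds nothing to hi
        }
    }
    return hi, lo
}
```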
- -// func vectorizedIEEE(crc uint32, p []byte) uint32 -TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 - MOVWZ crc+0(FP), R2 // R2 stores the CRC value - MOVD p+8(FP), R3 // data pointer - MOVD p_len+16(FP), R4 // len(p) - - MOVD $·crcleconskp(SB), R5 - BR vectorizedBody<>(SB) - -// func vectorizedCastagnoli(crc uint32, p []byte) uint32 -TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 - MOVWZ crc+0(FP), R2 // R2 stores the CRC value - MOVD p+8(FP), R3 // data pointer - MOVD p_len+16(FP), R4 // len(p) - - // R5: crc-32 constant pool base pointer, constant is used to reduce crc - MOVD $·crccleconskp(SB), R5 - BR vectorizedBody<>(SB) - -TEXT vectorizedBody<>(SB), NOSPLIT, $0 - XOR $0xffffffff, R2 // NOTW R2 - VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY - - // Load the initial CRC value into the rightmost word of V0 - VZERO V0 - VLVGF $3, R2, V0 - - // Crash if the input size is less than 64-bytes. - CMP R4, $64 - BLT crash - - // Load a 64-byte data chunk and XOR with CRC - VLM 0(R3), V1, V4 // 64-bytes into V1..V4 - - // Reflect the data if the CRC operation is in the bit-reflected domain - VPERM V1, V1, CONST_PERM_LE2BE, V1 - VPERM V2, V2, CONST_PERM_LE2BE, V2 - VPERM V3, V3, CONST_PERM_LE2BE, V3 - VPERM V4, V4, CONST_PERM_LE2BE, V4 - - VX V0, V1, V1 // V1 ^= CRC - ADD $64, R3 // BUF = BUF + 64 - ADD $(-64), R4 - - // Check remaining buffer size and jump to proper folding method - CMP R4, $64 - BLT less_than_64bytes - -fold_64bytes_loop: - // Load the next 64-byte data chunk into V5 to V8 - VLM 0(R3), V5, V8 - VPERM V5, V5, CONST_PERM_LE2BE, V5 - VPERM V6, V6, CONST_PERM_LE2BE, V6 - VPERM V7, V7, CONST_PERM_LE2BE, V7 - VPERM V8, V8, CONST_PERM_LE2BE, V8 - - // Perform a GF(2) multiplication of the doublewords in V1 with - // the reduction constants in V0. The intermediate result is - // then folded (accumulated) with the next data chunk in V5 and - // stored in V1. Repeat this step for the register contents - // in V2, V3, and V4 respectively. - - VGFMAG CONST_R2R1, V1, V5, V1 - VGFMAG CONST_R2R1, V2, V6, V2 - VGFMAG CONST_R2R1, V3, V7, V3 - VGFMAG CONST_R2R1, V4, V8, V4 - - // Adjust buffer pointer and length for next loop - ADD $64, R3 // BUF = BUF + 64 - ADD $(-64), R4 // LEN = LEN - 64 - - CMP R4, $64 - BGE fold_64bytes_loop - -less_than_64bytes: - // Fold V1 to V4 into a single 128-bit value in V1 - VGFMAG CONST_R4R3, V1, V2, V1 - VGFMAG CONST_R4R3, V1, V3, V1 - VGFMAG CONST_R4R3, V1, V4, V1 - - // Check whether to continue with 64-bit folding - CMP R4, $16 - BLT final_fold - -fold_16bytes_loop: - VL 0(R3), V2 // Load next data chunk - VPERM V2, V2, CONST_PERM_LE2BE, V2 - - VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk - - // Adjust buffer pointer and size for folding next data chunk - ADD $16, R3 - ADD $-16, R4 - - // Process remaining data chunks - CMP R4, $16 - BGE fold_16bytes_loop - -final_fold: - VLEIB $7, $0x40, V9 - VSRLB V9, CONST_R4R3, V0 - VLEIG $0, $1, V0 - - VGFMG V0, V1, V1 - - VLEIB $7, $0x20, V9 // Shift by words - VSRLB V9, V1, V2 // Store remaining bits in V2 - VUPLLF V1, V1 // Split rightmost doubleword - VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 - - // The input values to the Barret reduction are the degree-63 polynomial - // in V1 (R(x)), degree-32 generator polynomial, and the reduction - // constant u. The Barret reduction result is the CRC value of R(x) mod - // P(x). - // - // The Barret reduction algorithm is defined as: - // - // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u - // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) - // 3. 
C(x) = R(x) XOR T2(x) mod x^32 - // - // Note: To compensate the division by x^32, use the vector unpack - // instruction to move the leftmost word into the leftmost doubleword - // of the vector register. The rightmost doubleword is multiplied - // with zero to not contribute to the intermedate results. - - // T1(x) = floor( R(x) / x^32 ) GF2MUL u - VUPLLF V1, V2 - VGFMG CONST_RU_POLY, V2, V2 - - // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in - // V2 and XOR the intermediate result, T2(x), with the value in V1. - // The final result is in the rightmost word of V2. - - VUPLLF V2, V2 - VGFMAG CONST_CRC_POLY, V2, V1, V2 - -done: - VLGVF $2, V2, R2 - XOR $0xffffffff, R2 // NOTW R2 - MOVWZ R2, ret + 32(FP) - RET - -crash: - MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/github.com/klauspost/crc32/crc32_test.go b/vendor/github.com/klauspost/crc32/crc32_test.go deleted file mode 100644 index 039943630..000000000 --- a/vendor/github.com/klauspost/crc32/crc32_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package crc32 - -import ( - crand "crypto/rand" - "hash" - mrand "math/rand" - "testing" -) - -type test struct { - ieee, castagnoli uint32 - in string -} - -var golden = []test{ - {0x0, 0x0, ""}, - {0xe8b7be43, 0xc1d04330, "a"}, - {0x9e83486d, 0xe2a22936, "ab"}, - {0x352441c2, 0x364b3fb7, "abc"}, - {0xed82cd11, 0x92c80a31, "abcd"}, - {0x8587d865, 0xc450d697, "abcde"}, - {0x4b8e39ef, 0x53bceff1, "abcdef"}, - {0x312a6aa6, 0xe627f441, "abcdefg"}, - {0xaeef2a50, 0xa9421b7, "abcdefgh"}, - {0x8da988af, 0x2ddc99fc, "abcdefghi"}, - {0x3981703a, 0xe6599437, "abcdefghij"}, - {0x6b9cdfe7, 0xb2cc01fe, "Discard medicine more than two years old."}, - {0xc90ef73f, 0xe28207f, "He who has a shady past knows that nice guys finish last."}, - {0xb902341f, 0xbe93f964, "I wouldn't marry him with a ten foot pole."}, - {0x42080e8, 0x9e3be0c3, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, - {0x154c6d11, 0xf505ef04, "The days of the digital watch are numbered. -Tom Stoppard"}, - {0x4c418325, 0x85d3dc82, "Nepal premier won't resign."}, - {0x33955150, 0xc5142380, "For every action there is an equal and opposite government program."}, - {0x26216a4b, 0x75eb77dd, "His money is twice tainted: 'taint yours and 'taint mine."}, - {0x1abbe45e, 0x91ebe9f7, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, - {0xc89a94f7, 0xf0b1168e, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, - {0xab3abe14, 0x572b74e2, "size: a.out: bad magic"}, - {0xbab102b6, 0x8a58a6d5, "The major problem is with sendmail. -Mark Horton"}, - {0x999149d7, 0x9c426c50, "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, - {0x6d52a33c, 0x735400a4, "If the enemy is within range, then so are you."}, - {0x90631e8d, 0xbec49c95, "It's well we cannot hear the screams/That we create in others' dreams."}, - {0x78309130, 0xa95a2079, "You remind me of a TV show, but that's all right: I watch it anyway."}, - {0x7d0a377f, 0xde2e65c5, "C is as portable as Stonehedge!!"}, - {0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, - {0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. 
Lewis-Randall Rule"},
-	{0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick"},
-}
-
-// testGoldenIEEE verifies that the given function returns
-// correct IEEE checksums.
-func testGoldenIEEE(t *testing.T, crcFunc func(b []byte) uint32) {
-	for _, g := range golden {
-		if crc := crcFunc([]byte(g.in)); crc != g.ieee {
-			t.Errorf("IEEE(%s) = 0x%x want 0x%x", g.in, crc, g.ieee)
-		}
-	}
-}
-
-// testGoldenCastagnoli verifies that the given function returns
-// correct Castagnoli checksums.
-func testGoldenCastagnoli(t *testing.T, crcFunc func(b []byte) uint32) {
-	for _, g := range golden {
-		if crc := crcFunc([]byte(g.in)); crc != g.castagnoli {
-			t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, crc, g.castagnoli)
-		}
-	}
-}
-
-// testCrossCheck generates random buffers of various lengths and verifies that
-// the two "update" functions return the same result.
-func testCrossCheck(t *testing.T, crcFunc1, crcFunc2 func(crc uint32, b []byte) uint32) {
-	// The AMD64 implementation has some cutoffs at lengths 168*3=504 and
-	// 1344*3=4032. We should make sure lengths around these values are in the
-	// list.
-	lengths := []int{0, 1, 2, 3, 4, 5, 10, 16, 50, 100, 128,
-		500, 501, 502, 503, 504, 505, 512, 1000, 1024, 2000,
-		4030, 4031, 4032, 4033, 4036, 4040, 4048, 4096, 5000, 10000}
-	for _, length := range lengths {
-		p := make([]byte, length)
-		_, _ = crand.Read(p)
-		crcInit := uint32(mrand.Int63())
-		crc1 := crcFunc1(crcInit, p)
-		crc2 := crcFunc2(crcInit, p)
-		if crc1 != crc2 {
-			t.Errorf("mismatch: 0x%x vs 0x%x (buffer length %d)", crc1, crc2, length)
-		}
-	}
-}
-
-// TestSimple tests the simple generic algorithm.
-func TestSimple(t *testing.T) {
-	tab := simpleMakeTable(IEEE)
-	testGoldenIEEE(t, func(b []byte) uint32 {
-		return simpleUpdate(0, tab, b)
-	})
-
-	tab = simpleMakeTable(Castagnoli)
-	testGoldenCastagnoli(t, func(b []byte) uint32 {
-		return simpleUpdate(0, tab, b)
-	})
-}
-
-// TestSlicing tests the slicing-by-8 algorithm.
-func TestSlicing(t *testing.T) {
-	tab := slicingMakeTable(IEEE)
-	testGoldenIEEE(t, func(b []byte) uint32 {
-		return slicingUpdate(0, tab, b)
-	})
-
-	tab = slicingMakeTable(Castagnoli)
-	testGoldenCastagnoli(t, func(b []byte) uint32 {
-		return slicingUpdate(0, tab, b)
-	})
-
-	// Cross-check various polys against the simple algorithm.
-	for _, poly := range []uint32{IEEE, Castagnoli, Koopman, 0xD5828281} {
-		t1 := simpleMakeTable(poly)
-		f1 := func(crc uint32, b []byte) uint32 {
-			return simpleUpdate(crc, t1, b)
-		}
-		t2 := slicingMakeTable(poly)
-		f2 := func(crc uint32, b []byte) uint32 {
-			return slicingUpdate(crc, t2, b)
-		}
-		testCrossCheck(t, f1, f2)
-	}
-}
-
-func TestArchIEEE(t *testing.T) {
-	if !archAvailableIEEE() {
-		t.Skip("Arch-specific IEEE not available.")
-	}
-	archInitIEEE()
-	slicingTable := slicingMakeTable(IEEE)
-	testCrossCheck(t, archUpdateIEEE, func(crc uint32, b []byte) uint32 {
-		return slicingUpdate(crc, slicingTable, b)
-	})
-}
-
-func TestArchCastagnoli(t *testing.T) {
-	if !archAvailableCastagnoli() {
-		t.Skip("Arch-specific Castagnoli not available.")
-	}
-	archInitCastagnoli()
-	slicingTable := slicingMakeTable(Castagnoli)
-	testCrossCheck(t, archUpdateCastagnoli, func(crc uint32, b []byte) uint32 {
-		return slicingUpdate(crc, slicingTable, b)
-	})
-}
-
-func TestGolden(t *testing.T) {
-	testGoldenIEEE(t, ChecksumIEEE)
-
-	// Some implementations have special code to deal with misaligned
-	// data; test that as well.
- for delta := 1; delta <= 7; delta++ { - testGoldenIEEE(t, func(b []byte) uint32 { - ieee := NewIEEE() - d := delta - if d >= len(b) { - d = len(b) - } - ieee.Write(b[:d]) - ieee.Write(b[d:]) - return ieee.Sum32() - }) - } - - castagnoliTab := MakeTable(Castagnoli) - if castagnoliTab == nil { - t.Errorf("nil Castagnoli Table") - } - - testGoldenCastagnoli(t, func(b []byte) uint32 { - castagnoli := New(castagnoliTab) - castagnoli.Write(b) - return castagnoli.Sum32() - }) - - // Some implementations have special code to deal with misaligned - // data; test that as well. - for delta := 1; delta <= 7; delta++ { - testGoldenCastagnoli(t, func(b []byte) uint32 { - castagnoli := New(castagnoliTab) - d := delta - if d >= len(b) { - d = len(b) - } - castagnoli.Write(b[:d]) - castagnoli.Write(b[d:]) - return castagnoli.Sum32() - }) - } -} - -func BenchmarkIEEECrc40B(b *testing.B) { - benchmark(b, NewIEEE(), 40, 0) -} - -func BenchmarkIEEECrc1KB(b *testing.B) { - benchmark(b, NewIEEE(), 1<<10, 0) -} - -func BenchmarkIEEECrc4KB(b *testing.B) { - benchmark(b, NewIEEE(), 4<<10, 0) -} - -func BenchmarkIEEECrc32KB(b *testing.B) { - benchmark(b, NewIEEE(), 32<<10, 0) -} - -func BenchmarkCastagnoliCrc15B(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 15, 0) -} - -func BenchmarkCastagnoliCrc15BMisaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 15, 1) -} - -func BenchmarkCastagnoliCrc40B(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 40, 0) -} - -func BenchmarkCastagnoliCrc40BMisaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 40, 1) -} - -func BenchmarkCastagnoliCrc512(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 512, 0) -} - -func BenchmarkCastagnoliCrc512Misaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 512, 1) -} - -func BenchmarkCastagnoliCrc1KB(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 0) -} - -func BenchmarkCastagnoliCrc1KBMisaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 1) -} - -func BenchmarkCastagnoliCrc4KB(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 0) -} - -func BenchmarkCastagnoliCrc4KBMisaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 1) -} - -func BenchmarkCastagnoliCrc32KB(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 0) -} - -func BenchmarkCastagnoliCrc32KBMisaligned(b *testing.B) { - benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 1) -} - -func benchmark(b *testing.B, h hash.Hash32, n, alignment int64) { - b.SetBytes(n) - data := make([]byte, n+alignment) - data = data[alignment:] - for i := range data { - data[i] = byte(i) - } - in := make([]byte, 0, h.Size()) - - // Warm up - h.Reset() - h.Write(data) - h.Sum(in) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Reset() - h.Write(data) - h.Sum(in) - } -} diff --git a/vendor/github.com/klauspost/crc32/example_test.go b/vendor/github.com/klauspost/crc32/example_test.go deleted file mode 100644 index 621bf8383..000000000 --- a/vendor/github.com/klauspost/crc32/example_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package crc32_test - -import ( - "fmt" - "hash/crc32" -) - -func ExampleMakeTable() { - // In this package, the CRC polynomial is represented in reversed notation, - // or LSB-first representation. 
- // - // LSB-first representation is a hexadecimal number with n bits, in which the - // most significant bit represents the coefficient of x⁰ and the least significant - // bit represents the coefficient of xⁿ⁻¹ (the coefficient for xⁿ is implicit). - // - // For example, CRC32-Q, as defined by the following polynomial, - // x³²+ x³¹+ x²⁴+ x²²+ x¹⁶+ x¹⁴+ x⁸+ x⁷+ x⁵+ x³+ x¹+ x⁰ - // has the reversed notation 0b11010101100000101000001010000001, so the value - // that should be passed to MakeTable is 0xD5828281. - crc32q := crc32.MakeTable(0xD5828281) - fmt.Printf("%08x\n", crc32.Checksum([]byte("Hello world"), crc32q)) - // Output: - // 2964d064 -} diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml index 8807fe952..ddb9a8c05 100644 --- a/vendor/github.com/magiconair/properties/.travis.yml +++ b/vendor/github.com/magiconair/properties/.travis.yml @@ -3,4 +3,4 @@ go: - 1.4.3 - 1.5.3 - 1.6.3 - - 1.7.1 + - 1.7.3 diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index bf49a1376..415b9f8aa 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -1,5 +1,9 @@ ## Changelog +### Unreleased + + * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy + ### [1.7.0](https://github.com/magiconair/properties/tags/v1.7.0) - 20 Mar 2016 * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#Properties.LoadURL) method to load properties from a URL. diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index e7e01044f..50209d838 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -632,14 +632,14 @@ func (p *Properties) Delete(key string) { // Merge merges properties, comments and keys from other *Properties into p func (p *Properties) Merge(other *Properties) { - for k,v := range other.m { + for k, v := range other.m { p.m[k] = v } - for k,v := range other.c { + for k, v := range other.c { p.c[k] = v } - outer: +outer: for _, otherKey := range other.k { for _, key := range p.k { if otherKey == key { diff --git a/vendor/github.com/nats-io/go-nats/.travis.yml b/vendor/github.com/nats-io/go-nats/.travis.yml deleted file mode 100644 index 7f8b6d352..000000000 --- a/vendor/github.com/nats-io/go-nats/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false -go: -- 1.6.3 -- 1.7.3 -install: -- go get -t ./... -- go get github.com/nats-io/gnatsd -- go get github.com/mattn/goveralls -- go get github.com/wadey/gocovmerge -- go get honnef.co/go/staticcheck/cmd/staticcheck -script: -- go fmt ./... -- go vet ./... -- go test -i -race ./... -- go test -v -race ./... -- staticcheck -ignore="github.com/nats-io/go-nats/*_test.go:SA2002 github.com/nats-io/go-nats/*/*_test.go:SA2002" ./... -after_script: -- if [ "$TRAVIS_GO_VERSION" = "1.7.3" ]; then ./scripts/cov.sh; fi diff --git a/vendor/github.com/nats-io/go-nats/README.md b/vendor/github.com/nats-io/go-nats/README.md deleted file mode 100644 index ad95e0a7e..000000000 --- a/vendor/github.com/nats-io/go-nats/README.md +++ /dev/null @@ -1,322 +0,0 @@ -# NATS - Go Client -A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
-
-[![License MIT](https://img.shields.io/npm/l/express.svg)](http://opensource.org/licenses/MIT)
-[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/go-nats)](https://goreportcard.com/report/github.com/nats-io/go-nats) [![Build Status](https://travis-ci.org/nats-io/go-nats.svg?branch=master)](http://travis-ci.org/nats-io/go-nats) [![GoDoc](https://godoc.org/github.com/nats-io/go-nats?status.svg)](http://godoc.org/github.com/nats-io/go-nats) [![Coverage Status](https://coveralls.io/repos/nats-io/go-nats/badge.svg?branch=master)](https://coveralls.io/r/nats-io/go-nats?branch=master)
-
-## Installation
-
-```bash
-# Go client
-go get github.com/nats-io/go-nats
-
-# Server
-go get github.com/nats-io/gnatsd
-```
-
-## Basic Usage
-
-```go
-
-nc, _ := nats.Connect(nats.DefaultURL)
-
-// Simple Publisher
-nc.Publish("foo", []byte("Hello World"))
-
-// Simple Async Subscriber
-nc.Subscribe("foo", func(m *nats.Msg) {
-    fmt.Printf("Received a message: %s\n", string(m.Data))
-})
-
-// Simple Sync Subscriber
-sub, err := nc.SubscribeSync("foo")
-m, err := sub.NextMsg(timeout)
-
-// Channel Subscriber
-ch := make(chan *nats.Msg, 64)
-sub, err := nc.ChanSubscribe("foo", ch)
-msg := <-ch
-
-// Unsubscribe
-sub.Unsubscribe()
-
-// Requests
-msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond)
-
-// Replies
-nc.Subscribe("help", func(m *nats.Msg) {
-    nc.Publish(m.Reply, []byte("I can help!"))
-})
-
-// Close connection
-nc, _ := nats.Connect("nats://localhost:4222")
-nc.Close()
-```
-
-## Encoded Connections
-
-```go
-
-nc, _ := nats.Connect(nats.DefaultURL)
-c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-defer c.Close()
-
-// Simple Publisher
-c.Publish("foo", "Hello World")
-
-// Simple Async Subscriber
-c.Subscribe("foo", func(s string) {
-    fmt.Printf("Received a message: %s\n", s)
-})
-
-// EncodedConn can Publish any raw Go type using the registered Encoder
-type person struct {
-    Name    string
-    Address string
-    Age     int
-}
-
-// Go type Subscriber
-c.Subscribe("hello", func(p *person) {
-    fmt.Printf("Received a person: %+v\n", p)
-})
-
-me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"}
-
-// Go type Publisher
-c.Publish("hello", me)
-
-// Unsubscribe
-sub, err := c.Subscribe("foo", func(s string) {})
-...
-sub.Unsubscribe()
-
-// Requests
-var response string
-err := c.Request("help", "help me", &response, 10*time.Millisecond)
-if err != nil {
-    fmt.Printf("Request failed: %v\n", err)
-}
-
-// Replying
-c.Subscribe("help", func(subj, reply string, msg string) {
-    c.Publish(reply, "I can help!")
-})
-
-// Close connection
-c.Close()
-```
-
-## TLS
-
-```go
-// tls as a scheme will enable secure connections by default. This will also verify the server name.
-nc, err := nats.Connect("tls://nats.demo.io:4443")
-
-// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup.
-// We provide a helper method to make this case easier.
-nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) - -// If the server requires client certificate, there is an helper function for that too: -cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") -nc, err = nats.Connect("tls://localhost:4443", cert) - -// You can also supply a complete tls.Config - -certFile := "./configs/certs/client-cert.pem" -keyFile := "./configs/certs/client-key.pem" -cert, err := tls.LoadX509KeyPair(certFile, keyFile) -if err != nil { - t.Fatalf("error parsing X509 certificate/key pair: %v", err) -} - -config := &tls.Config{ - ServerName: opts.Host, - Certificates: []tls.Certificate{cert}, - RootCAs: pool, - MinVersion: tls.VersionTLS12, -} - -nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) -if err != nil { - t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) -} - -``` - -## Using Go Channels (netchan) - -```go -nc, _ := nats.Connect(nats.DefaultURL) -ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -defer ec.Close() - -type person struct { - Name string - Address string - Age int -} - -recvCh := make(chan *person) -ec.BindRecvChan("hello", recvCh) - -sendCh := make(chan *person) -ec.BindSendChan("hello", sendCh) - -me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"} - -// Send via Go channels -sendCh <- me - -// Receive via Go channels -who := <- recvCh -``` - -## Wildcard Subscriptions - -```go - -// "*" matches any token, at any level of the subject. -nc.Subscribe("foo.*.baz", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -nc.Subscribe("foo.bar.*", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// ">" matches any length of the tail of a subject, and can only be the last token -// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' -nc.Subscribe("foo.>", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// Matches all of the above -nc.Publish("foo.bar.baz", []byte("Hello World")) - -``` - -## Queue Groups - -```go -// All subscriptions with the same queue name will form a queue group. -// Each message will be delivered to only one subscriber per queue group, -// using queuing semantics. You can have as many queue groups as you wish. -// Normal subscribers will continue to work as expected. - -nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { - received += 1; -}) - -``` - -## Advanced Usage - -```go - -// Flush connection to server, returns when all messages have been processed. -nc.Flush() -fmt.Println("All clear!") - -// FlushTimeout specifies a timeout value as well. -err := nc.FlushTimeout(1*time.Second) -if err != nil { - fmt.Println("All clear!") -} else { - fmt.Println("Flushed timed out!") -} - -// Auto-unsubscribe after MAX_WANTED messages received -const MAX_WANTED = 10 -sub, err := nc.Subscribe("foo") -sub.AutoUnsubscribe(MAX_WANTED) - -// Multiple connections -nc1 := nats.Connect("nats://host1:4222") -nc2 := nats.Connect("nats://host2:4222") - -nc1.Subscribe("foo", func(m *Msg) { - fmt.Printf("Received a message: %s\n", string(m.Data)) -}) - -nc2.Publish("foo", []byte("Hello World!")); - -``` - -## Clustered Usage - -```go - -var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" - -nc, err := nats.Connect(servers) - -// Optionally set ReconnectWait and MaxReconnect attempts. -// This example means 10 seconds total per backend. 
-nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2*time.Second))
-
-// Optionally disable randomization of the server pool
-nc, err = nats.Connect(servers, nats.DontRandomize())
-
-// Setup callbacks to be notified on disconnects, reconnects and connection closed.
-nc, err = nats.Connect(servers,
-    nats.DisconnectHandler(func(nc *nats.Conn) {
-        fmt.Printf("Got disconnected!\n")
-    }),
-    nats.ReconnectHandler(func(_ *nats.Conn) {
-        fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
-    }),
-    nats.ClosedHandler(func(nc *nats.Conn) {
-        fmt.Printf("Connection closed. Reason: %q\n", nc.LastError())
-    }),
-)
-
-// When connecting to a mesh of servers with auto-discovery capabilities,
-// you may need to provide a username/password or token in order to connect
-// to any server in that mesh when authentication is required.
-// Instead of providing the credentials in the initial URL, you will use
-// new option setters:
-nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"))
-
-// For token based authentication:
-nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken"))
-
-// You can even pass the two at the same time in case one of the servers
-// in the mesh requires a token instead of a user name and password.
-nc, err = nats.Connect("nats://localhost:4222",
-    nats.UserInfo("foo", "bar"),
-    nats.Token("S3cretT0ken"))
-
-// Note that if credentials are specified in the initial URLs, they take
-// precedence over the credentials specified through the options.
-// For instance, in the connect call below, the client library will use
-// the user "my" and password "pwd" to connect to localhost:4222, however,
-// it will use username "foo" and password "bar" when (re)connecting to
-// a different server URL that it got as part of the auto-discovery.
-nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar"))
-
-```
-
-## License
-
-(The MIT License)
-
-Copyright (c) 2012-2016 Apcera Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
diff --git a/vendor/github.com/nats-io/go-nats/TODO.md b/vendor/github.com/nats-io/go-nats/TODO.md
deleted file mode 100644
index 213aaeca8..000000000
--- a/vendor/github.com/nats-io/go-nats/TODO.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-- [ ] Better constructors, options handling
-- [ ] Functions for callback settings after connection created.
-- [ ] Better options for subscriptions. Slow Consumer state settable, Go routines vs Inline.
-- [ ] Move off of channels for subscribers, use syncPool linkedLists, etc with highwater.
-- [ ] Test for valid subjects on publish and subscribe?
-- [ ] SyncSubscriber and Next for EncodedConn
-- [ ] Fast Publisher?
-- [ ] pooling for structs used? leaky bucket?
-- [ ] Timeout 0 should work as no timeout
-- [x] Ping timer
-- [x] Name in Connect for gnatsd
-- [x] Asynchronous error handling
-- [x] Parser rewrite
-- [x] Reconnect
-- [x] Hide Lock
-- [x] Easier encoder interface
-- [x] QueueSubscribeSync
-- [x] Make nats specific errors prefixed with 'nats:'
-- [x] API test for closed connection
-- [x] TLS/SSL
-- [x] Stats collection
-- [x] Disconnect detection
-- [x] Optimized Publish (coalescing)
-- [x] Do Examples via Go style
-- [x] Standardized Errors
diff --git a/vendor/github.com/nats-io/go-nats/bench/bench.go b/vendor/github.com/nats-io/go-nats/bench/bench.go
deleted file mode 100644
index 8353110b0..000000000
--- a/vendor/github.com/nats-io/go-nats/bench/bench.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright 2016 Apcera Inc. All rights reserved.
-
-package bench
-
-import (
-    "bytes"
-    "encoding/csv"
-    "fmt"
-    "log"
-    "math"
-    "strconv"
-    "time"
-
-    "github.com/nats-io/go-nats"
-    "github.com/nats-io/nuid"
-)
-
-// A Sample for a particular client
-type Sample struct {
-    JobMsgCnt int
-    MsgCnt    uint64
-    MsgBytes  uint64
-    IOBytes   uint64
-    Start     time.Time
-    End       time.Time
-}
-
-// SampleGroup for a number of samples; the group is itself a Sample, aggregating the values of its Samples
-type SampleGroup struct {
-    Sample
-    Samples []*Sample
-}
-
-// Benchmark to hold the various Samples organized by publishers and subscribers
-type Benchmark struct {
-    Sample
-    Name       string
-    RunID      string
-    Pubs       *SampleGroup
-    Subs       *SampleGroup
-    subChannel chan *Sample
-    pubChannel chan *Sample
-}
-
-// NewBenchmark initializes a Benchmark. After creating a bench call AddSubSample/AddPubSample.
-// When done collecting samples, call Close.
-func NewBenchmark(name string, subCnt, pubCnt int) *Benchmark {
-    bm := Benchmark{Name: name, RunID: nuid.Next()}
-    bm.Subs = NewSampleGroup()
-    bm.Pubs = NewSampleGroup()
-    bm.subChannel = make(chan *Sample, subCnt)
-    bm.pubChannel = make(chan *Sample, pubCnt)
-    return &bm
-}
-
-// Close organizes collected Samples and calculates aggregates. After Close(), no more samples can be added.
-func (bm *Benchmark) Close() { - close(bm.subChannel) - close(bm.pubChannel) - - for s := range bm.subChannel { - bm.Subs.AddSample(s) - } - for s := range bm.pubChannel { - bm.Pubs.AddSample(s) - } - - if bm.Subs.HasSamples() { - bm.Start = bm.Subs.Start - bm.End = bm.Subs.End - } else { - bm.Start = bm.Pubs.Start - bm.End = bm.Pubs.End - } - - if bm.Subs.HasSamples() && bm.Pubs.HasSamples() { - if bm.Start.After(bm.Subs.Start) { - bm.Start = bm.Subs.Start - } - if bm.Start.After(bm.Pubs.Start) { - bm.Start = bm.Pubs.Start - } - - if bm.End.Before(bm.Subs.End) { - bm.End = bm.Subs.End - } - if bm.End.Before(bm.Pubs.End) { - bm.End = bm.Pubs.End - } - } - - bm.MsgBytes = bm.Pubs.MsgBytes + bm.Subs.MsgBytes - bm.IOBytes = bm.Pubs.IOBytes + bm.Subs.IOBytes - bm.MsgCnt = bm.Pubs.MsgCnt + bm.Subs.MsgCnt - bm.JobMsgCnt = bm.Pubs.JobMsgCnt + bm.Subs.JobMsgCnt -} - -// AddSubSample to the benchmark -func (bm *Benchmark) AddSubSample(s *Sample) { - bm.subChannel <- s -} - -// AddPubSample to the benchmark -func (bm *Benchmark) AddPubSample(s *Sample) { - bm.pubChannel <- s -} - -// CSV generates a csv report of all the samples collected -func (bm *Benchmark) CSV() string { - var buffer bytes.Buffer - writer := csv.NewWriter(&buffer) - headers := []string{"#RunID", "ClientID", "MsgCount", "MsgBytes", "MsgsPerSec", "BytesPerSec", "DurationSecs"} - if err := writer.Write(headers); err != nil { - log.Fatalf("Error while serializing headers %q: %v", headers, err) - } - groups := []*SampleGroup{bm.Subs, bm.Pubs} - pre := "S" - for i, g := range groups { - if i == 1 { - pre = "P" - } - for j, c := range g.Samples { - r := []string{bm.RunID, fmt.Sprintf("%s%d", pre, j), fmt.Sprintf("%d", c.MsgCnt), fmt.Sprintf("%d", c.MsgBytes), fmt.Sprintf("%d", c.Rate()), fmt.Sprintf("%f", c.Throughput()), fmt.Sprintf("%f", c.Duration().Seconds())} - if err := writer.Write(r); err != nil { - log.Fatalf("Error while serializing %v: %v", c, err) - } - } - } - - writer.Flush() - return buffer.String() -} - -// NewSample creates a new Sample initialized to the provided values. 
The nats.Conn statistics are captured as well.
-func NewSample(jobCount int, msgSize int, start, end time.Time, nc *nats.Conn) *Sample {
-    s := Sample{JobMsgCnt: jobCount, Start: start, End: end}
-    s.MsgBytes = uint64(msgSize * jobCount)
-    s.MsgCnt = nc.OutMsgs + nc.InMsgs
-    s.IOBytes = nc.OutBytes + nc.InBytes
-    return &s
-}
-
-// Throughput of bytes per second
-func (s *Sample) Throughput() float64 {
-    return float64(s.MsgBytes) / s.Duration().Seconds()
-}
-
-// Rate of messages in the job per second
-func (s *Sample) Rate() int64 {
-    return int64(float64(s.JobMsgCnt) / s.Duration().Seconds())
-}
-
-func (s *Sample) String() string {
-    rate := commaFormat(s.Rate())
-    throughput := HumanBytes(s.Throughput(), false)
-    return fmt.Sprintf("%s msgs/sec ~ %s/sec", rate, throughput)
-}
-
-// Duration that the sample was active
-func (s *Sample) Duration() time.Duration {
-    return s.End.Sub(s.Start)
-}
-
-// Seconds that the sample or samples were active
-func (s *Sample) Seconds() float64 {
-    return s.Duration().Seconds()
-}
-
-// NewSampleGroup initializer
-func NewSampleGroup() *SampleGroup {
-    s := new(SampleGroup)
-    s.Samples = make([]*Sample, 0, 0)
-    return s
-}
-
-// Statistics information of the sample group (min, average, max and standard deviation)
-func (sg *SampleGroup) Statistics() string {
-    return fmt.Sprintf("min %s | avg %s | max %s | stddev %s msgs", commaFormat(sg.MinRate()), commaFormat(sg.AvgRate()), commaFormat(sg.MaxRate()), commaFormat(int64(sg.StdDev())))
-}
-
-// MinRate returns the smallest message rate in the SampleGroup
-func (sg *SampleGroup) MinRate() int64 {
-    m := int64(0)
-    for i, s := range sg.Samples {
-        if i == 0 {
-            m = s.Rate()
-        }
-        m = min(m, s.Rate())
-    }
-    return m
-}
-
-// MaxRate returns the largest message rate in the SampleGroup
-func (sg *SampleGroup) MaxRate() int64 {
-    m := int64(0)
-    for i, s := range sg.Samples {
-        if i == 0 {
-            m = s.Rate()
-        }
-        m = max(m, s.Rate())
-    }
-    return m
-}
-
-// AvgRate returns the average of all the message rates in the SampleGroup
-func (sg *SampleGroup) AvgRate() int64 {
-    sum := uint64(0)
-    for _, s := range sg.Samples {
-        sum += uint64(s.Rate())
-    }
-    return int64(sum / uint64(len(sg.Samples)))
-}
-
-// StdDev returns the standard deviation of the message rates in the SampleGroup
-func (sg *SampleGroup) StdDev() float64 {
-    avg := float64(sg.AvgRate())
-    sum := float64(0)
-    for _, c := range sg.Samples {
-        sum += math.Pow(float64(c.Rate())-avg, 2)
-    }
-    variance := sum / float64(len(sg.Samples))
-    return math.Sqrt(variance)
-}
-
-// AddSample adds a Sample to the SampleGroup. After adding a Sample it shouldn't be modified.
-func (sg *SampleGroup) AddSample(e *Sample) {
-    sg.Samples = append(sg.Samples, e)
-
-    if len(sg.Samples) == 1 {
-        sg.Start = e.Start
-        sg.End = e.End
-    }
-    sg.IOBytes += e.IOBytes
-    sg.JobMsgCnt += e.JobMsgCnt
-    sg.MsgCnt += e.MsgCnt
-    sg.MsgBytes += e.MsgBytes
-
-    if e.Start.Before(sg.Start) {
-        sg.Start = e.Start
-    }
-
-    if e.End.After(sg.End) {
-        sg.End = e.End
-    }
-}
-
-// HasSamples returns true if the group has samples
-func (sg *SampleGroup) HasSamples() bool {
-    return len(sg.Samples) > 0
-}
-
-// Report returns a human readable report of the samples taken in the Benchmark
-func (bm *Benchmark) Report() string {
-    var buffer bytes.Buffer
-
-    indent := ""
-    if !bm.Pubs.HasSamples() && !bm.Subs.HasSamples() {
-        return "No publisher or subscribers. Nothing to report."
- } - - if bm.Pubs.HasSamples() && bm.Subs.HasSamples() { - buffer.WriteString(fmt.Sprintf("%s Pub/Sub stats: %s\n", bm.Name, bm)) - indent += " " - } - if bm.Pubs.HasSamples() { - buffer.WriteString(fmt.Sprintf("%sPub stats: %s\n", indent, bm.Pubs)) - if len(bm.Pubs.Samples) > 1 { - for i, stat := range bm.Pubs.Samples { - buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt)) - } - buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Pubs.Statistics())) - } - } - - if bm.Subs.HasSamples() { - buffer.WriteString(fmt.Sprintf("%sSub stats: %s\n", indent, bm.Subs)) - if len(bm.Subs.Samples) > 1 { - for i, stat := range bm.Subs.Samples { - buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt)) - } - buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Subs.Statistics())) - } - } - return buffer.String() -} - -func commaFormat(n int64) string { - in := strconv.FormatInt(n, 10) - out := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/3) - if in[0] == '-' { - in, out[0] = in[1:], '-' - } - for i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 { - out[j] = in[i] - if i == 0 { - return string(out) - } - if k++; k == 3 { - j, k = j-1, 0 - out[j] = ',' - } - } -} - -// HumanBytes formats bytes as a human readable string -func HumanBytes(bytes float64, si bool) string { - var base = 1024 - pre := []string{"K", "M", "G", "T", "P", "E"} - var post = "B" - if si { - base = 1000 - pre = []string{"k", "M", "G", "T", "P", "E"} - post = "iB" - } - if bytes < float64(base) { - return fmt.Sprintf("%.2f B", bytes) - } - exp := int(math.Log(bytes) / math.Log(float64(base))) - index := exp - 1 - units := pre[index] + post - return fmt.Sprintf("%.2f %s", bytes/math.Pow(float64(base), float64(exp)), units) -} - -func min(x, y int64) int64 { - if x < y { - return x - } - return y -} - -func max(x, y int64) int64 { - if x > y { - return x - } - return y -} - -// MsgsPerClient divides the number of messages by the number of clients and tries to distribute them as evenly as possible -func MsgsPerClient(numMsgs, numClients int) []int { - var counts []int - if numClients == 0 || numMsgs == 0 { - return counts - } - counts = make([]int, numClients) - mc := numMsgs / numClients - for i := 0; i < numClients; i++ { - counts[i] = mc - } - extra := numMsgs % numClients - for i := 0; i < extra; i++ { - counts[i]++ - } - return counts -} diff --git a/vendor/github.com/nats-io/go-nats/bench/benchlib_test.go b/vendor/github.com/nats-io/go-nats/bench/benchlib_test.go deleted file mode 100644 index 6d7dddca8..000000000 --- a/vendor/github.com/nats-io/go-nats/bench/benchlib_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package bench - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/nats-io/go-nats" -) - -const ( - MsgSize = 8 - Million = 1000 * 1000 -) - -var baseTime = time.Now() - -func millionMessagesSecondSample(seconds int) *Sample { - messages := Million * seconds - start := baseTime - end := start.Add(time.Second * time.Duration(seconds)) - nc := new(nats.Conn) - - s := NewSample(messages, MsgSize, start, end, nc) - s.MsgCnt = uint64(messages) - s.MsgBytes = uint64(messages * MsgSize) - s.IOBytes = s.MsgBytes - return s -} - -func TestDuration(t *testing.T) { - s := millionMessagesSecondSample(1) - duration := s.End.Sub(s.Start) - if duration != s.Duration() || duration != time.Second { - t.Fatal("Expected sample duration to be 1 second") - } -} - -func TestSeconds(t *testing.T) { - s := millionMessagesSecondSample(1) - seconds := 
s.End.Sub(s.Start).Seconds() - if seconds != s.Seconds() || seconds != 1.0 { - t.Fatal("Expected sample seconds to be 1 second") - } -} - -func TestRate(t *testing.T) { - s := millionMessagesSecondSample(60) - if s.Rate() != Million { - t.Fatal("Expected rate at 1 million msgs") - } -} - -func TestThoughput(t *testing.T) { - s := millionMessagesSecondSample(60) - if s.Throughput() != Million*MsgSize { - t.Fatalf("Expected throughput at %d million bytes/sec", MsgSize) - } -} - -func TestStrings(t *testing.T) { - s := millionMessagesSecondSample(60) - if len(s.String()) == 0 { - t.Fatal("Sample didn't provide a String") - } -} - -func TestGroupDuration(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - duration := sg.End.Sub(sg.Start) - if duration != sg.Duration() || duration != time.Duration(2)*time.Second { - t.Fatal("Expected aggregate duration to be 2.0 seconds") - } -} - -func TestGroupSeconds(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - seconds := sg.End.Sub(sg.Start).Seconds() - if seconds != sg.Seconds() || seconds != 3.0 { - t.Fatal("Expected aggregate seconds to be 3.0 seconds") - } -} - -func TestGroupRate(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - if sg.Rate() != Million*2 { - t.Fatal("Expected MsgRate at 2 million msg/sec") - } -} - -func TestGroupThoughput(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - if sg.Throughput() != 2*Million*MsgSize { - t.Fatalf("Expected througput at %d million bytes/sec", 2*MsgSize) - } -} - -func TestMinMaxRate(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - if sg.MinRate() != sg.MaxRate() { - t.Fatal("Expected MinRate == MaxRate") - } -} - -func TestAvgRate(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - if sg.MinRate() != sg.AvgRate() { - t.Fatal("Expected MinRate == AvgRate") - } -} - -func TestStdDev(t *testing.T) { - sg := NewSampleGroup() - sg.AddSample(millionMessagesSecondSample(1)) - sg.AddSample(millionMessagesSecondSample(2)) - sg.AddSample(millionMessagesSecondSample(3)) - if sg.StdDev() != 0.0 { - t.Fatal("Expected stddev to be zero") - } -} - -func TestBenchSetup(t *testing.T) { - bench := NewBenchmark("test", 1, 1) - bench.AddSubSample(millionMessagesSecondSample(1)) - bench.AddPubSample(millionMessagesSecondSample(1)) - bench.Close() - if len(bench.RunID) == 0 { - t.Fatal("Bench doesn't have a RunID") - } - if len(bench.Pubs.Samples) != 1 { - t.Fatal("Expected one publisher") - } - if len(bench.Subs.Samples) != 1 { - t.Fatal("Expected one subscriber") - } - if bench.MsgCnt != 2*Million { - t.Fatal("Expected 2 million msgs") - } - if bench.IOBytes != 2*Million*MsgSize { - t.Fatalf("Expected %d million bytes", 2*MsgSize) - } - if bench.Duration() != time.Second { - t.Fatal("Expected duration to be 1 second") - } -} - -func makeBench(subs, pubs int) *Benchmark { - bench := 
NewBenchmark("test", subs, pubs) - for i := 0; i < subs; i++ { - bench.AddSubSample(millionMessagesSecondSample(1)) - } - for i := 0; i < pubs; i++ { - bench.AddPubSample(millionMessagesSecondSample(1)) - } - bench.Close() - return bench -} - -func TestCsv(t *testing.T) { - bench := makeBench(1, 1) - csv := bench.CSV() - lines := strings.Split(csv, "\n") - if len(lines) != 4 { - t.Fatal("Expected 4 lines of output from the CSV string") - } - - fields := strings.Split(lines[1], ",") - if len(fields) != 7 { - t.Fatal("Expected 7 fields") - } -} - -func TestBenchStrings(t *testing.T) { - bench := makeBench(1, 1) - s := bench.Report() - lines := strings.Split(s, "\n") - if len(lines) != 4 { - t.Fatal("Expected 3 lines of output: header, pub, sub, empty") - } - - bench = makeBench(2, 2) - s = bench.Report() - lines = strings.Split(s, "\n") - if len(lines) != 10 { - fmt.Printf("%q\n", s) - - t.Fatal("Expected 11 lines of output: header, pub header, pub x 2, stats, sub headers, sub x 2, stats, empty") - } -} - -func TestMsgsPerClient(t *testing.T) { - zero := MsgsPerClient(0, 0) - if len(zero) != 0 { - t.Fatal("Expected 0 length for 0 clients") - } - onetwo := MsgsPerClient(1, 2) - if len(onetwo) != 2 || onetwo[0] != 1 || onetwo[1] != 0 { - t.Fatal("Expected uneven distribution") - } - twotwo := MsgsPerClient(2, 2) - if len(twotwo) != 2 || twotwo[0] != 1 || twotwo[1] != 1 { - t.Fatal("Expected even distribution") - } - threetwo := MsgsPerClient(3, 2) - if len(threetwo) != 2 || threetwo[0] != 2 || threetwo[1] != 1 { - t.Fatal("Expected uneven distribution") - } -} diff --git a/vendor/github.com/nats-io/go-nats/enc.go b/vendor/github.com/nats-io/go-nats/enc.go deleted file mode 100644 index f29b0343a..000000000 --- a/vendor/github.com/nats-io/go-nats/enc.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. - -package nats - -import ( - "errors" - "fmt" - "reflect" - "sync" - "time" - - // Default Encoders - . "github.com/nats-io/go-nats/encoders/builtin" -) - -// Encoder interface is for all register encoders -type Encoder interface { - Encode(subject string, v interface{}) ([]byte, error) - Decode(subject string, data []byte, vPtr interface{}) error -} - -var encMap map[string]Encoder -var encLock sync.Mutex - -// Indexe names into the Registered Encoders. -const ( - JSON_ENCODER = "json" - GOB_ENCODER = "gob" - DEFAULT_ENCODER = "default" -) - -func init() { - encMap = make(map[string]Encoder) - // Register json, gob and default encoder - RegisterEncoder(JSON_ENCODER, &JsonEncoder{}) - RegisterEncoder(GOB_ENCODER, &GobEncoder{}) - RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{}) -} - -// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to -// a nats server and have an extendable encoder system that will encode and decode messages -// from raw Go types. -type EncodedConn struct { - Conn *Conn - Enc Encoder -} - -// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered -// encoder. -func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { - if c == nil { - return nil, errors.New("nats: Nil Connection") - } - if c.IsClosed() { - return nil, ErrConnectionClosed - } - ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} - if ec.Enc == nil { - return nil, fmt.Errorf("No encoder registered for '%s'", encType) - } - return ec, nil -} - -// RegisterEncoder will register the encType with the given Encoder. Useful for customization. 
-func RegisterEncoder(encType string, enc Encoder) { - encLock.Lock() - defer encLock.Unlock() - encMap[encType] = enc -} - -// EncoderForType will return the registered Encoder for the encType. -func EncoderForType(encType string) Encoder { - encLock.Lock() - defer encLock.Unlock() - return encMap[encType] -} - -// Publish publishes the data argument to the given subject. The data argument -// will be encoded using the associated encoder. -func (c *EncodedConn) Publish(subject string, v interface{}) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, _EMPTY_, b) -} - -// PublishRequest will perform a Publish() expecting a response on the -// reply subject. Use Request() for automatically waiting for a response -// inline. -func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, reply, b) -} - -// Request will create an Inbox and perform a Request() call -// with the Inbox reply for the data v. A response will be -// decoded into the vPtrResponse. -func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - m, err := c.Conn.Request(subject, b, timeout) - if err != nil { - return err - } - if reflect.TypeOf(vPtr) == emptyMsgType { - mPtr := vPtr.(*Msg) - *mPtr = *m - } else { - err = c.Enc.Decode(m.Subject, m.Data, vPtr) - } - return err -} - -// Handler is a specific callback used for Subscribe. It is generalized to -// an interface{}, but we will discover its format and arguments at runtime -// and perform the correct callback, including de-marshalling JSON strings -// back into the appropriate struct based on the signature of the Handler. -// -// Handlers are expected to have one of four signatures. -// -// type person struct { -// Name string `json:"name,omitempty"` -// Age uint `json:"age,omitempty"` -// } -// -// handler := func(m *Msg) -// handler := func(p *person) -// handler := func(subject string, o *obj) -// handler := func(subject, reply string, o *obj) -// -// These forms allow a callback to request a raw Msg ptr, where the processing -// of the message from the wire is untouched. Process a JSON representation -// and demarshal it into the given struct, e.g. person. -// There are also variants where the callback wants either the subject, or the -// subject and the reply subject. -type Handler interface{} - -// Dissect the cb Handler's signature -func argInfo(cb Handler) (reflect.Type, int) { - cbType := reflect.TypeOf(cb) - if cbType.Kind() != reflect.Func { - panic("nats: Handler needs to be a func") - } - numArgs := cbType.NumIn() - if numArgs == 0 { - return nil, numArgs - } - return cbType.In(numArgs - 1), numArgs -} - -var emptyMsgType = reflect.TypeOf(&Msg{}) - -// Subscribe will create a subscription on the given subject and process incoming -// messages using the specified Handler. The Handler should be a func that matches -// a signature from the description of Handler from above. -func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, _EMPTY_, cb) -} - -// QueueSubscribe will create a queue subscription on the given subject and process -// incoming messages using the specified Handler. The Handler should be a func that -// matches a signature from the description of Handler from above. 
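A compile-oriented sketch of the four Handler shapes described above, on a JSON-encoded connection; the person type mirrors the doc comment, and the subject name is illustrative. A reachable server at nats.DefaultURL is assumed.

package main

import (
	"log"

	"github.com/nats-io/go-nats"
)

type person struct {
	Name string `json:"name,omitempty"`
	Age  uint   `json:"age,omitempty"`
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	ec, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	// Raw message: decoding is skipped entirely (the *Msg special case).
	ec.Subscribe("people", func(m *nats.Msg) {})
	// Decoded value only.
	ec.Subscribe("people", func(p *person) {})
	// Subject plus decoded value.
	ec.Subscribe("people", func(subject string, p *person) {})
	// Subject, reply subject, and decoded value.
	ec.Subscribe("people", func(subject, reply string, p *person) {})
}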
-func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, queue, cb) -} - -// Internal implementation that all public functions will use. -func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { - if cb == nil { - return nil, errors.New("nats: Handler required for EncodedConn Subscription") - } - argType, numArgs := argInfo(cb) - if argType == nil { - return nil, errors.New("nats: Handler requires at least one argument") - } - - cbValue := reflect.ValueOf(cb) - wantsRaw := (argType == emptyMsgType) - - natsCB := func(m *Msg) { - var oV []reflect.Value - if wantsRaw { - oV = []reflect.Value{reflect.ValueOf(m)} - } else { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach <- func() { - c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) - } - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - - // Callback Arity - switch numArgs { - case 1: - oV = []reflect.Value{oPtr} - case 2: - subV := reflect.ValueOf(m.Subject) - oV = []reflect.Value{subV, oPtr} - case 3: - subV := reflect.ValueOf(m.Subject) - replyV := reflect.ValueOf(m.Reply) - oV = []reflect.Value{subV, replyV, oPtr} - } - - } - cbValue.Call(oV) - } - - return c.Conn.subscribe(subject, queue, natsCB, nil) -} - -// FlushTimeout allows a Flush operation to have an associated timeout. -func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { - return c.Conn.FlushTimeout(timeout) -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (c *EncodedConn) Flush() error { - return c.Conn.Flush() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush(), etc. -func (c *EncodedConn) Close() { - c.Conn.Close() -} - -// LastError reports the last error encountered via the Connection. -func (c *EncodedConn) LastError() error { - return c.Conn.err -} diff --git a/vendor/github.com/nats-io/go-nats/enc_test.go b/vendor/github.com/nats-io/go-nats/enc_test.go deleted file mode 100644 index ada5b0246..000000000 --- a/vendor/github.com/nats-io/go-nats/enc_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package nats_test - -import ( - "fmt" - "testing" - "time" - - . "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/protobuf" - "github.com/nats-io/go-nats/encoders/protobuf/testdata" -) - -// Since we import above nats packages, we need to have a different -// const name than TEST_PORT that we used on the other packages. 
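A request/reply sketch against the Request method defined above, including its special case where a *nats.Msg pointer receives the raw reply without decoding. Assumes a reachable server at nats.DefaultURL; the subject and payloads are illustrative.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	ec, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	// Responder: the reply is just another encoded publish.
	ec.Subscribe("greet", func(subject, reply string, name string) {
		ec.Publish(reply, "hello "+name)
	})

	// Decoded response: the JSON string arrives unquoted in greeting.
	var greeting string
	if err := ec.Request("greet", "world", &greeting, 2*time.Second); err != nil {
		log.Fatal(err)
	}
	fmt.Println(greeting)

	// Raw response: a *nats.Msg pointer bypasses the decoder entirely.
	var m nats.Msg
	if err := ec.Request("greet", "world", &m, 2*time.Second); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(m.Data))
}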
-const ENC_TEST_PORT = 8268 - -var options = Options{ - Url: fmt.Sprintf("nats://localhost:%d", ENC_TEST_PORT), - AllowReconnect: true, - MaxReconnect: 10, - ReconnectWait: 100 * time.Millisecond, - Timeout: DefaultTimeout, -} - -//////////////////////////////////////////////////////////////////////////////// -// Encoded connection tests -//////////////////////////////////////////////////////////////////////////////// - -func TestPublishErrorAfterSubscribeDecodeError(t *testing.T) { - ts := RunServerOnPort(ENC_TEST_PORT) - defer ts.Shutdown() - opts := options - nc, _ := opts.Connect() - defer nc.Close() - c, _ := NewEncodedConn(nc, JSON_ENCODER) - - //Test message type - type Message struct { - Message string - } - const testSubj = "test" - - c.Subscribe(testSubj, func(msg *Message) {}) - - //Publish invalid json to catch decode error in subscription callback - c.Publish(testSubj, `foo`) - c.Flush() - - //Next publish should be successful - if err := c.Publish(testSubj, Message{"2"}); err != nil { - t.Error("Fail to send correct json message after decode error in subscription") - } -} - -func TestPublishErrorAfterInvalidPublishMessage(t *testing.T) { - ts := RunServerOnPort(ENC_TEST_PORT) - defer ts.Shutdown() - opts := options - nc, _ := opts.Connect() - defer nc.Close() - c, _ := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) - const testSubj = "test" - - c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}) - - //Publish invalid protobuff message to catch decode error - c.Publish(testSubj, "foo") - - //Next publish with valid protobuf message should be successful - if err := c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}); err != nil { - t.Error("Fail to send correct protobuf message after invalid message publishing", err) - } -} - -func TestVariousFailureConditions(t *testing.T) { - ts := RunServerOnPort(ENC_TEST_PORT) - defer ts.Shutdown() - - dch := make(chan bool) - - opts := options - opts.AsyncErrorCB = func(_ *Conn, _ *Subscription, e error) { - dch <- true - } - nc, _ := opts.Connect() - nc.Close() - - if _, err := NewEncodedConn(nil, protobuf.PROTOBUF_ENCODER); err == nil { - t.Fatal("Expected an error") - } - - if _, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER); err == nil || err != ErrConnectionClosed { - t.Fatalf("Wrong error: %v instead of %v", err, ErrConnectionClosed) - } - - nc, _ = opts.Connect() - defer nc.Close() - - if _, err := NewEncodedConn(nc, "foo"); err == nil { - t.Fatal("Expected an error") - } - - c, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) - if err != nil { - t.Fatalf("Unable to create encoded connection: %v", err) - } - defer c.Close() - - if _, err := c.Subscribe("bar", func(subj, obj string) {}); err != nil { - t.Fatalf("Unable to create subscription: %v", err) - } - - if err := c.Publish("bar", &testdata.Person{Name: "Ivan"}); err != nil { - t.Fatalf("Unable to publish: %v", err) - } - - if err := Wait(dch); err != nil { - t.Fatal("Did not get the async error callback") - } - - if err := c.PublishRequest("foo", "bar", "foo"); err == nil { - t.Fatal("Expected an error") - } - - if err := c.Request("foo", "foo", nil, 2*time.Second); err == nil { - t.Fatal("Expected an error") - } - - nc.Close() - - if err := c.PublishRequest("foo", "bar", &testdata.Person{Name: "Ivan"}); err == nil { - t.Fatal("Expected an error") - } - - resp := &testdata.Person{} - if err := c.Request("foo", &testdata.Person{Name: "Ivan"}, resp, 2*time.Second); err == nil { - t.Fatal("Expected an error") - } - - if _, err := c.Subscribe("foo", nil); err == 
nil { - t.Fatal("Expected an error") - } - - if _, err := c.Subscribe("foo", func() {}); err == nil { - t.Fatal("Expected an error") - } - - func() { - defer func() { - if r := recover(); r == nil { - t.Fatal("Expected an error") - } - }() - if _, err := c.Subscribe("foo", "bar"); err == nil { - t.Fatal("Expected an error") - } - }() -} - -func TestRequest(t *testing.T) { - ts := RunServerOnPort(ENC_TEST_PORT) - defer ts.Shutdown() - - dch := make(chan bool) - - opts := options - nc, _ := opts.Connect() - defer nc.Close() - - c, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) - if err != nil { - t.Fatalf("Unable to create encoded connection: %v", err) - } - defer c.Close() - - sentName := "Ivan" - recvName := "Kozlovic" - - if _, err := c.Subscribe("foo", func(_, reply string, p *testdata.Person) { - if p.Name != sentName { - t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) - } - c.Publish(reply, &testdata.Person{Name: recvName}) - dch <- true - }); err != nil { - t.Fatalf("Unable to create subscription: %v", err) - } - if _, err := c.Subscribe("foo", func(_ string, p *testdata.Person) { - if p.Name != sentName { - t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) - } - dch <- true - }); err != nil { - t.Fatalf("Unable to create subscription: %v", err) - } - - if err := c.Publish("foo", &testdata.Person{Name: sentName}); err != nil { - t.Fatalf("Unable to publish: %v", err) - } - - if err := Wait(dch); err != nil { - t.Fatal("Did not get message") - } - if err := Wait(dch); err != nil { - t.Fatal("Did not get message") - } - - response := &testdata.Person{} - if err := c.Request("foo", &testdata.Person{Name: sentName}, response, 2*time.Second); err != nil { - t.Fatalf("Unable to publish: %v", err) - } - if response == nil { - t.Fatal("No response received") - } else if response.Name != recvName { - t.Fatalf("Wrong response: %v instead of %v", response.Name, recvName) - } - - if err := Wait(dch); err != nil { - t.Fatal("Did not get message") - } - if err := Wait(dch); err != nil { - t.Fatal("Did not get message") - } - - c2, err := NewEncodedConn(nc, GOB_ENCODER) - if err != nil { - t.Fatalf("Unable to create encoded connection: %v", err) - } - defer c2.Close() - - if _, err := c2.QueueSubscribe("bar", "baz", func(m *Msg) { - response := &Msg{Subject: m.Reply, Data: []byte(recvName)} - c2.Conn.PublishMsg(response) - dch <- true - }); err != nil { - t.Fatalf("Unable to create subscription: %v", err) - } - - mReply := Msg{} - if err := c2.Request("bar", &Msg{Data: []byte(sentName)}, &mReply, 2*time.Second); err != nil { - t.Fatalf("Unable to send request: %v", err) - } - if string(mReply.Data) != recvName { - t.Fatalf("Wrong reply: %v instead of %v", string(mReply.Data), recvName) - } - - if err := Wait(dch); err != nil { - t.Fatal("Did not get message") - } - - if c.LastError() != nil { - t.Fatalf("Unexpected connection error: %v", c.LastError()) - } - if c2.LastError() != nil { - t.Fatalf("Unexpected connection error: %v", c2.LastError()) - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go deleted file mode 100644 index 82467ce78..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. - -package builtin - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "unsafe" -) - -// DefaultEncoder implementation for EncodedConn. 
-// This encoder will leave []byte and string untouched, but will attempt to -// turn numbers into appropriate strings that can be decoded. It will also -// propely encoded and decode bools. If will encode a struct, but if you want -// to properly handle structures you should use JsonEncoder. -type DefaultEncoder struct { - // Empty -} - -var trueB = []byte("true") -var falseB = []byte("false") -var nilB = []byte("") - -// Encode -func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) { - switch arg := v.(type) { - case string: - bytes := *(*[]byte)(unsafe.Pointer(&arg)) - return bytes, nil - case []byte: - return arg, nil - case bool: - if arg { - return trueB, nil - } else { - return falseB, nil - } - case nil: - return nilB, nil - default: - var buf bytes.Buffer - fmt.Fprintf(&buf, "%+v", arg) - return buf.Bytes(), nil - } -} - -// Decode -func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error { - // Figure out what it's pointing to... - sData := *(*string)(unsafe.Pointer(&data)) - switch arg := vPtr.(type) { - case *string: - *arg = sData - return nil - case *[]byte: - *arg = data - return nil - case *int: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int(n) - return nil - case *int32: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int32(n) - return nil - case *int64: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int64(n) - return nil - case *float32: - n, err := strconv.ParseFloat(sData, 32) - if err != nil { - return err - } - *arg = float32(n) - return nil - case *float64: - n, err := strconv.ParseFloat(sData, 64) - if err != nil { - return err - } - *arg = float64(n) - return nil - case *bool: - b, err := strconv.ParseBool(sData) - if err != nil { - return err - } - *arg = b - return nil - default: - vt := reflect.TypeOf(arg).Elem() - return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/enc_test.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/enc_test.go deleted file mode 100644 index b57553a5a..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/enc_test.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. - -package builtin_test - -import ( - "bytes" - "testing" - "time" - - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/builtin" - "github.com/nats-io/go-nats/test" -) - -const TEST_PORT = 8168 - -func NewEConn(t *testing.T) *nats.EncodedConn { - ec, err := nats.NewEncodedConn(test.NewConnection(t, TEST_PORT), nats.DEFAULT_ENCODER) - if err != nil { - t.Fatalf("Failed to create an encoded connection: %v\n", err) - } - return ec -} - -func TestConstructorErrs(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - c := test.NewConnection(t, TEST_PORT) - _, err := nats.NewEncodedConn(nil, "default") - if err == nil { - t.Fatal("Expected err for nil connection") - } - _, err = nats.NewEncodedConn(c, "foo22") - if err == nil { - t.Fatal("Expected err for bad encoder") - } - c.Close() - _, err = nats.NewEncodedConn(c, "default") - if err == nil { - t.Fatal("Expected err for closed connection") - } - -} - -func TestMarshalString(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testString := "Hello World!" 
- - ec.Subscribe("enc_string", func(s string) { - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - ch <- true - }) - ec.Publish("enc_string", testString) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalBytes(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testBytes := []byte("Hello World!") - - ec.Subscribe("enc_bytes", func(b []byte) { - if !bytes.Equal(b, testBytes) { - t.Fatalf("Received test bytes of '%s', wanted '%s'\n", b, testBytes) - } - ch <- true - }) - ec.Publish("enc_bytes", testBytes) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalInt(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := 22 - - ec.Subscribe("enc_int", func(n int) { - if n != testN { - t.Fatalf("Received test number of %d, wanted %d\n", n, testN) - } - ch <- true - }) - ec.Publish("enc_int", testN) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalInt32(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := 22 - - ec.Subscribe("enc_int", func(n int32) { - if n != int32(testN) { - t.Fatalf("Received test number of %d, wanted %d\n", n, testN) - } - ch <- true - }) - ec.Publish("enc_int", testN) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalInt64(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := 22 - - ec.Subscribe("enc_int", func(n int64) { - if n != int64(testN) { - t.Fatalf("Received test number of %d, wanted %d\n", n, testN) - } - ch <- true - }) - ec.Publish("enc_int", testN) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalFloat32(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := float32(22) - - ec.Subscribe("enc_float", func(n float32) { - if n != testN { - t.Fatalf("Received test number of %f, wanted %f\n", n, testN) - } - ch <- true - }) - ec.Publish("enc_float", testN) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalFloat64(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := float64(22.22) - - ec.Subscribe("enc_float", func(n float64) { - if n != testN { - t.Fatalf("Received test number of %f, wanted %f\n", n, testN) - } - ch <- true - }) - ec.Publish("enc_float", testN) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestMarshalBool(t *testing.T) { - s := 
test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - ch := make(chan bool) - expected := make(chan bool, 1) - - ec.Subscribe("enc_bool", func(b bool) { - val := <-expected - if b != val { - t.Fatal("Boolean values did not match") - } - ch <- true - }) - - expected <- false - ec.Publish("enc_bool", false) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } - - expected <- true - ec.Publish("enc_bool", true) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestExtendedSubscribeCB(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - ch := make(chan bool) - - testString := "Hello World!" - subject := "cb_args" - - ec.Subscribe(subject, func(subj, s string) { - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - if subj != subject { - t.Fatalf("Received subject of '%s', wanted '%s'\n", subj, subject) - } - ch <- true - }) - ec.Publish(subject, testString) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestExtendedSubscribeCB2(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - ch := make(chan bool) - - testString := "Hello World!" - oSubj := "cb_args" - oReply := "foobar" - - ec.Subscribe(oSubj, func(subj, reply, s string) { - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - if subj != oSubj { - t.Fatalf("Received subject of '%s', wanted '%s'\n", subj, oSubj) - } - if reply != oReply { - t.Fatalf("Received reply of '%s', wanted '%s'\n", reply, oReply) - } - ch <- true - }) - ec.PublishRequest(oSubj, oReply, testString) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestRawMsgSubscribeCB(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - ch := make(chan bool) - - testString := "Hello World!" - oSubj := "cb_args" - oReply := "foobar" - - ec.Subscribe(oSubj, func(m *nats.Msg) { - s := string(m.Data) - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - if m.Subject != oSubj { - t.Fatalf("Received subject of '%s', wanted '%s'\n", m.Subject, oSubj) - } - if m.Reply != oReply { - t.Fatalf("Received reply of '%s', wanted '%s'\n", m.Reply, oReply) - } - ch <- true - }) - ec.PublishRequest(oSubj, oReply, testString) - if e := test.Wait(ch); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestEncRequest(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - expectedResp := "I can help!" 
- - ec.Subscribe("help", func(subj, reply, req string) { - ec.Publish(reply, expectedResp) - }) - - var resp string - - err := ec.Request("help", "help me", &resp, 1*time.Second) - if err != nil { - t.Fatalf("Failed at receiving proper response: %v\n", err) - } - if resp != expectedResp { - t.Fatalf("Received reply '%s', wanted '%s'\n", resp, expectedResp) - } -} - -func TestEncRequestReceivesMsg(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - expectedResp := "I can help!" - - ec.Subscribe("help", func(subj, reply, req string) { - ec.Publish(reply, expectedResp) - }) - - var resp nats.Msg - - err := ec.Request("help", "help me", &resp, 1*time.Second) - if err != nil { - t.Fatalf("Failed at receiving proper response: %v\n", err) - } - if string(resp.Data) != expectedResp { - t.Fatalf("Received reply '%s', wanted '%s'\n", string(resp.Data), expectedResp) - } -} - -func TestAsyncMarshalErr(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - ch := make(chan bool) - - testString := "Hello World!" - subject := "err_marshall" - - ec.Subscribe(subject, func(subj, num int) { - // This will never get called. - }) - - ec.Conn.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, err error) { - ch <- true - } - - ec.Publish(subject, testString) - if e := test.Wait(ch); e != nil { - t.Fatalf("Did not receive the message: %s", e) - } -} - -func TestEncodeNil(t *testing.T) { - de := &builtin.DefaultEncoder{} - _, err := de.Encode("foo", nil) - if err != nil { - t.Fatalf("Expected no error encoding nil: %v", err) - } -} - -func TestDecodeDefault(t *testing.T) { - de := &builtin.DefaultEncoder{} - b, err := de.Encode("foo", 22) - if err != nil { - t.Fatalf("Expected no error encoding number: %v", err) - } - var c chan bool - err = de.Decode("foo", b, &c) - if err == nil { - t.Fatalf("Expected an error decoding") - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go deleted file mode 100644 index 988ff42f5..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package builtin - -import ( - "bytes" - "encoding/gob" -) - -// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. -// This encoder will use the builtin encoding/gob to Marshal -// and Unmarshal most types, including structs. -type GobEncoder struct { - // Empty -} - -// FIXME(dlc) - This could probably be more efficient. - -// Encode -func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) { - b := new(bytes.Buffer) - enc := gob.NewEncoder(b) - if err := enc.Encode(v); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Decode -func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { - dec := gob.NewDecoder(bytes.NewBuffer(data)) - err = dec.Decode(vPtr) - return -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_test.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_test.go deleted file mode 100644 index 791192be0..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. 
- -package builtin_test - -import ( - "reflect" - "testing" - - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/test" -) - -func NewGobEncodedConn(tl test.TestLogger) *nats.EncodedConn { - ec, err := nats.NewEncodedConn(test.NewConnection(tl, TEST_PORT), nats.GOB_ENCODER) - if err != nil { - tl.Fatalf("Failed to create an encoded connection: %v\n", err) - } - return ec -} - -func TestGobMarshalString(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewGobEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - testString := "Hello World!" - - ec.Subscribe("gob_string", func(s string) { - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - ch <- true - }) - ec.Publish("gob_string", testString) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func TestGobMarshalInt(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewGobEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := 22 - - ec.Subscribe("gob_int", func(n int) { - if n != testN { - t.Fatalf("Received test int of '%d', wanted '%d'\n", n, testN) - } - ch <- true - }) - ec.Publish("gob_int", testN) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func TestGobMarshalStruct(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewGobEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*person) - - me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - me.Assets = make(map[string]uint) - me.Assets["house"] = 1000 - me.Assets["car"] = 100 - - ec.Subscribe("gob_struct", func(p *person) { - if !reflect.DeepEqual(p, me) { - t.Fatalf("Did not receive the correct struct response") - } - ch <- true - }) - - ec.Publish("gob_struct", me) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func BenchmarkPublishGobStruct(b *testing.B) { - // stop benchmark for set-up - b.StopTimer() - - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewGobEncodedConn(b) - defer ec.Close() - ch := make(chan bool) - - me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*person) - - me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - ec.Subscribe("gob_struct", func(p *person) { - if !reflect.DeepEqual(p, me) { - b.Fatalf("Did not receive the correct struct response") - } - ch <- true - }) - - // resume benchmark - b.StartTimer() - - for n := 0; n < b.N; n++ { - ec.Publish("gob_struct", me) - if e := test.Wait(ch); e != nil { - b.Fatal("Did not receive the message") - } - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go deleted file mode 100644 index 3b269ef02..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. - -package builtin - -import ( - "encoding/json" - "strings" -) - -// JsonEncoder is a JSON Encoder implementation for EncodedConn. 
-// This encoder will use the builtin encoding/json to Marshal -// and Unmarshal most types, including structs. -type JsonEncoder struct { - // Empty -} - -// Encode -func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - return b, nil -} - -// Decode -func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { - switch arg := vPtr.(type) { - case *string: - // If they want a string and it is a JSON string, strip quotes - // This allows someone to send a struct but receive as a plain string - // This cast should be efficient for Go 1.3 and beyond. - str := string(data) - if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { - *arg = str[1 : len(str)-1] - } else { - *arg = str - } - case *[]byte: - *arg = data - default: - err = json.Unmarshal(data, arg) - } - return -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_test.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_test.go deleted file mode 100644 index c0ffb2f45..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_test.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2012-2015 Apcera Inc. All rights reserved. - -package builtin_test - -import ( - "reflect" - "testing" - "time" - - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/builtin" - "github.com/nats-io/go-nats/test" -) - -func NewJsonEncodedConn(tl test.TestLogger) *nats.EncodedConn { - ec, err := nats.NewEncodedConn(test.NewConnection(tl, TEST_PORT), nats.JSON_ENCODER) - if err != nil { - tl.Fatalf("Failed to create an encoded connection: %v\n", err) - } - return ec -} - -func TestJsonMarshalString(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewJsonEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - testString := "Hello World!" 
- - ec.Subscribe("json_string", func(s string) { - if s != testString { - t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) - } - ch <- true - }) - ec.Publish("json_string", testString) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func TestJsonMarshalInt(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewJsonEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - testN := 22 - - ec.Subscribe("json_int", func(n int) { - if n != testN { - t.Fatalf("Received test int of '%d', wanted '%d'\n", n, testN) - } - ch <- true - }) - ec.Publish("json_int", testN) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -type person struct { - Name string - Address string - Age int - Children map[string]*person - Assets map[string]uint -} - -func TestJsonMarshalStruct(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewJsonEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*person) - - me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - me.Assets = make(map[string]uint) - me.Assets["house"] = 1000 - me.Assets["car"] = 100 - - ec.Subscribe("json_struct", func(p *person) { - if !reflect.DeepEqual(p, me) { - t.Fatal("Did not receive the correct struct response") - } - ch <- true - }) - - ec.Publish("json_struct", me) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func BenchmarkJsonMarshalStruct(b *testing.B) { - me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*person) - - me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - encoder := &builtin.JsonEncoder{} - for n := 0; n < b.N; n++ { - if _, err := encoder.Encode("protobuf_test", me); err != nil { - b.Fatal("Couldn't serialize object", err) - } - } -} - -func BenchmarkPublishJsonStruct(b *testing.B) { - // stop benchmark for set-up - b.StopTimer() - - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewJsonEncodedConn(b) - defer ec.Close() - ch := make(chan bool) - - me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*person) - - me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - ec.Subscribe("json_struct", func(p *person) { - if !reflect.DeepEqual(p, me) { - b.Fatalf("Did not receive the correct struct response") - } - ch <- true - }) - - // resume benchmark - b.StartTimer() - - for n := 0; n < b.N; n++ { - ec.Publish("json_struct", me) - if e := test.Wait(ch); e != nil { - b.Fatal("Did not receive the message") - } - } - -} - -func TestNotMarshableToJson(t *testing.T) { - je := &builtin.JsonEncoder{} - ch := make(chan bool) - _, err := je.Encode("foo", ch) - if err == nil { - t.Fatal("Expected an error when failing encoding") - } -} - -func TestFailedEncodedPublish(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewJsonEncodedConn(t) - defer ec.Close() - - ch := make(chan bool) - err := 
ec.Publish("foo", ch) - if err == nil { - t.Fatal("Expected an error trying to publish a channel") - } - err = ec.PublishRequest("foo", "bar", ch) - if err == nil { - t.Fatal("Expected an error trying to publish a channel") - } - var cr chan bool - err = ec.Request("foo", ch, &cr, 1*time.Second) - if err == nil { - t.Fatal("Expected an error trying to publish a channel") - } - err = ec.LastError() - if err != nil { - t.Fatalf("Expected LastError to be nil: %q ", err) - } -} - -func TestDecodeConditionals(t *testing.T) { - je := &builtin.JsonEncoder{} - - b, err := je.Encode("foo", 22) - if err != nil { - t.Fatalf("Expected no error when encoding, got %v\n", err) - } - var foo string - var bar []byte - err = je.Decode("foo", b, &foo) - if err != nil { - t.Fatalf("Expected no error when decoding, got %v\n", err) - } - err = je.Decode("foo", b, &bar) - if err != nil { - t.Fatalf("Expected no error when decoding, got %v\n", err) - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_enc.go b/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_enc.go deleted file mode 100644 index 0ff1d7859..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_enc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 Apcera Inc. All rights reserved. - -package protobuf - -import ( - "errors" - - "github.com/golang/protobuf/proto" - "github.com/nats-io/go-nats" -) - -// Additional index for registered Encoders. -const ( - PROTOBUF_ENCODER = "protobuf" -) - -func init() { - // Register protobuf encoder - nats.RegisterEncoder(PROTOBUF_ENCODER, &ProtobufEncoder{}) -} - -// ProtobufEncoder is a protobuf implementation for EncodedConn -// This encoder will use the builtin protobuf lib to Marshal -// and Unmarshal structs. 
-type ProtobufEncoder struct { - // Empty -} - -var ( - ErrInvalidProtoMsgEncode = errors.New("nats: Invalid protobuf proto.Message object passed to encode") - ErrInvalidProtoMsgDecode = errors.New("nats: Invalid protobuf proto.Message object passed to decode") -) - -// Encode -func (pb *ProtobufEncoder) Encode(subject string, v interface{}) ([]byte, error) { - if v == nil { - return nil, nil - } - i, found := v.(proto.Message) - if !found { - return nil, ErrInvalidProtoMsgEncode - } - - b, err := proto.Marshal(i) - if err != nil { - return nil, err - } - return b, nil -} - -// Decode -func (pb *ProtobufEncoder) Decode(subject string, data []byte, vPtr interface{}) error { - if _, ok := vPtr.(*interface{}); ok { - return nil - } - i, found := vPtr.(proto.Message) - if !found { - return ErrInvalidProtoMsgDecode - } - - err := proto.Unmarshal(data, i) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_test.go b/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_test.go deleted file mode 100644 index e0b360850..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/protobuf/protobuf_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package protobuf_test - -import ( - "reflect" - "testing" - "time" - - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/test" - - "github.com/nats-io/go-nats/encoders/protobuf" - pb "github.com/nats-io/go-nats/encoders/protobuf/testdata" -) - -const TEST_PORT = 8068 - -func NewProtoEncodedConn(tl test.TestLogger) *nats.EncodedConn { - ec, err := nats.NewEncodedConn(test.NewConnection(tl, TEST_PORT), protobuf.PROTOBUF_ENCODER) - if err != nil { - tl.Fatalf("Failed to create an encoded connection: %v\n", err) - } - return ec -} - -func TestProtoMarshalStruct(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewProtoEncodedConn(t) - defer ec.Close() - ch := make(chan bool) - - me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*pb.Person) - - me.Children["sam"] = &pb.Person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - ec.Subscribe("protobuf_test", func(p *pb.Person) { - if !reflect.DeepEqual(p, me) { - t.Fatal("Did not receive the correct protobuf response") - } - ch <- true - }) - - ec.Publish("protobuf_test", me) - if e := test.Wait(ch); e != nil { - t.Fatal("Did not receive the message") - } -} - -func TestProtoNilRequest(t *testing.T) { - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewProtoEncodedConn(t) - defer ec.Close() - - testPerson := &pb.Person{Name: "Anatolii", Age: 25, Address: "Ukraine, Nikolaev"} - - //Subscribe with empty interface shouldn't failed on empty message - ec.Subscribe("nil_test", func(_, reply string, _ interface{}) { - ec.Publish(reply, testPerson) - }) - - resp := new(pb.Person) - - //Request with nil argument shouldn't failed with nil argument - err := ec.Request("nil_test", nil, resp, 100*time.Millisecond) - ec.Flush() - - if err != nil { - t.Error("Fail to send empty message via encoded proto connection") - } - - if !reflect.DeepEqual(testPerson, resp) { - t.Error("Fail to receive encoded response") - } -} - -func BenchmarkProtobufMarshalStruct(b *testing.B) { - me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*pb.Person) - - me.Children["sam"] = &pb.Person{Name: "sam", Age: 
19, Address: "140 New Montgomery St"} - me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - encoder := &protobuf.ProtobufEncoder{} - for n := 0; n < b.N; n++ { - if _, err := encoder.Encode("protobuf_test", me); err != nil { - b.Fatal("Couldn't serialize object", err) - } - } -} - -func BenchmarkPublishProtobufStruct(b *testing.B) { - // stop benchmark for set-up - b.StopTimer() - - s := test.RunServerOnPort(TEST_PORT) - defer s.Shutdown() - - ec := NewProtoEncodedConn(b) - defer ec.Close() - ch := make(chan bool) - - me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} - me.Children = make(map[string]*pb.Person) - - me.Children["sam"] = &pb.Person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} - me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} - - ec.Subscribe("protobuf_test", func(p *pb.Person) { - if !reflect.DeepEqual(p, me) { - b.Fatalf("Did not receive the correct protobuf response") - } - ch <- true - }) - - // resume benchmark - b.StartTimer() - - for n := 0; n < b.N; n++ { - ec.Publish("protobuf_test", me) - if e := test.Wait(ch); e != nil { - b.Fatal("Did not receive the message") - } - } -} diff --git a/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.pb.go b/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.pb.go deleted file mode 100644 index 718e57223..000000000 --- a/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.pb.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by protoc-gen-go. -// source: pbtest.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - pbtest.proto - -It has these top-level messages: - Person -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal
-
-type Person struct {
-	Name     string             `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Age      int32              `protobuf:"varint,2,opt,name=age" json:"age,omitempty"`
-	Address  string             `protobuf:"bytes,3,opt,name=address" json:"address,omitempty"`
-	Children map[string]*Person `protobuf:"bytes,10,rep,name=children" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *Person) Reset() { *m = Person{} }
-func (m *Person) String() string { return proto.CompactTextString(m) }
-func (*Person) ProtoMessage() {}
-
-func (m *Person) GetChildren() map[string]*Person {
-	if m != nil {
-		return m.Children
-	}
-	return nil
-}
-
-func init() {
-}
diff --git a/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.proto b/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.proto
deleted file mode 100644
index 010f8081d..000000000
--- a/vendor/github.com/nats-io/go-nats/encoders/protobuf/testdata/pbtest.proto
+++ /dev/null
@@ -1,11 +0,0 @@
-syntax = "proto3";
-
-package testdata;
-
-message Person {
-  string name = 1;
-  int32 age = 2;
-  string address = 3;
-
-  map<string, Person> children = 10;
-}
diff --git a/vendor/github.com/nats-io/go-nats/example_test.go b/vendor/github.com/nats-io/go-nats/example_test.go
deleted file mode 100644
index 2411f50ee..000000000
--- a/vendor/github.com/nats-io/go-nats/example_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package nats_test
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/nats-io/go-nats"
-)
-
-// Shows different ways to create a Conn
-func ExampleConnect() {
-
-	nc, _ := nats.Connect(nats.DefaultURL)
-	nc.Close()
-
-	nc, _ = nats.Connect("nats://derek:secretpassword@demo.nats.io:4222")
-	nc.Close()
-
-	nc, _ = nats.Connect("tls://derek:secretpassword@demo.nats.io:4443")
-	nc.Close()
-
-	opts := nats.Options{
-		AllowReconnect: true,
-		MaxReconnect:   10,
-		ReconnectWait:  5 * time.Second,
-		Timeout:        1 * time.Second,
-	}
-
-	nc, _ = opts.Connect()
-	nc.Close()
-}
-
-// This Example shows an asynchronous subscriber.
-func ExampleConn_Subscribe() {
-	nc, _ := nats.Connect(nats.DefaultURL)
-	defer nc.Close()
-
-	nc.Subscribe("foo", func(m *nats.Msg) {
-		fmt.Printf("Received a message: %s\n", string(m.Data))
-	})
-}
-
-// This Example shows a synchronous subscriber.
-func ExampleConn_SubscribeSync() {
-	nc, _ := nats.Connect(nats.DefaultURL)
-	defer nc.Close()
-
-	sub, _ := nc.SubscribeSync("foo")
-	m, err := sub.NextMsg(1 * time.Second)
-	if err == nil {
-		fmt.Printf("Received a message: %s\n", string(m.Data))
-	} else {
-		fmt.Println("NextMsg timed out.")
-	}
-}
-
-func ExampleSubscription_NextMsg() {
-	nc, _ := nats.Connect(nats.DefaultURL)
-	defer nc.Close()
-
-	sub, _ := nc.SubscribeSync("foo")
-	m, err := sub.NextMsg(1 * time.Second)
-	if err == nil {
-		fmt.Printf("Received a message: %s\n", string(m.Data))
-	} else {
-		fmt.Println("NextMsg timed out.")
-	}
-}
-
-func ExampleSubscription_Unsubscribe() {
-	nc, _ := nats.Connect(nats.DefaultURL)
-	defer nc.Close()
-
-	sub, _ := nc.SubscribeSync("foo")
-	// ...
- sub.Unsubscribe() -} - -func ExampleConn_Publish() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - nc.Publish("foo", []byte("Hello World!")) -} - -func ExampleConn_PublishMsg() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} - nc.PublishMsg(msg) -} - -func ExampleConn_Flush() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} - for i := 0; i < 1000; i++ { - nc.PublishMsg(msg) - } - err := nc.Flush() - if err == nil { - // Everything has been processed by the server for nc *Conn. - } -} - -func ExampleConn_FlushTimeout() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} - for i := 0; i < 1000; i++ { - nc.PublishMsg(msg) - } - // Only wait for up to 1 second for Flush - err := nc.FlushTimeout(1 * time.Second) - if err == nil { - // Everything has been processed by the server for nc *Conn. - } -} - -func ExampleConn_Request() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - nc.Subscribe("foo", func(m *nats.Msg) { - nc.Publish(m.Reply, []byte("I will help you")) - }) - nc.Request("foo", []byte("help"), 50*time.Millisecond) -} - -func ExampleConn_QueueSubscribe() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - received := 0 - - nc.QueueSubscribe("foo", "worker_group", func(_ *nats.Msg) { - received++ - }) -} - -func ExampleSubscription_AutoUnsubscribe() { - nc, _ := nats.Connect(nats.DefaultURL) - defer nc.Close() - - received, wanted, total := 0, 10, 100 - - sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) { - received++ - }) - sub.AutoUnsubscribe(wanted) - - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - - fmt.Printf("Received = %d", received) -} - -func ExampleConn_Close() { - nc, _ := nats.Connect(nats.DefaultURL) - nc.Close() -} - -// Shows how to wrap a Conn into an EncodedConn -func ExampleNewEncodedConn() { - nc, _ := nats.Connect(nats.DefaultURL) - c, _ := nats.NewEncodedConn(nc, "json") - c.Close() -} - -// EncodedConn can publish virtually anything just -// by passing it in. The encoder will be used to properly -// encode the raw Go type -func ExampleEncodedConn_Publish() { - nc, _ := nats.Connect(nats.DefaultURL) - c, _ := nats.NewEncodedConn(nc, "json") - defer c.Close() - - type person struct { - Name string - Address string - Age int - } - - me := &person{Name: "derek", Age: 22, Address: "85 Second St"} - c.Publish("hello", me) -} - -// EncodedConn's subscribers will automatically decode the -// wire data into the requested Go type using the Decode() -// method of the registered Encoder. The callback signature -// can also vary to include additional data, such as subject -// and reply subjects. -func ExampleEncodedConn_Subscribe() { - nc, _ := nats.Connect(nats.DefaultURL) - c, _ := nats.NewEncodedConn(nc, "json") - defer c.Close() - - type person struct { - Name string - Address string - Age int - } - - c.Subscribe("hello", func(p *person) { - fmt.Printf("Received a person! %+v\n", p) - }) - - c.Subscribe("hello", func(subj, reply string, p *person) { - fmt.Printf("Received a person on subject %s! %+v\n", subj, p) - }) - - me := &person{Name: "derek", Age: 22, Address: "85 Second St"} - c.Publish("hello", me) -} - -// BindSendChan() allows binding of a Go channel to a nats -// subject for publish operations. 
The Encoder attached to the -// EncodedConn will be used for marshalling. -func ExampleEncodedConn_BindSendChan() { - nc, _ := nats.Connect(nats.DefaultURL) - c, _ := nats.NewEncodedConn(nc, "json") - defer c.Close() - - type person struct { - Name string - Address string - Age int - } - - ch := make(chan *person) - c.BindSendChan("hello", ch) - - me := &person{Name: "derek", Age: 22, Address: "85 Second St"} - ch <- me -} - -// BindRecvChan() allows binding of a Go channel to a nats -// subject for subscribe operations. The Encoder attached to the -// EncodedConn will be used for un-marshalling. -func ExampleEncodedConn_BindRecvChan() { - nc, _ := nats.Connect(nats.DefaultURL) - c, _ := nats.NewEncodedConn(nc, "json") - defer c.Close() - - type person struct { - Name string - Address string - Age int - } - - ch := make(chan *person) - c.BindRecvChan("hello", ch) - - me := &person{Name: "derek", Age: 22, Address: "85 Second St"} - c.Publish("hello", me) - - // Receive the publish directly on a channel - who := <-ch - - fmt.Printf("%v says hello!\n", who) -} diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-bench.go b/vendor/github.com/nats-io/go-nats/examples/nats-bench.go deleted file mode 100644 index 1194e8821..000000000 --- a/vendor/github.com/nats-io/go-nats/examples/nats-bench.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2015 Apcera Inc. All rights reserved. - -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "strings" - "sync" - "time" - - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/bench" -) - -// Some sane defaults -const ( - DefaultNumMsgs = 100000 - DefaultNumPubs = 1 - DefaultNumSubs = 0 - DefaultMessageSize = 128 -) - -func usage() { - log.Fatalf("Usage: nats-bench [-s server (%s)] [--tls] [-np NUM_PUBLISHERS] [-ns NUM_SUBSCRIBERS] [-n NUM_MSGS] [-ms MESSAGE_SIZE] [-csv csvfile] \n", nats.DefaultURL) -} - -var benchmark *bench.Benchmark - -func main() { - var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") - var tls = flag.Bool("tls", false, "Use TLS Secure Connection") - var numPubs = flag.Int("np", DefaultNumPubs, "Number of Concurrent Publishers") - var numSubs = flag.Int("ns", DefaultNumSubs, "Number of Concurrent Subscribers") - var numMsgs = flag.Int("n", DefaultNumMsgs, "Number of Messages to Publish") - var msgSize = flag.Int("ms", DefaultMessageSize, "Size of the message.") - var csvFile = flag.String("csv", "", "Save bench data to csv file") - - log.SetFlags(0) - flag.Usage = usage - flag.Parse() - - args := flag.Args() - if len(args) != 1 { - usage() - } - - if *numMsgs <= 0 { - log.Fatal("Number of messages should be greater than zero.") - } - - // Setup the option block - opts := nats.DefaultOptions - opts.Servers = strings.Split(*urls, ",") - for i, s := range opts.Servers { - opts.Servers[i] = strings.Trim(s, " ") - } - opts.Secure = *tls - - benchmark = bench.NewBenchmark("NATS", *numSubs, *numPubs) - - var startwg sync.WaitGroup - var donewg sync.WaitGroup - - donewg.Add(*numPubs + *numSubs) - - // Run Subscribers first - startwg.Add(*numSubs) - for i := 0; i < *numSubs; i++ { - go runSubscriber(&startwg, &donewg, opts, *numMsgs, *msgSize) - } - startwg.Wait() - - // Now Publishers - startwg.Add(*numPubs) - pubCounts := bench.MsgsPerClient(*numMsgs, *numPubs) - for i := 0; i < *numPubs; i++ { - go runPublisher(&startwg, &donewg, opts, pubCounts[i], *msgSize) - } - - log.Printf("Starting benchmark [msgs=%d, msgsize=%d, pubs=%d, subs=%d]\n", *numMsgs, *msgSize, *numPubs, 
*numSubs) - - startwg.Wait() - donewg.Wait() - - benchmark.Close() - - fmt.Print(benchmark.Report()) - - if len(*csvFile) > 0 { - csv := benchmark.CSV() - ioutil.WriteFile(*csvFile, []byte(csv), 0644) - fmt.Printf("Saved metric data in csv file %s\n", *csvFile) - } -} - -func runPublisher(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int, msgSize int) { - nc, err := opts.Connect() - if err != nil { - log.Fatalf("Can't connect: %v\n", err) - } - defer nc.Close() - startwg.Done() - - args := flag.Args() - subj := args[0] - var msg []byte - if msgSize > 0 { - msg = make([]byte, msgSize) - } - - start := time.Now() - - for i := 0; i < numMsgs; i++ { - nc.Publish(subj, msg) - } - nc.Flush() - benchmark.AddPubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), nc)) - - donewg.Done() -} - -func runSubscriber(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int, msgSize int) { - nc, err := opts.Connect() - if err != nil { - log.Fatalf("Can't connect: %v\n", err) - } - - args := flag.Args() - subj := args[0] - - received := 0 - start := time.Now() - nc.Subscribe(subj, func(msg *nats.Msg) { - received++ - if received >= numMsgs { - benchmark.AddSubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), nc)) - donewg.Done() - nc.Close() - } - }) - nc.Flush() - startwg.Done() -} diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-pub.go b/vendor/github.com/nats-io/go-nats/examples/nats-pub.go deleted file mode 100644 index 4eaedb25e..000000000 --- a/vendor/github.com/nats-io/go-nats/examples/nats-pub.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012-2016 Apcera Inc. All rights reserved. -// +build ignore - -package main - -import ( - "flag" - "log" - - "github.com/nats-io/go-nats" -) - -// NOTE: Use tls scheme for TLS, e.g. nats-pub -s tls://demo.nats.io:4443 foo hello -func usage() { - log.Fatalf("Usage: nats-pub [-s server (%s)] \n", nats.DefaultURL) -} - -func main() { - var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") - - log.SetFlags(0) - flag.Usage = usage - flag.Parse() - - args := flag.Args() - if len(args) < 2 { - usage() - } - - nc, err := nats.Connect(*urls) - if err != nil { - log.Fatal(err) - } - defer nc.Close() - - subj, msg := args[0], []byte(args[1]) - - nc.Publish(subj, msg) - nc.Flush() - - if err := nc.LastError(); err != nil { - log.Fatal(err) - } else { - log.Printf("Published [%s] : '%s'\n", subj, msg) - } -} diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-qsub.go b/vendor/github.com/nats-io/go-nats/examples/nats-qsub.go deleted file mode 100644 index 2262dba64..000000000 --- a/vendor/github.com/nats-io/go-nats/examples/nats-qsub.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2012-2016 Apcera Inc. All rights reserved. -// +build ignore - -package main - -import ( - "flag" - "log" - "os" - "runtime" - - "github.com/nats-io/go-nats" -) - -// NOTE: Use tls scheme for TLS, e.g. 
nats-qsub -s tls://demo.nats.io:4443 foo -func usage() { - log.Fatalf("Usage: nats-qsub [-s server] [-t] \n") -} - -func printMsg(m *nats.Msg, i int) { - log.Printf("[#%d] Received on [%s] Queue[%s] Pid[%d]: '%s'\n", i, m.Subject, m.Sub.Queue, os.Getpid(), string(m.Data)) -} - -func main() { - var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") - var showTime = flag.Bool("t", false, "Display timestamps") - - log.SetFlags(0) - flag.Usage = usage - flag.Parse() - - args := flag.Args() - if len(args) < 2 { - usage() - } - - nc, err := nats.Connect(*urls) - if err != nil { - log.Fatalf("Can't connect: %v\n", err) - } - - subj, queue, i := args[0], args[1], 0 - - nc.QueueSubscribe(subj, queue, func(msg *nats.Msg) { - i++ - printMsg(msg, i) - }) - nc.Flush() - - if err := nc.LastError(); err != nil { - log.Fatal(err) - } - - log.Printf("Listening on [%s]\n", subj) - if *showTime { - log.SetFlags(log.LstdFlags) - } - - runtime.Goexit() -} diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-req.go b/vendor/github.com/nats-io/go-nats/examples/nats-req.go deleted file mode 100644 index 711be5e7c..000000000 --- a/vendor/github.com/nats-io/go-nats/examples/nats-req.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012-2016 Apcera Inc. All rights reserved. -// +build ignore - -package main - -import ( - "flag" - "log" - "time" - - "github.com/nats-io/go-nats" -) - -// NOTE: Use tls scheme for TLS, e.g. nats-req -s tls://demo.nats.io:4443 foo hello -func usage() { - log.Fatalf("Usage: nats-req [-s server (%s)] \n", nats.DefaultURL) -} - -func main() { - var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") - - log.SetFlags(0) - flag.Usage = usage - flag.Parse() - - args := flag.Args() - if len(args) < 2 { - usage() - } - - nc, err := nats.Connect(*urls) - if err != nil { - log.Fatalf("Can't connect: %v\n", err) - } - defer nc.Close() - subj, payload := args[0], []byte(args[1]) - - msg, err := nc.Request(subj, []byte(payload), 100*time.Millisecond) - if err != nil { - if nc.LastError() != nil { - log.Fatalf("Error in Request: %v\n", nc.LastError()) - } - log.Fatalf("Error in Request: %v\n", err) - } - - log.Printf("Published [%s] : '%s'\n", subj, payload) - log.Printf("Received [%v] : '%s'\n", msg.Subject, string(msg.Data)) -} diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-rply.go b/vendor/github.com/nats-io/go-nats/examples/nats-rply.go deleted file mode 100644 index d5407c42f..000000000 --- a/vendor/github.com/nats-io/go-nats/examples/nats-rply.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2012-2016 Apcera Inc. All rights reserved. -// +build ignore - -package main - -import ( - "flag" - "log" - "runtime" - - "github.com/nats-io/go-nats" -) - -// NOTE: Use tls scheme for TLS, e.g. 
diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-rply.go b/vendor/github.com/nats-io/go-nats/examples/nats-rply.go
deleted file mode 100644
index d5407c42f..000000000
--- a/vendor/github.com/nats-io/go-nats/examples/nats-rply.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012-2016 Apcera Inc. All rights reserved.
-// +build ignore
-
-package main
-
-import (
-	"flag"
-	"log"
-	"runtime"
-
-	"github.com/nats-io/go-nats"
-)
-
-// NOTE: Use tls scheme for TLS, e.g. nats-rply -s tls://demo.nats.io:4443 foo hello
-func usage() {
-	log.Fatalf("Usage: nats-rply [-s server][-t] <subject> <response> \n")
-}
-
-func printMsg(m *nats.Msg, i int) {
-	log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, string(m.Data))
-}
-
-func main() {
-	var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
-	var showTime = flag.Bool("t", false, "Display timestamps")
-
-	log.SetFlags(0)
-	flag.Usage = usage
-	flag.Parse()
-
-	args := flag.Args()
-	if len(args) < 2 {
-		usage()
-	}
-
-	nc, err := nats.Connect(*urls)
-	if err != nil {
-		log.Fatalf("Can't connect: %v\n", err)
-	}
-
-	subj, reply, i := args[0], args[1], 0
-
-	nc.Subscribe(subj, func(msg *nats.Msg) {
-		i++
-		printMsg(msg, i)
-		nc.Publish(msg.Reply, []byte(reply))
-	})
-	nc.Flush()
-
-	if err := nc.LastError(); err != nil {
-		log.Fatal(err)
-	}
-
-	log.Printf("Listening on [%s]\n", subj)
-	if *showTime {
-		log.SetFlags(log.LstdFlags)
-	}
-
-	runtime.Goexit()
-}
diff --git a/vendor/github.com/nats-io/go-nats/examples/nats-sub.go b/vendor/github.com/nats-io/go-nats/examples/nats-sub.go
deleted file mode 100644
index 048a0d964..000000000
--- a/vendor/github.com/nats-io/go-nats/examples/nats-sub.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2012-2016 Apcera Inc. All rights reserved.
-// +build ignore
-
-package main
-
-import (
-	"flag"
-	"log"
-	"runtime"
-
-	"github.com/nats-io/go-nats"
-)
-
-// NOTE: Use tls scheme for TLS, e.g. nats-sub -s tls://demo.nats.io:4443 foo
-func usage() {
-	log.Fatalf("Usage: nats-sub [-s server] [-t] <subject> \n")
-}
-
-func printMsg(m *nats.Msg, i int) {
-	log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, string(m.Data))
-}
-
-func main() {
-	var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
-	var showTime = flag.Bool("t", false, "Display timestamps")
-
-	log.SetFlags(0)
-	flag.Usage = usage
-	flag.Parse()
-
-	args := flag.Args()
-	if len(args) < 1 {
-		usage()
-	}
-
-	nc, err := nats.Connect(*urls)
-	if err != nil {
-		log.Fatalf("Can't connect: %v\n", err)
-	}
-
-	subj, i := args[0], 0
-
-	nc.Subscribe(subj, func(msg *nats.Msg) {
-		i += 1
-		printMsg(msg, i)
-	})
-	nc.Flush()
-
-	if err := nc.LastError(); err != nil {
-		log.Fatal(err)
-	}
-
-	log.Printf("Listening on [%s]\n", subj)
-	if *showTime {
-		log.SetFlags(log.LstdFlags)
-	}
-
-	runtime.Goexit()
-}
diff --git a/vendor/github.com/nats-io/go-nats/nats.go b/vendor/github.com/nats-io/go-nats/nats.go
deleted file mode 100644
index 52a3bb083..000000000
--- a/vendor/github.com/nats-io/go-nats/nats.go
+++ /dev/null
@@ -1,2630 +0,0 @@
-// Copyright 2012-2016 Apcera Inc. All rights reserved.
-
-// A Go client for the NATS messaging system (https://nats.io).
-package nats
-
-import (
-	"bufio"
-	"bytes"
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"net/url"
-	"regexp"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/nats-io/go-nats/util"
-	"github.com/nats-io/nuid"
-)
-
-// Default Constants
-const (
-	Version                 = "1.2.2"
-	DefaultURL              = "nats://localhost:4222"
-	DefaultPort             = 4222
-	DefaultMaxReconnect     = 60
-	DefaultReconnectWait    = 2 * time.Second
-	DefaultTimeout          = 2 * time.Second
-	DefaultPingInterval     = 2 * time.Minute
-	DefaultMaxPingOut       = 2
-	DefaultMaxChanLen       = 8192            // 8k
-	DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB
-	RequestChanLen          = 8
-	LangString              = "go"
-)
-
-// STALE_CONNECTION is for detection and proper handling of stale connections.
-const STALE_CONNECTION = "stale connection" - -// PERMISSIONS_ERR is for when nats server subject authorization has failed. -const PERMISSIONS_ERR = "permissions violation" - -// Errors -var ( - ErrConnectionClosed = errors.New("nats: connection closed") - ErrSecureConnRequired = errors.New("nats: secure connection required") - ErrSecureConnWanted = errors.New("nats: secure connection not available") - ErrBadSubscription = errors.New("nats: invalid subscription") - ErrTypeSubscription = errors.New("nats: invalid subscription type") - ErrBadSubject = errors.New("nats: invalid subject") - ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") - ErrTimeout = errors.New("nats: timeout") - ErrBadTimeout = errors.New("nats: timeout invalid") - ErrAuthorization = errors.New("nats: authorization violation") - ErrNoServers = errors.New("nats: no servers available for connection") - ErrJsonParse = errors.New("nats: connect message, json parse error") - ErrChanArg = errors.New("nats: argument needs to be a channel type") - ErrMaxPayload = errors.New("nats: maximum payload exceeded") - ErrMaxMessages = errors.New("nats: maximum messages delivered") - ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") - ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") - ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") - ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") - ErrInvalidConnection = errors.New("nats: invalid connection") - ErrInvalidMsg = errors.New("nats: invalid message or message nil") - ErrInvalidArg = errors.New("nats: invalid argument") - ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) -) - -var DefaultOptions = Options{ - AllowReconnect: true, - MaxReconnect: DefaultMaxReconnect, - ReconnectWait: DefaultReconnectWait, - Timeout: DefaultTimeout, - PingInterval: DefaultPingInterval, - MaxPingsOut: DefaultMaxPingOut, - SubChanLen: DefaultMaxChanLen, - ReconnectBufSize: DefaultReconnectBufSize, - Dialer: &net.Dialer{ - Timeout: DefaultTimeout, - }, -} - -// Status represents the state of the connection. -type Status int - -const ( - DISCONNECTED = Status(iota) - CONNECTED - CLOSED - RECONNECTING - CONNECTING -) - -// ConnHandler is used for asynchronous events such as -// disconnected and closed connections. -type ConnHandler func(*Conn) - -// ErrHandler is used to process asynchronous errors encountered -// while processing inbound messages. -type ErrHandler func(*Conn, *Subscription, error) - -// asyncCB is used to preserve order for async callbacks. -type asyncCB func() - -// Option is a function on the options for a connection. -type Option func(*Options) error - -// Options can be used to create a customized connection. -type Options struct { - Url string - Servers []string - NoRandomize bool - Name string - Verbose bool - Pedantic bool - Secure bool - TLSConfig *tls.Config - AllowReconnect bool - MaxReconnect int - ReconnectWait time.Duration - Timeout time.Duration - PingInterval time.Duration // disabled if 0 or negative - MaxPingsOut int - ClosedCB ConnHandler - DisconnectedCB ConnHandler - ReconnectedCB ConnHandler - AsyncErrorCB ErrHandler - - // Size of the backing bufio buffer during reconnect. Once this - // has been exhausted publish operations will error. - ReconnectBufSize int - - // The size of the buffered channel used between the socket - // Go routine and the message delivery for SyncSubscriptions. 
- // NOTE: This does not affect AsyncSubscriptions which are - // dictated by PendingLimits() - SubChanLen int - - User string - Password string - Token string - - // Dialer allows users setting a custom Dialer - Dialer *net.Dialer -} - -const ( - // Scratch storage for assembling protocol headers - scratchSize = 512 - - // The size of the bufio reader/writer on top of the socket. - defaultBufSize = 32768 - - // The buffered size of the flush "kick" channel - flushChanSize = 1024 - - // Default server pool size - srvPoolSize = 4 - - // Channel size for the async callback handler. - asyncCBChanSize = 32 -) - -// A Conn represents a bare connection to a nats-server. -// It can send and receive []byte payloads. -type Conn struct { - // Keep all members for which we use atomic at the beginning of the - // struct and make sure they are all 64bits (or use padding if necessary). - // atomic.* functions crash on 32bit machines if operand is not aligned - // at 64bit. See https://github.com/golang/go/issues/599 - ssid int64 - - Statistics - mu sync.Mutex - Opts Options - wg sync.WaitGroup - url *url.URL - conn net.Conn - srvPool []*srv - urls map[string]struct{} // Keep track of all known URLs (used by processInfo) - bw *bufio.Writer - pending *bytes.Buffer - fch chan bool - info serverInfo - subs map[int64]*Subscription - mch chan *Msg - ach chan asyncCB - pongs []chan bool - scratch [scratchSize]byte - status Status - err error - ps *parseState - ptmr *time.Timer - pout int -} - -// A Subscription represents interest in a given subject. -type Subscription struct { - mu sync.Mutex - sid int64 - - // Subject that represents this subscription. This can be different - // than the received subject inside a Msg if this is a wildcard. - Subject string - - // Optional queue group name. If present, all subscriptions with the - // same name will form a distributed queue, and each message will - // only be processed by one member of the group. - Queue string - - delivered uint64 - max uint64 - conn *Conn - mcb MsgHandler - mch chan *Msg - closed bool - sc bool - connClosed bool - - // Type of Subscription - typ SubscriptionType - - // Async linked list - pHead *Msg - pTail *Msg - pCond *sync.Cond - - // Pending stats, async subscriptions, high-speed etc. - pMsgs int - pBytes int - pMsgsMax int - pBytesMax int - pMsgsLimit int - pBytesLimit int - dropped int -} - -// Msg is a structure used by Subscribers and PublishMsg(). -type Msg struct { - Subject string - Reply string - Data []byte - Sub *Subscription - next *Msg -} - -// Tracks various stats received and sent on this connection, -// including counts for messages and bytes. -type Statistics struct { - InMsgs uint64 - OutMsgs uint64 - InBytes uint64 - OutBytes uint64 - Reconnects uint64 -} - -// Tracks individual backend servers. -type srv struct { - url *url.URL - didConnect bool - reconnects int - lastAttempt time.Time - isImplicit bool -} - -type serverInfo struct { - Id string `json:"server_id"` - Host string `json:"host"` - Port uint `json:"port"` - Version string `json:"version"` - AuthRequired bool `json:"auth_required"` - TLSRequired bool `json:"tls_required"` - MaxPayload int64 `json:"max_payload"` - ConnectURLs []string `json:"connect_urls,omitempty"` -} - -const ( - // clientProtoZero is the original client protocol from 2009. - // http://nats.io/documentation/internals/nats-protocol/ - clientProtoZero = iota - // clientProtoInfo signals a client can receive more then the original INFO block. 
- // This can be used to update clients on other cluster members, etc. - clientProtoInfo -) - -type connectInfo struct { - Verbose bool `json:"verbose"` - Pedantic bool `json:"pedantic"` - User string `json:"user,omitempty"` - Pass string `json:"pass,omitempty"` - Token string `json:"auth_token,omitempty"` - TLS bool `json:"tls_required"` - Name string `json:"name"` - Lang string `json:"lang"` - Version string `json:"version"` - Protocol int `json:"protocol"` -} - -// MsgHandler is a callback function that processes messages delivered to -// asynchronous subscribers. -type MsgHandler func(msg *Msg) - -// Connect will attempt to connect to the NATS system. -// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 -// Comma separated arrays are also supported, e.g. urlA, urlB. -// Options start with the defaults but can be overridden. -func Connect(url string, options ...Option) (*Conn, error) { - opts := DefaultOptions - opts.Servers = processUrlString(url) - for _, opt := range options { - if err := opt(&opts); err != nil { - return nil, err - } - } - return opts.Connect() -} - -// Options that can be passed to Connect. - -// Name is an Option to set the client name. -func Name(name string) Option { - return func(o *Options) error { - o.Name = name - return nil - } -} - -// Secure is an Option to enable TLS secure connections that skip server verification by default. -// Pass a TLS Configuration for proper TLS. -func Secure(tls ...*tls.Config) Option { - return func(o *Options) error { - o.Secure = true - // Use of variadic just simplifies testing scenarios. We only take the first one. - // fixme(DLC) - Could panic if more than one. Could also do TLS option. - if len(tls) > 1 { - return ErrMultipleTLSConfigs - } - if len(tls) == 1 { - o.TLSConfig = tls[0] - } - return nil - } -} - -// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. If Secure is -// not already set this will set it as well. -func RootCAs(file ...string) Option { - return func(o *Options) error { - pool := x509.NewCertPool() - for _, f := range file { - rootPEM, err := ioutil.ReadFile(f) - if err != nil || rootPEM == nil { - return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err) - } - ok := pool.AppendCertsFromPEM([]byte(rootPEM)) - if !ok { - return fmt.Errorf("nats: failed to parse root certificate from %q", f) - } - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - o.TLSConfig.RootCAs = pool - o.Secure = true - return nil - } -} - -// ClientCert is a helper option to provide the client certificate from a file. If Secure is -// not already set this will set it as well -func ClientCert(certFile, keyFile string) Option { - return func(o *Options) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return fmt.Errorf("nats: error loading client certificate: %v", err) - } - cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return fmt.Errorf("nats: error parsing client certificate: %v", err) - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - o.TLSConfig.Certificates = []tls.Certificate{cert} - o.Secure = true - return nil - } -} - -// NoReconnect is an Option to turn off reconnect behavior. -func NoReconnect() Option { - return func(o *Options) error { - o.AllowReconnect = false - return nil - } -} - -// DontRandomize is an Option to turn off randomizing the server pool. 
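For reference, the connect options above compose. A sketch of a TLS connection with a CA bundle and client certificate; the tls:// URL is the demo server used in the deleted examples, and the .pem paths are hypothetical:

package main

import (
	"log"

	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect("tls://demo.nats.io:4443",
		nats.Name("sample-client"),
		// Hypothetical file paths. RootCAs and ClientCert each flip
		// Secure on and build or extend the shared tls.Config.
		nats.RootCAs("./ca.pem"),
		nats.ClientCert("./client-cert.pem", "./client-key.pem"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	log.Println("TLS connection established")
}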
-func DontRandomize() Option { - return func(o *Options) error { - o.NoRandomize = true - return nil - } -} - -// ReconnectWait is an Option to set the wait time between reconnect attempts. -func ReconnectWait(t time.Duration) Option { - return func(o *Options) error { - o.ReconnectWait = t - return nil - } -} - -// MaxReconnects is an Option to set the maximum number of reconnect attempts. -func MaxReconnects(max int) Option { - return func(o *Options) error { - o.MaxReconnect = max - return nil - } -} - -// Timeout is an Option to set the timeout for Dial on a connection. -func Timeout(t time.Duration) Option { - return func(o *Options) error { - o.Timeout = t - return nil - } -} - -// DisconnectHandler is an Option to set the disconnected handler. -func DisconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.DisconnectedCB = cb - return nil - } -} - -// ReconnectHandler is an Option to set the reconnected handler. -func ReconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ReconnectedCB = cb - return nil - } -} - -// ClosedHandler is an Option to set the closed handler. -func ClosedHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ClosedCB = cb - return nil - } -} - -// ErrHandler is an Option to set the async error handler. -func ErrorHandler(cb ErrHandler) Option { - return func(o *Options) error { - o.AsyncErrorCB = cb - return nil - } -} - -// UserInfo is an Option to set the username and password to -// use when not included directly in the URLs. -func UserInfo(user, password string) Option { - return func(o *Options) error { - o.User = user - o.Password = password - return nil - } -} - -// Token is an Option to set the token to use when not included -// directly in the URLs. -func Token(token string) Option { - return func(o *Options) error { - o.Token = token - return nil - } -} - -// Dialer is an Option to set the dialer which will be used when -// attempting to establish a connection. -func Dialer(dialer *net.Dialer) Option { - return func(o *Options) error { - o.Dialer = dialer - return nil - } -} - -// Handler processing - -// SetDisconnectHandler will set the disconnect event handler. -func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DisconnectedCB = dcb -} - -// SetReconnectHandler will set the reconnect event handler. -func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ReconnectedCB = rcb -} - -// SetClosedHandler will set the reconnect event handler. -func (nc *Conn) SetClosedHandler(cb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ClosedCB = cb -} - -// SetErrHandler will set the async error handler. -func (nc *Conn) SetErrorHandler(cb ErrHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.AsyncErrorCB = cb -} - -// Process the url string argument to Connect. Return an array of -// urls, even if only one. -func processUrlString(url string) []string { - urls := strings.Split(url, ",") - for i, s := range urls { - urls[i] = strings.TrimSpace(s) - } - return urls -} - -// Connect will attempt to connect to a NATS server with multiple options. -func (o Options) Connect() (*Conn, error) { - nc := &Conn{Opts: o} - - // Some default options processing. 
- if nc.Opts.MaxPingsOut == 0 { - nc.Opts.MaxPingsOut = DefaultMaxPingOut - } - // Allow old default for channel length to work correctly. - if nc.Opts.SubChanLen == 0 { - nc.Opts.SubChanLen = DefaultMaxChanLen - } - // Default ReconnectBufSize - if nc.Opts.ReconnectBufSize == 0 { - nc.Opts.ReconnectBufSize = DefaultReconnectBufSize - } - // Ensure that Timeout is not 0 - if nc.Opts.Timeout == 0 { - nc.Opts.Timeout = DefaultTimeout - } - - // Allow custom Dialer for connecting using DialTimeout by default - if nc.Opts.Dialer == nil { - nc.Opts.Dialer = &net.Dialer{ - Timeout: nc.Opts.Timeout, - } - } - - if err := nc.setupServerPool(); err != nil { - return nil, err - } - - // Create the async callback channel. - nc.ach = make(chan asyncCB, asyncCBChanSize) - - if err := nc.connect(); err != nil { - return nil, err - } - - // Spin up the async cb dispatcher on success - go nc.asyncDispatch() - - return nc, nil -} - -const ( - _CRLF_ = "\r\n" - _EMPTY_ = "" - _SPC_ = " " - _PUB_P_ = "PUB " -) - -const ( - _OK_OP_ = "+OK" - _ERR_OP_ = "-ERR" - _MSG_OP_ = "MSG" - _PING_OP_ = "PING" - _PONG_OP_ = "PONG" - _INFO_OP_ = "INFO" -) - -const ( - conProto = "CONNECT %s" + _CRLF_ - pingProto = "PING" + _CRLF_ - pongProto = "PONG" + _CRLF_ - pubProto = "PUB %s %s %d" + _CRLF_ - subProto = "SUB %s %s %d" + _CRLF_ - unsubProto = "UNSUB %d %s" + _CRLF_ - okProto = _OK_OP_ + _CRLF_ -) - -// Return the currently selected server -func (nc *Conn) currentServer() (int, *srv) { - for i, s := range nc.srvPool { - if s == nil { - continue - } - if s.url == nc.url { - return i, s - } - } - return -1, nil -} - -// Pop the current server and put onto the end of the list. Select head of list as long -// as number of reconnect attempts under MaxReconnect. -func (nc *Conn) selectNextServer() (*srv, error) { - i, s := nc.currentServer() - if i < 0 { - return nil, ErrNoServers - } - sp := nc.srvPool - num := len(sp) - copy(sp[i:num-1], sp[i+1:num]) - maxReconnect := nc.Opts.MaxReconnect - if maxReconnect < 0 || s.reconnects < maxReconnect { - nc.srvPool[num-1] = s - } else { - nc.srvPool = sp[0 : num-1] - } - if len(nc.srvPool) <= 0 { - nc.url = nil - return nil, ErrNoServers - } - nc.url = nc.srvPool[0].url - return nc.srvPool[0], nil -} - -// Will assign the correct server to the nc.Url -func (nc *Conn) pickServer() error { - nc.url = nil - if len(nc.srvPool) <= 0 { - return ErrNoServers - } - for _, s := range nc.srvPool { - if s != nil { - nc.url = s.url - return nil - } - } - return ErrNoServers -} - -const tlsScheme = "tls" - -// Create the server pool using the options given. -// We will place a Url option first, followed by any -// Server Options. We will randomize the server pool unlesss -// the NoRandomize flag is set. -func (nc *Conn) setupServerPool() error { - nc.srvPool = make([]*srv, 0, srvPoolSize) - nc.urls = make(map[string]struct{}, srvPoolSize) - - // Create srv objects from each url string in nc.Opts.Servers - // and add them to the pool - for _, urlString := range nc.Opts.Servers { - if err := nc.addURLToPool(urlString, false); err != nil { - return err - } - } - - // Randomize if allowed to - if !nc.Opts.NoRandomize { - nc.shufflePool() - } - - // Normally, if this one is set, Options.Servers should not be, - // but we always allowed that, so continue to do so. - if nc.Opts.Url != _EMPTY_ { - // Add to the end of the array - if err := nc.addURLToPool(nc.Opts.Url, false); err != nil { - return err - } - // Then swap it with first to guarantee that Options.Url is tried first. 
- last := len(nc.srvPool) - 1 - if last > 0 { - nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] - } - } else if len(nc.srvPool) <= 0 { - // Place default URL if pool is empty. - if err := nc.addURLToPool(DefaultURL, false); err != nil { - return err - } - } - - // Check for Scheme hint to move to TLS mode. - for _, srv := range nc.srvPool { - if srv.url.Scheme == tlsScheme { - // FIXME(dlc), this is for all in the pool, should be case by case. - nc.Opts.Secure = true - if nc.Opts.TLSConfig == nil { - nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - } - } - - return nc.pickServer() -} - -// addURLToPool adds an entry to the server pool -func (nc *Conn) addURLToPool(sURL string, implicit bool) error { - u, err := url.Parse(sURL) - if err != nil { - return err - } - s := &srv{url: u, isImplicit: implicit} - nc.srvPool = append(nc.srvPool, s) - nc.urls[u.Host] = struct{}{} - return nil -} - -// shufflePool swaps randomly elements in the server pool -func (nc *Conn) shufflePool() { - if len(nc.srvPool) <= 1 { - return - } - source := rand.NewSource(time.Now().UnixNano()) - r := rand.New(source) - for i := range nc.srvPool { - j := r.Intn(i + 1) - nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] - } -} - -// createConn will connect to the server and wrap the appropriate -// bufio structures. It will do the right thing when an existing -// connection is in place. -func (nc *Conn) createConn() (err error) { - if nc.Opts.Timeout < 0 { - return ErrBadTimeout - } - if _, cur := nc.currentServer(); cur == nil { - return ErrNoServers - } else { - cur.lastAttempt = time.Now() - } - - dialer := nc.Opts.Dialer - nc.conn, err = dialer.Dial("tcp", nc.url.Host) - if err != nil { - return err - } - - // No clue why, but this stalls and kills performance on Mac (Mavericks). - // https://code.google.com/p/go/issues/detail?id=6930 - //if ip, ok := nc.conn.(*net.TCPConn); ok { - // ip.SetReadBuffer(defaultBufSize) - //} - - if nc.pending != nil && nc.bw != nil { - // Move to pending buffer. - nc.bw.Flush() - } - nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) - return nil -} - -// makeTLSConn will wrap an existing Conn using TLS -func (nc *Conn) makeTLSConn() { - // Allow the user to configure their own tls.Config structure, otherwise - // default to InsecureSkipVerify. - // TODO(dlc) - We should make the more secure version the default. - if nc.Opts.TLSConfig != nil { - tlsCopy := util.CloneTLSConfig(nc.Opts.TLSConfig) - // If its blank we will override it with the current host - if tlsCopy.ServerName == _EMPTY_ { - h, _, _ := net.SplitHostPort(nc.url.Host) - tlsCopy.ServerName = h - } - nc.conn = tls.Client(nc.conn, tlsCopy) - } else { - nc.conn = tls.Client(nc.conn, &tls.Config{InsecureSkipVerify: true}) - } - conn := nc.conn.(*tls.Conn) - conn.Handshake() - nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) -} - -// waitForExits will wait for all socket watcher Go routines to -// be shutdown before proceeding. -func (nc *Conn) waitForExits() { - // Kick old flusher forcefully. - select { - case nc.fch <- true: - default: - } - - // Wait for any previous go routines. - nc.wg.Wait() -} - -// spinUpGoRoutines will launch the Go routines responsible for -// reading and writing to the socket. This will be launched via a -// go routine itself to release any locks that may be held. -// We also use a WaitGroup to make sure we only start them on a -// reconnect when the previous ones have exited. 
-func (nc *Conn) spinUpGoRoutines() { - // Make sure everything has exited. - nc.waitForExits() - - // We will wait on both. - nc.wg.Add(2) - - // Spin up the readLoop and the socket flusher. - go nc.readLoop() - go nc.flusher() - - nc.mu.Lock() - if nc.Opts.PingInterval > 0 { - if nc.ptmr == nil { - nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) - } else { - nc.ptmr.Reset(nc.Opts.PingInterval) - } - } - nc.mu.Unlock() -} - -// Report the connected server's Url -func (nc *Conn) ConnectedUrl() string { - if nc == nil { - return _EMPTY_ - } - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.url.String() -} - -// Report the connected server's Id -func (nc *Conn) ConnectedServerId() string { - if nc == nil { - return _EMPTY_ - } - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.info.Id -} - -// Low level setup for structs, etc -func (nc *Conn) setup() { - nc.subs = make(map[int64]*Subscription) - nc.pongs = make([]chan bool, 0, 8) - - nc.fch = make(chan bool, flushChanSize) - - // Setup scratch outbound buffer for PUB - pub := nc.scratch[:len(_PUB_P_)] - copy(pub, _PUB_P_) -} - -// Process a connected connection and initialize properly. -func (nc *Conn) processConnectInit() error { - - // Set out deadline for the whole connect process - nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) - defer nc.conn.SetDeadline(time.Time{}) - - // Set our status to connecting. - nc.status = CONNECTING - - // Process the INFO protocol received from the server - err := nc.processExpectedInfo() - if err != nil { - return err - } - - // Send the CONNECT protocol along with the initial PING protocol. - // Wait for the PONG response (or any error that we get from the server). - err = nc.sendConnect() - if err != nil { - return err - } - - // Reset the number of PING sent out - nc.pout = 0 - - go nc.spinUpGoRoutines() - - return nil -} - -// Main connect function. Will connect to the nats-server -func (nc *Conn) connect() error { - var returnedErr error - - // Create actual socket connection - // For first connect we walk all servers in the pool and try - // to connect immediately. - nc.mu.Lock() - // The pool may change inside theloop iteration due to INFO protocol. - for i := 0; i < len(nc.srvPool); i++ { - nc.url = nc.srvPool[i].url - - if err := nc.createConn(); err == nil { - // This was moved out of processConnectInit() because - // that function is now invoked from doReconnect() too. - nc.setup() - - err = nc.processConnectInit() - - if err == nil { - nc.srvPool[i].didConnect = true - nc.srvPool[i].reconnects = 0 - returnedErr = nil - break - } else { - returnedErr = err - nc.mu.Unlock() - nc.close(DISCONNECTED, false) - nc.mu.Lock() - nc.url = nil - } - } else { - // Cancel out default connection refused, will trigger the - // No servers error conditional - if matched, _ := regexp.Match(`connection refused`, []byte(err.Error())); matched { - returnedErr = nil - } - } - } - defer nc.mu.Unlock() - - if returnedErr == nil && nc.status != CONNECTED { - returnedErr = ErrNoServers - } - return returnedErr -} - -// This will check to see if the connection should be -// secure. This can be dictated from either end and should -// only be called after the INIT protocol has been received. 
-func (nc *Conn) checkForSecure() error { - // Check to see if we need to engage TLS - o := nc.Opts - - // Check for mismatch in setups - if o.Secure && !nc.info.TLSRequired { - return ErrSecureConnWanted - } else if nc.info.TLSRequired && !o.Secure { - return ErrSecureConnRequired - } - - // Need to rewrap with bufio - if o.Secure { - nc.makeTLSConn() - } - return nil -} - -// processExpectedInfo will look for the expected first INFO message -// sent when a connection is established. The lock should be held entering. -func (nc *Conn) processExpectedInfo() error { - - c := &control{} - - // Read the protocol - err := nc.readOp(c) - if err != nil { - return err - } - - // The nats protocol should send INFO first always. - if c.op != _INFO_OP_ { - return ErrNoInfoReceived - } - - // Parse the protocol - if err := nc.processInfo(c.args); err != nil { - return err - } - - err = nc.checkForSecure() - if err != nil { - return err - } - - return nil -} - -// Sends a protocol control message by queuing into the bufio writer -// and kicking the flush Go routine. These writes are protected. -func (nc *Conn) sendProto(proto string) { - nc.mu.Lock() - nc.bw.WriteString(proto) - nc.kickFlusher() - nc.mu.Unlock() -} - -// Generate a connect protocol message, issuing user/password if -// applicable. The lock is assumed to be held upon entering. -func (nc *Conn) connectProto() (string, error) { - o := nc.Opts - var user, pass, token string - u := nc.url.User - if u != nil { - // if no password, assume username is authToken - if _, ok := u.Password(); !ok { - token = u.Username() - } else { - user = u.Username() - pass, _ = u.Password() - } - } else { - // Take from options (pssibly all empty strings) - user = nc.Opts.User - pass = nc.Opts.Password - token = nc.Opts.Token - } - cinfo := connectInfo{o.Verbose, o.Pedantic, - user, pass, token, - o.Secure, o.Name, LangString, Version, clientProtoInfo} - b, err := json.Marshal(cinfo) - if err != nil { - return _EMPTY_, ErrJsonParse - } - return fmt.Sprintf(conProto, b), nil -} - -// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. -func normalizeErr(line string) string { - s := strings.ToLower(strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))) - s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") - return s -} - -// Send a connect protocol message to the server, issue user/password if -// applicable. Will wait for a flush to return from the server for error -// processing. -func (nc *Conn) sendConnect() error { - - // Construct the CONNECT protocol string - cProto, err := nc.connectProto() - if err != nil { - return err - } - - // Write the protocol into the buffer - _, err = nc.bw.WriteString(cProto) - if err != nil { - return err - } - - // Add to the buffer the PING protocol - _, err = nc.bw.WriteString(pingProto) - if err != nil { - return err - } - - // Flush the buffer - err = nc.bw.Flush() - if err != nil { - return err - } - - // Now read the response from the server. - br := bufio.NewReaderSize(nc.conn, defaultBufSize) - line, err := br.ReadString('\n') - if err != nil { - return err - } - - // If opts.Verbose is set, handle +OK - if nc.Opts.Verbose && line == okProto { - // Read the rest now... - line, err = br.ReadString('\n') - if err != nil { - return err - } - } - - // We expect a PONG - if line != pongProto { - // But it could be something else, like -ERR - - // Since we no longer use ReadLine(), trim the trailing "\r\n" - line = strings.TrimRight(line, "\r\n") - - // If it's a server error... 
- if strings.HasPrefix(line, _ERR_OP_) { - // Remove -ERR, trim spaces and quotes, and convert to lower case. - line = normalizeErr(line) - return errors.New("nats: " + line) - } - - // Notify that we got an unexpected protocol. - return errors.New(fmt.Sprintf("nats: expected '%s', got '%s'", _PONG_OP_, line)) - } - - // This is where we are truly connected. - nc.status = CONNECTED - - return nil -} - -// A control protocol line. -type control struct { - op, args string -} - -// Read a control line and process the intended op. -func (nc *Conn) readOp(c *control) error { - br := bufio.NewReaderSize(nc.conn, defaultBufSize) - line, err := br.ReadString('\n') - if err != nil { - return err - } - parseControl(line, c) - return nil -} - -// Parse a control line from the server. -func parseControl(line string, c *control) { - toks := strings.SplitN(line, _SPC_, 2) - if len(toks) == 1 { - c.op = strings.TrimSpace(toks[0]) - c.args = _EMPTY_ - } else if len(toks) == 2 { - c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) - } else { - c.op = _EMPTY_ - } -} - -// flushReconnectPending will push the pending items that were -// gathered while we were in a RECONNECTING state to the socket. -func (nc *Conn) flushReconnectPendingItems() { - if nc.pending == nil { - return - } - if nc.pending.Len() > 0 { - nc.bw.Write(nc.pending.Bytes()) - } -} - -// Try to reconnect using the option parameters. -// This function assumes we are allowed to reconnect. -func (nc *Conn) doReconnect() { - // We want to make sure we have the other watchers shutdown properly - // here before we proceed past this point. - nc.waitForExits() - - // FIXME(dlc) - We have an issue here if we have - // outstanding flush points (pongs) and they were not - // sent out, but are still in the pipe. - - // Hold the lock manually and release where needed below, - // can't do defer here. - nc.mu.Lock() - - // Clear any queued pongs, e.g. pending flush calls. - nc.clearPendingFlushCalls() - - // Clear any errors. - nc.err = nil - - // Perform appropriate callback if needed for a disconnect. - if nc.Opts.DisconnectedCB != nil { - nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } - } - - for len(nc.srvPool) > 0 { - cur, err := nc.selectNextServer() - if err != nil { - nc.err = err - break - } - - sleepTime := int64(0) - - // Sleep appropriate amount of time before the - // connection attempt if connecting to same server - // we just got disconnected from.. - if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait { - sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt)) - } - - // On Windows, createConn() will take more than a second when no - // server is running at that address. So it could be that the - // time elapsed between reconnect attempts is always > than - // the set option. Release the lock to give a chance to a parallel - // nc.Close() to break the loop. - nc.mu.Unlock() - if sleepTime <= 0 { - runtime.Gosched() - } else { - time.Sleep(time.Duration(sleepTime)) - } - nc.mu.Lock() - - // Check if we have been closed first. - if nc.isClosed() { - break - } - - // Mark that we tried a reconnect - cur.reconnects++ - - // Try to create a new connection - err = nc.createConn() - - // Not yet connected, retry... 
- // Continue to hold the lock - if err != nil { - nc.err = nil - continue - } - - // We are reconnected - nc.Reconnects++ - - // Process connect logic - if nc.err = nc.processConnectInit(); nc.err != nil { - nc.status = RECONNECTING - continue - } - - // Clear out server stats for the server we connected to.. - cur.didConnect = true - cur.reconnects = 0 - - // Send existing subscription state - nc.resendSubscriptions() - - // Now send off and clear pending buffer - nc.flushReconnectPendingItems() - - // Flush the buffer - nc.err = nc.bw.Flush() - if nc.err != nil { - nc.status = RECONNECTING - continue - } - - // Done with the pending buffer - nc.pending = nil - - // This is where we are truly connected. - nc.status = CONNECTED - - // Queue up the reconnect callback. - if nc.Opts.ReconnectedCB != nil { - nc.ach <- func() { nc.Opts.ReconnectedCB(nc) } - } - - // Release lock here, we will return below. - nc.mu.Unlock() - - // Make sure to flush everything - nc.Flush() - - return - } - - // Call into close.. We have no servers left.. - if nc.err == nil { - nc.err = ErrNoServers - } - nc.mu.Unlock() - nc.Close() -} - -// processOpErr handles errors from reading or parsing the protocol. -// The lock should not be held entering this function. -func (nc *Conn) processOpErr(err error) { - nc.mu.Lock() - if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { - nc.mu.Unlock() - return - } - - if nc.Opts.AllowReconnect && nc.status == CONNECTED { - // Set our new status - nc.status = RECONNECTING - if nc.ptmr != nil { - nc.ptmr.Stop() - } - if nc.conn != nil { - nc.bw.Flush() - nc.conn.Close() - nc.conn = nil - } - - // Create a new pending buffer to underpin the bufio Writer while - // we are reconnecting. - nc.pending = &bytes.Buffer{} - nc.bw = bufio.NewWriterSize(nc.pending, nc.Opts.ReconnectBufSize) - - go nc.doReconnect() - nc.mu.Unlock() - return - } - - nc.status = DISCONNECTED - nc.err = err - nc.mu.Unlock() - nc.Close() -} - -// Marker to close the channel to kick out the Go routine. -func (nc *Conn) closeAsyncFunc() asyncCB { - return func() { - nc.mu.Lock() - if nc.ach != nil { - close(nc.ach) - nc.ach = nil - } - nc.mu.Unlock() - } -} - -// asyncDispatch is responsible for calling any async callbacks -func (nc *Conn) asyncDispatch() { - // snapshot since they can change from underneath of us. - nc.mu.Lock() - ach := nc.ach - nc.mu.Unlock() - - // Loop on the channel and process async callbacks. - for { - if f, ok := <-ach; !ok { - return - } else { - f() - } - } -} - -// readLoop() will sit on the socket reading and processing the -// protocol from the server. It will dispatch appropriately based -// on the op type. -func (nc *Conn) readLoop() { - // Release the wait group on exit - defer nc.wg.Done() - - // Create a parseState if needed. - nc.mu.Lock() - if nc.ps == nil { - nc.ps = &parseState{} - } - nc.mu.Unlock() - - // Stack based buffer. - b := make([]byte, defaultBufSize) - - for { - // FIXME(dlc): RWLock here? - nc.mu.Lock() - sb := nc.isClosed() || nc.isReconnecting() - if sb { - nc.ps = &parseState{} - } - conn := nc.conn - nc.mu.Unlock() - - if sb || conn == nil { - break - } - - n, err := conn.Read(b) - if err != nil { - nc.processOpErr(err) - break - } - - if err := nc.parse(b[:n]); err != nil { - nc.processOpErr(err) - break - } - } - // Clear the parseState here.. - nc.mu.Lock() - nc.ps = nil - nc.mu.Unlock() -} - -// waitForMsgs waits on the conditional shared with readLoop and processMsg. -// It is used to deliver messages to asynchronous subscribers. 
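For reference, while the connection is RECONNECTING the publish path above writes into the pending bytes.Buffer that doReconnect() later flushes, capped by ReconnectBufSize. A sketch of tuning that through the Options struct; the values are illustrative:

package main

import (
	"log"
	"time"

	nats "github.com/nats-io/go-nats"
)

func main() {
	opts := nats.DefaultOptions
	opts.Url = nats.DefaultURL
	opts.ReconnectWait = 250 * time.Millisecond // illustrative values
	opts.MaxReconnect = 20
	opts.ReconnectBufSize = 4 * 1024 * 1024 // cap buffered publishes at 4MB

	nc, err := opts.Connect()
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// During a reconnect, Publish fails fast once the pending buffer
	// is full instead of blocking or silently dropping.
	if err := nc.Publish("updates", []byte("x")); err == nats.ErrReconnectBufExceeded {
		log.Println("reconnect buffer full; applying backpressure")
	}
}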
-func (nc *Conn) waitForMsgs(s *Subscription) { - var closed bool - var delivered, max uint64 - - for { - s.mu.Lock() - if s.pHead == nil && !s.closed { - s.pCond.Wait() - } - // Pop the msg off the list - m := s.pHead - if m != nil { - s.pHead = m.next - if s.pHead == nil { - s.pTail = nil - } - s.pMsgs-- - s.pBytes -= len(m.Data) - } - mcb := s.mcb - max = s.max - closed = s.closed - if !s.closed { - s.delivered++ - delivered = s.delivered - } - s.mu.Unlock() - - if closed { - break - } - - // Deliver the message. - if m != nil && (max == 0 || delivered <= max) { - mcb(m) - } - // If we have hit the max for delivered msgs, remove sub. - if max > 0 && delivered >= max { - nc.mu.Lock() - nc.removeSub(s) - nc.mu.Unlock() - break - } - } -} - -// processMsg is called by parse and will place the msg on the -// appropriate channel/pending queue for processing. If the channel is full, -// or the pending queue is over the pending limits, the connection is -// considered a slow consumer. -func (nc *Conn) processMsg(data []byte) { - // Lock from here on out. - nc.mu.Lock() - - // Stats - nc.InMsgs++ - nc.InBytes += uint64(len(data)) - - sub := nc.subs[nc.ps.ma.sid] - if sub == nil { - nc.mu.Unlock() - return - } - - // Copy them into string - subj := string(nc.ps.ma.subject) - reply := string(nc.ps.ma.reply) - - // Doing message create outside of the sub's lock to reduce contention. - // It's possible that we end-up not using the message, but that's ok. - - // FIXME(dlc): Need to copy, should/can do COW? - msgPayload := make([]byte, len(data)) - copy(msgPayload, data) - - // FIXME(dlc): Should we recycle these containers? - m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub} - - sub.mu.Lock() - - // Subscription internal stats (applicable only for non ChanSubscription's) - if sub.typ != ChanSubscription { - sub.pMsgs++ - if sub.pMsgs > sub.pMsgsMax { - sub.pMsgsMax = sub.pMsgs - } - sub.pBytes += len(m.Data) - if sub.pBytes > sub.pBytesMax { - sub.pBytesMax = sub.pBytes - } - - // Check for a Slow Consumer - if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || - (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { - goto slowConsumer - } - } - - // We have two modes of delivery. One is the channel, used by channel - // subscribers and syncSubscribers, the other is a linked list for async. - if sub.mch != nil { - select { - case sub.mch <- m: - default: - goto slowConsumer - } - } else { - // Push onto the async pList - if sub.pHead == nil { - sub.pHead = m - sub.pTail = m - sub.pCond.Signal() - } else { - sub.pTail.next = m - sub.pTail = m - } - } - - // Clear SlowConsumer status. - sub.sc = false - - sub.mu.Unlock() - nc.mu.Unlock() - return - -slowConsumer: - sub.dropped++ - nc.processSlowConsumer(sub) - // Undo stats from above - if sub.typ != ChanSubscription { - sub.pMsgs-- - sub.pBytes -= len(m.Data) - } - sub.mu.Unlock() - nc.mu.Unlock() - return -} - -// processSlowConsumer will set SlowConsumer state and fire the -// async error handler if registered. -func (nc *Conn) processSlowConsumer(s *Subscription) { - nc.err = ErrSlowConsumer - if nc.Opts.AsyncErrorCB != nil && !s.sc { - nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, s, ErrSlowConsumer) } - } - s.sc = true -} - -// processPermissionsViolation is called when the server signals a subject -// permissions violation on either publish or subscribe. 
-func (nc *Conn) processPermissionsViolation(err string) { - nc.err = errors.New("nats: " + err) - if nc.Opts.AsyncErrorCB != nil { - nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, nc.err) } - } -} - -// flusher is a separate Go routine that will process flush requests for the write -// bufio. This allows coalescing of writes to the underlying socket. -func (nc *Conn) flusher() { - // Release the wait group - defer nc.wg.Done() - - // snapshot the bw and conn since they can change from underneath of us. - nc.mu.Lock() - bw := nc.bw - conn := nc.conn - fch := nc.fch - nc.mu.Unlock() - - if conn == nil || bw == nil { - return - } - - for { - if _, ok := <-fch; !ok { - return - } - nc.mu.Lock() - - // Check to see if we should bail out. - if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn { - nc.mu.Unlock() - return - } - if bw.Buffered() > 0 { - if err := bw.Flush(); err != nil { - if nc.err == nil { - nc.err = err - } - } - } - nc.mu.Unlock() - } -} - -// processPing will send an immediate pong protocol response to the -// server. The server uses this mechanism to detect dead clients. -func (nc *Conn) processPing() { - nc.sendProto(pongProto) -} - -// processPong is used to process responses to the client's ping -// messages. We use pings for the flush mechanism as well. -func (nc *Conn) processPong() { - var ch chan bool - - nc.mu.Lock() - if len(nc.pongs) > 0 { - ch = nc.pongs[0] - nc.pongs = nc.pongs[1:] - } - nc.pout = 0 - nc.mu.Unlock() - if ch != nil { - ch <- true - } -} - -// processOK is a placeholder for processing OK messages. -func (nc *Conn) processOK() { - // do nothing -} - -// processInfo is used to parse the info messages sent -// from the server. -// This function may update the server pool. -func (nc *Conn) processInfo(info string) error { - if info == _EMPTY_ { - return nil - } - if err := json.Unmarshal([]byte(info), &nc.info); err != nil { - return err - } - updated := false - urls := nc.info.ConnectURLs - for _, curl := range urls { - if _, present := nc.urls[curl]; !present { - if err := nc.addURLToPool(fmt.Sprintf("nats://%s", curl), true); err != nil { - continue - } - updated = true - } - } - if updated && !nc.Opts.NoRandomize { - nc.shufflePool() - } - return nil -} - -// processAsyncInfo does the same than processInfo, but is called -// from the parser. Calls processInfo under connection's lock -// protection. -func (nc *Conn) processAsyncInfo(info []byte) { - nc.mu.Lock() - // Ignore errors, we will simply not update the server pool... - nc.processInfo(string(info)) - nc.mu.Unlock() -} - -// LastError reports the last error encountered via the connection. -// It can be used reliably within ClosedCB in order to find out reason -// why connection was closed for example. -func (nc *Conn) LastError() error { - if nc == nil { - return ErrInvalidConnection - } - nc.mu.Lock() - err := nc.err - nc.mu.Unlock() - return err -} - -// processErr processes any error messages from the server and -// sets the connection's lastError. -func (nc *Conn) processErr(e string) { - // Trim, remove quotes, convert to lower case. - e = normalizeErr(e) - - // FIXME(dlc) - process Slow Consumer signals special. 
- if e == STALE_CONNECTION { - nc.processOpErr(ErrStaleConnection) - } else if strings.HasPrefix(e, PERMISSIONS_ERR) { - nc.processPermissionsViolation(e) - } else { - nc.mu.Lock() - nc.err = errors.New("nats: " + e) - nc.mu.Unlock() - nc.Close() - } -} - -// kickFlusher will send a bool on a channel to kick the -// flush Go routine to flush data to the server. -func (nc *Conn) kickFlusher() { - if nc.bw != nil { - select { - case nc.fch <- true: - default: - } - } -} - -// Publish publishes the data argument to the given subject. The data -// argument is left untouched and needs to be correctly interpreted on -// the receiver. -func (nc *Conn) Publish(subj string, data []byte) error { - return nc.publish(subj, _EMPTY_, data) -} - -// PublishMsg publishes the Msg structure, which includes the -// Subject, an optional Reply and an optional Data field. -func (nc *Conn) PublishMsg(m *Msg) error { - if m == nil { - return ErrInvalidMsg - } - return nc.publish(m.Subject, m.Reply, m.Data) -} - -// PublishRequest will perform a Publish() excpecting a response on the -// reply subject. Use Request() for automatically waiting for a response -// inline. -func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { - return nc.publish(subj, reply, data) -} - -// Used for handrolled itoa -const digits = "0123456789" - -// publish is the internal function to publish messages to a nats-server. -// Sends a protocol data message by queuing into the bufio writer -// and kicking the flush go routine. These writes should be protected. -func (nc *Conn) publish(subj, reply string, data []byte) error { - if nc == nil { - return ErrInvalidConnection - } - if subj == "" { - return ErrBadSubject - } - nc.mu.Lock() - - // Proactively reject payloads over the threshold set by server. - var msgSize int64 - msgSize = int64(len(data)) - if msgSize > nc.info.MaxPayload { - nc.mu.Unlock() - return ErrMaxPayload - } - - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - - // Check if we are reconnecting, and if so check if - // we have exceeded our reconnect outbound buffer limits. - if nc.isReconnecting() { - // Flush to underlying buffer. - nc.bw.Flush() - // Check if we are over - if nc.pending.Len() >= nc.Opts.ReconnectBufSize { - nc.mu.Unlock() - return ErrReconnectBufExceeded - } - } - - msgh := nc.scratch[:len(_PUB_P_)] - msgh = append(msgh, subj...) - msgh = append(msgh, ' ') - if reply != "" { - msgh = append(msgh, reply...) - msgh = append(msgh, ' ') - } - - // We could be smarter here, but simple loop is ok, - // just avoid strconv in fast path - // FIXME(dlc) - Find a better way here. - // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) - - var b [12]byte - var i = len(b) - if len(data) > 0 { - for l := len(data); l > 0; l /= 10 { - i -= 1 - b[i] = digits[l%10] - } - } else { - i -= 1 - b[i] = digits[0] - } - - msgh = append(msgh, b[i:]...) - msgh = append(msgh, _CRLF_...) - - // FIXME, do deadlines here - _, err := nc.bw.Write(msgh) - if err == nil { - _, err = nc.bw.Write(data) - } - if err == nil { - _, err = nc.bw.WriteString(_CRLF_) - } - if err != nil { - nc.mu.Unlock() - return err - } - - nc.OutMsgs++ - nc.OutBytes += uint64(len(data)) - - if len(nc.fch) == 0 { - nc.kickFlusher() - } - nc.mu.Unlock() - return nil -} - -// Request will create an Inbox and perform a Request() call -// with the Inbox reply and return the first reply received. -// This is optimized for the case of multiple responses. 
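For reference, Publish, PublishMsg, and PublishRequest above all emit the same wire framing, PUB <subject> [reply] <#bytes>, followed by the payload; the hand-rolled itoa merely avoids strconv in the hot path. The three entry points side by side, as a sketch:

package main

import (
	"log"

	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// All three produce a PUB frame; they differ only in how the
	// optional reply subject is supplied.
	nc.Publish("greet", []byte("hello"))
	nc.PublishRequest("greet", "greet.reply", []byte("hello"))
	nc.PublishMsg(&nats.Msg{Subject: "greet", Reply: "greet.reply", Data: []byte("hello")})

	if err := nc.Flush(); err != nil {
		log.Fatal(err)
	}
}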
-func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { - inbox := NewInbox() - ch := make(chan *Msg, RequestChanLen) - - s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) - if err != nil { - return nil, err - } - s.AutoUnsubscribe(1) - defer s.Unsubscribe() - - err = nc.PublishRequest(subj, inbox, data) - if err != nil { - return nil, err - } - return s.NextMsg(timeout) -} - -// InboxPrefix is the prefix for all inbox subjects. -const InboxPrefix = "_INBOX." -const inboxPrefixLen = len(InboxPrefix) - -// NewInbox will return an inbox string which can be used for directed replies from -// subscribers. These are guaranteed to be unique, but can be shared and subscribed -// to by others. -func NewInbox() string { - var b [inboxPrefixLen + 22]byte - pres := b[:inboxPrefixLen] - copy(pres, InboxPrefix) - ns := b[inboxPrefixLen:] - copy(ns, nuid.Next()) - return string(b[:]) -} - -// Subscribe will express interest in the given subject. The subject -// can have wildcards (partial:*, full:>). Messages will be delivered -// to the associated MsgHandler. If no MsgHandler is given, the -// subscription is a synchronous subscription and can be polled via -// Subscription.NextMsg(). -func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, cb, nil) -} - -// ChanSubscribe will place all messages received on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, nil, ch) -} - -// ChanQueueSubscribe will place all messages received on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, group, nil, ch) -} - -// SubscribeSync is syntactic sugar for Subscribe(subject, nil). -func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - mch := make(chan *Msg, nc.Opts.SubChanLen) - s, e := nc.subscribe(subj, _EMPTY_, nil, mch) - if s != nil { - s.typ = SyncSubscription - } - return s, e -} - -// QueueSubscribe creates an asynchronous queue subscriber on the given subject. -// All subscribers with the same queue name will form the queue group and -// only one member of the group will be selected to receive any given -// message asynchronously. -func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, queue, cb, nil) -} - -// QueueSubscribeSync creates a synchronous queue subscriber on the given -// subject. All subscribers with the same queue name will form the queue -// group and only one member of the group will be selected to receive any -// given message synchronously. -func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { - mch := make(chan *Msg, nc.Opts.SubChanLen) - s, e := nc.subscribe(subj, queue, nil, mch) - if s != nil { - s.typ = SyncSubscription - } - return s, e -} - -// QueueSubscribeSyncWithChan is syntactic sugar for ChanQueueSubscribe(subject, group, ch). -func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, queue, nil, ch) -} - -// subscribe is the internal subscribe function that indicates interest in a subject. 
-func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - nc.mu.Lock() - // ok here, but defer is generally expensive - defer nc.mu.Unlock() - defer nc.kickFlusher() - - // Check for some error conditions. - if nc.isClosed() { - return nil, ErrConnectionClosed - } - - if cb == nil && ch == nil { - return nil, ErrBadSubscription - } - - sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc} - // Set pending limits. - sub.pMsgsLimit = DefaultSubPendingMsgsLimit - sub.pBytesLimit = DefaultSubPendingBytesLimit - - // If we have an async callback, start up a sub specific - // Go routine to deliver the messages. - if cb != nil { - sub.typ = AsyncSubscription - sub.pCond = sync.NewCond(&sub.mu) - go nc.waitForMsgs(sub) - } else { - sub.typ = ChanSubscription - sub.mch = ch - } - - sub.sid = atomic.AddInt64(&nc.ssid, 1) - nc.subs[sub.sid] = sub - - // We will send these for all subs when we reconnect - // so that we can suppress here. - if !nc.isReconnecting() { - nc.bw.WriteString(fmt.Sprintf(subProto, subj, queue, sub.sid)) - } - return sub, nil -} - -// Lock for nc should be held here upon entry -func (nc *Conn) removeSub(s *Subscription) { - delete(nc.subs, s.sid) - s.mu.Lock() - defer s.mu.Unlock() - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - - // Mark as invalid - s.conn = nil - s.closed = true - if s.pCond != nil { - s.pCond.Broadcast() - } -} - -// SubscriptionType is the type of the Subscription. -type SubscriptionType int - -// The different types of subscription types. -const ( - AsyncSubscription = SubscriptionType(iota) - SyncSubscription - ChanSubscription - NilSubscription -) - -// Type returns the type of Subscription. -func (s *Subscription) Type() SubscriptionType { - if s == nil { - return NilSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - return s.typ -} - -// IsValid returns a boolean indicating whether the subscription -// is still active. This will return false if the subscription has -// already been closed. -func (s *Subscription) IsValid() bool { - if s == nil { - return false - } - s.mu.Lock() - defer s.mu.Unlock() - return s.conn != nil -} - -// Unsubscribe will remove interest in the given subject. -func (s *Subscription) Unsubscribe() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - s.mu.Unlock() - if conn == nil { - return ErrBadSubscription - } - return conn.unsubscribe(s, 0) -} - -// AutoUnsubscribe will issue an automatic Unsubscribe that is -// processed by the server when max messages have been received. -// This can be useful when sending a request to an unknown number -// of subscribers. Request() uses this functionality. -func (s *Subscription) AutoUnsubscribe(max int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - s.mu.Unlock() - if conn == nil { - return ErrBadSubscription - } - return conn.unsubscribe(s, max) -} - -// unsubscribe performs the low level unsubscribe to the server. 
-// Use Subscription.Unsubscribe() -func (nc *Conn) unsubscribe(sub *Subscription, max int) error { - nc.mu.Lock() - // ok here, but defer is expensive - defer nc.mu.Unlock() - defer nc.kickFlusher() - - if nc.isClosed() { - return ErrConnectionClosed - } - - s := nc.subs[sub.sid] - // Already unsubscribed - if s == nil { - return nil - } - - maxStr := _EMPTY_ - if max > 0 { - s.max = uint64(max) - maxStr = strconv.Itoa(max) - } else { - nc.removeSub(s) - } - // We will send these for all subs when we reconnect - // so that we can suppress here. - if !nc.isReconnecting() { - nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) - } - return nil -} - -// NextMsg() will return the next message available to a synchronous subscriber -// or block until one is available. A timeout can be used to return when no -// message has been delivered. -func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { - if s == nil { - return nil, ErrBadSubscription - } - s.mu.Lock() - if s.connClosed { - s.mu.Unlock() - return nil, ErrConnectionClosed - } - if s.mch == nil { - if s.max > 0 && s.delivered >= s.max { - s.mu.Unlock() - return nil, ErrMaxMessages - } else if s.closed { - s.mu.Unlock() - return nil, ErrBadSubscription - } - } - if s.mcb != nil { - s.mu.Unlock() - return nil, ErrSyncSubRequired - } - if s.sc { - s.sc = false - s.mu.Unlock() - return nil, ErrSlowConsumer - } - - // snapshot - nc := s.conn - mch := s.mch - max := s.max - s.mu.Unlock() - - var ok bool - var msg *Msg - - t := time.NewTimer(timeout) - defer t.Stop() - - select { - case msg, ok = <-mch: - if !ok { - return nil, ErrConnectionClosed - } - // Update some stats. - s.mu.Lock() - s.delivered++ - delivered := s.delivered - if s.typ == SyncSubscription { - s.pMsgs-- - s.pBytes -= len(msg.Data) - } - s.mu.Unlock() - - if max > 0 { - if delivered > max { - return nil, ErrMaxMessages - } - // Remove subscription if we have reached max. - if delivered == max { - nc.mu.Lock() - nc.removeSub(s) - nc.mu.Unlock() - } - } - - case <-t.C: - return nil, ErrTimeout - } - - return msg, nil -} - -// Queued returns the number of queued messages in the client for this subscription. -// DEPRECATED: Use Pending() -func (s *Subscription) QueuedMsgs() (int, error) { - m, _, err := s.Pending() - return int(m), err -} - -// Pending returns the number of queued messages and queued bytes in the client for this subscription. -func (s *Subscription) Pending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgs, s.pBytes, nil -} - -// MaxPending returns the maximum number of queued messages and queued bytes seen so far. -func (s *Subscription) MaxPending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsMax, s.pBytesMax, nil -} - -// ClearMaxPending resets the maximums seen so far. 
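For reference, NextMsg() above combines naturally with AutoUnsubscribe(): once the server-enforced maximum is reached, NextMsg reports ErrMaxMessages. A polling sketch with illustrative limits:

package main

import (
	"log"
	"time"

	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	sub, err := nc.SubscribeSync("replies")
	if err != nil {
		log.Fatal(err)
	}
	sub.AutoUnsubscribe(10) // server stops delivering after 10 messages

	for {
		m, err := sub.NextMsg(2 * time.Second)
		if err == nats.ErrTimeout {
			continue // nothing this interval; keep waiting
		}
		if err != nil {
			// nats.ErrMaxMessages once the AutoUnsubscribe limit is hit.
			log.Printf("done: %v", err)
			return
		}
		log.Printf("got %q", m.Data)
	}
}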
-func (s *Subscription) ClearMaxPending() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - s.pMsgsMax, s.pBytesMax = 0, 0 - return nil -} - -// Pending Limits -const ( - DefaultSubPendingMsgsLimit = 65536 - DefaultSubPendingBytesLimit = 65536 * 1024 -) - -// PendingLimits returns the current limits for this subscription. -// If no error is returned, a negative value indicates that the -// given metric is not limited. -func (s *Subscription) PendingLimits() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsLimit, s.pBytesLimit, nil -} - -// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. -// Zero is not allowed. Any negative value means that the given metric is not limited. -func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - if msgLimit == 0 || bytesLimit == 0 { - return ErrInvalidArg - } - s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit - return nil -} - -// Delivered returns the number of delivered messages for this subscription. -func (s *Subscription) Delivered() (int64, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return -1, ErrBadSubscription - } - return int64(s.delivered), nil -} - -// Dropped returns the number of known dropped messages for this subscription. -// This will correspond to messages dropped by violations of PendingLimits. If -// the server declares the connection a SlowConsumer, this number may not be -// valid. -func (s *Subscription) Dropped() (int, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil { - return -1, ErrBadSubscription - } - return s.dropped, nil -} - -// FIXME: This is a hack -// removeFlushEntry is needed when we need to discard queued up responses -// for our pings as part of a flush call. This happens when we have a flush -// call outstanding and we call close. -func (nc *Conn) removeFlushEntry(ch chan bool) bool { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.pongs == nil { - return false - } - for i, c := range nc.pongs { - if c == ch { - nc.pongs[i] = nil - return true - } - } - return false -} - -// The lock must be held entering this function. -func (nc *Conn) sendPing(ch chan bool) { - nc.pongs = append(nc.pongs, ch) - nc.bw.WriteString(pingProto) - // Flush in place. - nc.bw.Flush() -} - -// This will fire periodically and send a client origin -// ping to the server. Will also check that we have received -// responses from the server. -func (nc *Conn) processPingTimer() { - nc.mu.Lock() - - if nc.status != CONNECTED { - nc.mu.Unlock() - return - } - - // Check for violation - nc.pout++ - if nc.pout > nc.Opts.MaxPingsOut { - nc.mu.Unlock() - nc.processOpErr(ErrStaleConnection) - return - } - - nc.sendPing(nil) - nc.ptmr.Reset(nc.Opts.PingInterval) - nc.mu.Unlock() -} - -// FlushTimeout allows a Flush operation to have an associated timeout. 
-func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { - if nc == nil { - return ErrInvalidConnection - } - if timeout <= 0 { - return ErrBadTimeout - } - - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - t := time.NewTimer(timeout) - defer t.Stop() - - ch := make(chan bool) // FIXME: Inefficient? - nc.sendPing(ch) - nc.mu.Unlock() - - select { - case _, ok := <-ch: - if !ok { - err = ErrConnectionClosed - } else { - close(ch) - } - case <-t.C: - err = ErrTimeout - } - - if err != nil { - nc.removeFlushEntry(ch) - } - return -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (nc *Conn) Flush() error { - return nc.FlushTimeout(60 * time.Second) -} - -// Buffered will return the number of bytes buffered to be sent to the server. -// FIXME(dlc) take into account disconnected state. -func (nc *Conn) Buffered() (int, error) { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.isClosed() || nc.bw == nil { - return -1, ErrConnectionClosed - } - return nc.bw.Buffered(), nil -} - -// resendSubscriptions will send our subscription state back to the -// server. Used in reconnects -func (nc *Conn) resendSubscriptions() { - for _, s := range nc.subs { - adjustedMax := uint64(0) - s.mu.Lock() - if s.max > 0 { - if s.delivered < s.max { - adjustedMax = s.max - s.delivered - } - - // adjustedMax could be 0 here if the number of delivered msgs - // reached the max, if so unsubscribe. - if adjustedMax == 0 { - s.mu.Unlock() - nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) - continue - } - } - s.mu.Unlock() - - nc.bw.WriteString(fmt.Sprintf(subProto, s.Subject, s.Queue, s.sid)) - if adjustedMax > 0 { - maxStr := strconv.Itoa(int(adjustedMax)) - nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) - } - } -} - -// This will clear any pending flush calls and release pending calls. -// Lock is assumed to be held by the caller. -func (nc *Conn) clearPendingFlushCalls() { - // Clear any queued pongs, e.g. pending flush calls. - for _, ch := range nc.pongs { - if ch != nil { - close(ch) - } - } - nc.pongs = nil -} - -// Low level close call that will do correct cleanup and set -// desired status. Also controls whether user defined callbacks -// will be triggered. The lock should not be held entering this -// function. This function will handle the locking manually. -func (nc *Conn) close(status Status, doCBs bool) { - nc.mu.Lock() - if nc.isClosed() { - nc.status = status - nc.mu.Unlock() - return - } - nc.status = CLOSED - - // Kick the Go routines so they fall out. - nc.kickFlusher() - nc.mu.Unlock() - - nc.mu.Lock() - - // Clear any queued pongs, e.g. pending flush calls. - nc.clearPendingFlushCalls() - - if nc.ptmr != nil { - nc.ptmr.Stop() - } - - // Go ahead and make sure we have flushed the outbound - if nc.conn != nil { - nc.bw.Flush() - defer nc.conn.Close() - } - - // Close sync subscriber channels and release any - // pending NextMsg() calls. 
- for _, s := range nc.subs { - s.mu.Lock() - - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - // Mark as invalid, for signalling to deliverMsgs - s.closed = true - // Mark connection closed in subscription - s.connClosed = true - // If we have an async subscription, signal it to exit - if s.typ == AsyncSubscription && s.pCond != nil { - s.pCond.Signal() - } - - s.mu.Unlock() - } - nc.subs = nil - - // Perform appropriate callback if needed for a disconnect. - if doCBs { - if nc.Opts.DisconnectedCB != nil && nc.conn != nil { - nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } - } - if nc.Opts.ClosedCB != nil { - nc.ach <- func() { nc.Opts.ClosedCB(nc) } - } - nc.ach <- nc.closeAsyncFunc() - } - nc.status = status - nc.mu.Unlock() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush() and NextMsg(). -func (nc *Conn) Close() { - nc.close(CLOSED, true) -} - -// IsClosed tests if a Conn has been closed. -func (nc *Conn) IsClosed() bool { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.isClosed() -} - -// IsReconnecting tests if a Conn is reconnecting. -func (nc *Conn) IsReconnecting() bool { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.isReconnecting() -} - -// IsConnected tests if a Conn is connected. -func (nc *Conn) IsConnected() bool { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.isConnected() -} - -// caller must lock -func (nc *Conn) getServers(implicitOnly bool) []string { - poolSize := len(nc.srvPool) - var servers = make([]string, 0) - for i := 0; i < poolSize; i++ { - if implicitOnly && !nc.srvPool[i].isImplicit { - continue - } - url := nc.srvPool[i].url - servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) - } - return servers -} - -// Servers returns the list of known server urls, including additional -// servers discovered after a connection has been established. If -// authentication is enabled, use UserInfo or Token when connecting with -// these urls. -func (nc *Conn) Servers() []string { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.getServers(false) -} - -// DiscoveredServers returns only the server urls that have been discovered -// after a connection has been established. If authentication is enabled, -// use UserInfo or Token when connecting with these urls. -func (nc *Conn) DiscoveredServers() []string { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.getServers(true) -} - -// Status returns the current state of the connection. -func (nc *Conn) Status() Status { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.status -} - -// Test if Conn has been closed. Lock is assumed held. -func (nc *Conn) isClosed() bool { - return nc.status == CLOSED -} - -// Test if Conn is in the process of connecting -func (nc *Conn) isConnecting() bool { - return nc.status == CONNECTING -} - -// Test if Conn is being reconnected. -func (nc *Conn) isReconnecting() bool { - return nc.status == RECONNECTING -} - -// Test if Conn is connected. -func (nc *Conn) isConnected() bool { - return nc.status == CONNECTED -} - -// Stats will return a race safe copy of the Statistics section for the connection. -func (nc *Conn) Stats() Statistics { - nc.mu.Lock() - defer nc.mu.Unlock() - stats := nc.Statistics - return stats -} - -// MaxPayload returns the size limit that a message payload can have. -// This is set by the server configuration and delivered to the client -// upon connect.
-func (nc *Conn) MaxPayload() int64 { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.info.MaxPayload -} - -// AuthRequired will return if the connected server requires authorization. -func (nc *Conn) AuthRequired() bool { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.info.AuthRequired -} - -// TLSRequired will return if the connected server requires TLS connections. -func (nc *Conn) TLSRequired() bool { - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.info.TLSRequired -} diff --git a/vendor/github.com/nats-io/go-nats/nats_test.go b/vendor/github.com/nats-io/go-nats/nats_test.go deleted file mode 100644 index ef68782b3..000000000 --- a/vendor/github.com/nats-io/go-nats/nats_test.go +++ /dev/null @@ -1,1100 +0,0 @@ -package nats - -//////////////////////////////////////////////////////////////////////////////// -// Package scoped specific tests here. -//////////////////////////////////////////////////////////////////////////////// - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "runtime" - "strings" - "testing" - "time" - - "github.com/nats-io/gnatsd/server" - gnatsd "github.com/nats-io/gnatsd/test" -) - -// Dumb wait program to sync on callbacks, etc... Will time out. -func Wait(ch chan bool) error { - return WaitTime(ch, 5*time.Second) -} - -func WaitTime(ch chan bool, timeout time.Duration) error { - select { - case <-ch: - return nil - case <-time.After(timeout): - } - return errors.New("timeout") -} - -func stackFatalf(t *testing.T, f string, args ...interface{}) { - lines := make([]string, 0, 32) - msg := fmt.Sprintf(f, args...) - lines = append(lines, msg) - - // Generate the Stack of callers: Skip us and verify* frames. - for i := 2; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - msg := fmt.Sprintf("%d - %s:%d", i, file, line) - lines = append(lines, msg) - } - t.Fatalf("%s", strings.Join(lines, "\n")) -} - -//////////////////////////////////////////////////////////////////////////////// -// Reconnect tests -//////////////////////////////////////////////////////////////////////////////// - -const TEST_PORT = 8368 - -var reconnectOpts = Options{ - Url: fmt.Sprintf("nats://localhost:%d", TEST_PORT), - AllowReconnect: true, - MaxReconnect: 10, - ReconnectWait: 100 * time.Millisecond, - Timeout: DefaultTimeout, -} - -func RunServerOnPort(port int) *server.Server { - opts := gnatsd.DefaultTestOptions - opts.Port = port - return RunServerWithOptions(opts) -} - -func RunServerWithOptions(opts server.Options) *server.Server { - return gnatsd.RunServer(&opts) -} - -func TestReconnectServerStats(t *testing.T) { - ts := RunServerOnPort(TEST_PORT) - - opts := reconnectOpts - nc, _ := opts.Connect() - defer nc.Close() - nc.Flush() - - ts.Shutdown() - // server is stopped here... - - ts = RunServerOnPort(TEST_PORT) - defer ts.Shutdown() - - if err := nc.FlushTimeout(5 * time.Second); err != nil { - t.Fatalf("Error on Flush: %v", err) - } - - // Make sure the server that was reconnected to has its reconnects stats reset.
- nc.mu.Lock() - _, cur := nc.currentServer() - nc.mu.Unlock() - - if cur.reconnects != 0 { - t.Fatalf("Current Server's reconnects should be 0 vs %d\n", cur.reconnects) - } -} - -func TestParseStateReconnectFunctionality(t *testing.T) { - ts := RunServerOnPort(TEST_PORT) - ch := make(chan bool) - - opts := reconnectOpts - dch := make(chan bool) - opts.DisconnectedCB = func(_ *Conn) { - dch <- true - } - - nc, errc := opts.Connect() - if errc != nil { - t.Fatalf("Failed to create a connection: %v\n", errc) - } - ec, errec := NewEncodedConn(nc, DEFAULT_ENCODER) - if errec != nil { - nc.Close() - t.Fatalf("Failed to create an encoded connection: %v\n", errec) - } - defer ec.Close() - - testString := "bar" - ec.Subscribe("foo", func(s string) { - if s != testString { - t.Fatal("String doesn't match") - } - ch <- true - }) - ec.Flush() - - // Got a RACE condition with Travis build. The locking below does not - // really help because the parser running in the readLoop accesses - // nc.ps without the connection lock. Sleeping may help better since - // it would make the memory write in parse.go (when processing the - // pong) further away from the modification below. - time.Sleep(1 * time.Second) - - // Simulate partialState, this needs to be cleared - nc.mu.Lock() - nc.ps.state = OP_PON - nc.mu.Unlock() - - ts.Shutdown() - // server is stopped here... - - if err := Wait(dch); err != nil { - t.Fatal("Did not get the DisconnectedCB") - } - - if err := ec.Publish("foo", testString); err != nil { - t.Fatalf("Failed to publish message: %v\n", err) - } - - ts = RunServerOnPort(TEST_PORT) - defer ts.Shutdown() - - if err := ec.FlushTimeout(5 * time.Second); err != nil { - t.Fatalf("Error on Flush: %v", err) - } - - if err := Wait(ch); err != nil { - t.Fatal("Did not receive our message") - } - - expectedReconnectCount := uint64(1) - reconnectedCount := ec.Conn.Stats().Reconnects - - if reconnectedCount != expectedReconnectCount { - t.Fatalf("Reconnect count incorrect: %d vs %d\n", - reconnectedCount, expectedReconnectCount) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// ServerPool tests -//////////////////////////////////////////////////////////////////////////////// - -var testServers = []string{ - "nats://localhost:1222", - "nats://localhost:1223", - "nats://localhost:1224", - "nats://localhost:1225", - "nats://localhost:1226", - "nats://localhost:1227", - "nats://localhost:1228", -} - -func TestServersRandomize(t *testing.T) { - opts := DefaultOptions - opts.Servers = testServers - nc := &Conn{Opts: opts} - if err := nc.setupServerPool(); err != nil { - t.Fatalf("Problem setting up Server Pool: %v\n", err) - } - // Build []string from srvPool - clientServers := []string{} - for _, s := range nc.srvPool { - clientServers = append(clientServers, s.url.String()) - } - // In theory this could happen.. - if reflect.DeepEqual(testServers, clientServers) { - t.Fatalf("ServerPool list not randomized\n") - } - - // Now test that we do not randomize if proper flag is set. 
- opts = DefaultOptions - opts.Servers = testServers - opts.NoRandomize = true - nc = &Conn{Opts: opts} - if err := nc.setupServerPool(); err != nil { - t.Fatalf("Problem setting up Server Pool: %v\n", err) - } - // Build []string from srvPool - clientServers = []string{} - for _, s := range nc.srvPool { - clientServers = append(clientServers, s.url.String()) - } - if !reflect.DeepEqual(testServers, clientServers) { - t.Fatalf("ServerPool list should not be randomized\n") - } - - // Although the original intent was that if Opts.Url is - // set, Opts.Servers is not (and vice versa), the behavior - // is that Opts.Url is always first, even when randomization - // is enabled. So make sure that this is still the case. - opts = DefaultOptions - opts.Url = DefaultURL - opts.Servers = testServers - nc = &Conn{Opts: opts} - if err := nc.setupServerPool(); err != nil { - t.Fatalf("Problem setting up Server Pool: %v\n", err) - } - // Build []string from srvPool - clientServers = []string{} - for _, s := range nc.srvPool { - clientServers = append(clientServers, s.url.String()) - } - // In theory this could happen.. - if reflect.DeepEqual(testServers, clientServers) { - t.Fatalf("ServerPool list not randomized\n") - } - if clientServers[0] != DefaultURL { - t.Fatalf("Options.Url should be first in the array, got %v", clientServers[0]) - } -} - -func TestSelectNextServer(t *testing.T) { - opts := DefaultOptions - opts.Servers = testServers - opts.NoRandomize = true - nc := &Conn{Opts: opts} - if err := nc.setupServerPool(); err != nil { - t.Fatalf("Problem setting up Server Pool: %v\n", err) - } - if nc.url != nc.srvPool[0].url { - t.Fatalf("Wrong default selection: %v\n", nc.url) - } - - sel, err := nc.selectNextServer() - if err != nil { - t.Fatalf("Got an err: %v\n", err) - } - // Check that we are now looking at #2, and current is now last. - if len(nc.srvPool) != len(testServers) { - t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)) - } - if nc.url.String() != testServers[1] { - t.Fatalf("Selection incorrect: %v vs %v\n", nc.url, testServers[1]) - } - if nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] { - t.Fatalf("Did not push old to last position\n") - } - if sel != nc.srvPool[0] { - t.Fatalf("Did not return correct server: %v vs %v\n", sel.url, nc.srvPool[0].url) - } - - // Test that we do not keep servers where we have tried to reconnect past our limit. - nc.srvPool[0].reconnects = int(opts.MaxReconnect) - if _, err := nc.selectNextServer(); err != nil { - t.Fatalf("Got an err: %v\n", err) - } - // Check that we are now looking at #3, and current is not in the list. - if len(nc.srvPool) != len(testServers)-1 { - t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)-1) - } - if nc.url.String() != testServers[2] { - t.Fatalf("Selection incorrect: %v vs %v\n", nc.url, testServers[2]) - } - if nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] { - t.Fatalf("Did not throw away the last server correctly\n") - } -} - -// This will test that comma separated url strings work properly for -// the Connect() command. 
-func TestUrlArgument(t *testing.T) { - check := func(url string, expected []string) { - if !reflect.DeepEqual(processUrlString(url), expected) { - t.Fatalf("Got wrong response processing URL: %q, RES: %#v\n", url, processUrlString(url)) - } - } - // This is normal case - oneExpected := []string{"nats://localhost:1222"} - - check("nats://localhost:1222", oneExpected) - check("nats://localhost:1222 ", oneExpected) - check(" nats://localhost:1222", oneExpected) - check(" nats://localhost:1222 ", oneExpected) - - var multiExpected = []string{ - "nats://localhost:1222", - "nats://localhost:1223", - "nats://localhost:1224", - } - - check("nats://localhost:1222,nats://localhost:1223,nats://localhost:1224", multiExpected) - check("nats://localhost:1222, nats://localhost:1223, nats://localhost:1224", multiExpected) - check(" nats://localhost:1222, nats://localhost:1223, nats://localhost:1224 ", multiExpected) - check("nats://localhost:1222, nats://localhost:1223 ,nats://localhost:1224", multiExpected) -} - -func TestParserPing(t *testing.T) { - c := &Conn{} - fake := &bytes.Buffer{} - c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize) - - c.ps = &parseState{} - - if c.ps.state != OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - ping := []byte("PING\r\n") - err := c.parse(ping[:1]) - if err != nil || c.ps.state != OP_P { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping[1:2]) - if err != nil || c.ps.state != OP_PI { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping[2:3]) - if err != nil || c.ps.state != OP_PIN { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping[3:4]) - if err != nil || c.ps.state != OP_PING { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping[4:5]) - if err != nil || c.ps.state != OP_PING { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping[5:6]) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(ping) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Should tolerate spaces - ping = []byte("PING \r") - err = c.parse(ping) - if err != nil || c.ps.state != OP_PING { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - c.ps.state = OP_START - ping = []byte("PING \r \n") - err = c.parse(ping) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } -} - -func TestParserErr(t *testing.T) { - c := &Conn{} - c.status = CLOSED - fake := &bytes.Buffer{} - c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize) - - c.ps = &parseState{} - - // This test focuses on the parser only, not how the error is - // actually processed by the upper layer. 
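// The -ERR proto fed below advances the parser one state per byte:
// '-' -> OP_MINUS, 'E' -> OP_MINUS_E, 'R' -> OP_MINUS_ER, 'R' -> OP_MINUS_ERR,
// ' ' -> OP_MINUS_ERR_SPC, after which the quoted text accumulates under
// MINUS_ERR_ARG until the trailing newline hands it to processErr.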
- - if c.ps.state != OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - - expectedError := "'Any kind of error'" - errProto := []byte("-ERR " + expectedError + "\r\n") - err := c.parse(errProto[:1]) - if err != nil || c.ps.state != OP_MINUS { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[1:2]) - if err != nil || c.ps.state != OP_MINUS_E { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[2:3]) - if err != nil || c.ps.state != OP_MINUS_ER { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[3:4]) - if err != nil || c.ps.state != OP_MINUS_ERR { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[4:5]) - if err != nil || c.ps.state != OP_MINUS_ERR_SPC { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[5:6]) - if err != nil || c.ps.state != OP_MINUS_ERR_SPC { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - - // Check with split arg buffer - err = c.parse(errProto[6:7]) - if err != nil || c.ps.state != MINUS_ERR_ARG { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[7:10]) - if err != nil || c.ps.state != MINUS_ERR_ARG { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[10 : len(errProto)-2]) - if err != nil || c.ps.state != MINUS_ERR_ARG { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - if c.ps.argBuf == nil { - t.Fatal("ArgBuf should not be nil") - } - s := string(c.ps.argBuf) - if s != expectedError { - t.Fatalf("Expected %v, got %v", expectedError, s) - } - err = c.parse(errProto[len(errProto)-2:]) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - - // Check without split arg buffer - errProto = []byte("-ERR 'Any error'\r\n") - err = c.parse(errProto) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } -} - -func TestParserOK(t *testing.T) { - c := &Conn{} - c.ps = &parseState{} - - if c.ps.state != OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - errProto := []byte("+OKay\r\n") - err := c.parse(errProto[:1]) - if err != nil || c.ps.state != OP_PLUS { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[1:2]) - if err != nil || c.ps.state != OP_PLUS_O { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[2:3]) - if err != nil || c.ps.state != OP_PLUS_OK { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(errProto[3:]) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } -} - -func TestParserShouldFail(t *testing.T) { - c := &Conn{} - c.ps = &parseState{} - - if err := c.parse([]byte(" PING")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("POO")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("Px")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("PIx")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("PINx")); err == nil { - t.Fatal("Should have received a parse error") - } - // Stop here because 'PING' protos are tolerant for anything between PING and \n - - c.ps.state = OP_START - if err := 
c.parse([]byte("POx")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("PONx")); err == nil { - t.Fatal("Should have received a parse error") - } - // Stop here because 'PONG' protos are tolerant for anything between PONG and \n - - c.ps.state = OP_START - if err := c.parse([]byte("ZOO")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("Mx\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSx\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSGx\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG foo\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG \r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG foo 1\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG foo bar 1\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG foo bar 1 baz\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("MSG foo 1 bar baz\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("+x\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("+Ox\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("-x\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("-Ex\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("-ERx\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } - c.ps.state = OP_START - if err := c.parse([]byte("-ERRx\r\n")); err == nil { - t.Fatal("Should have received a parse error") - } -} - -func TestParserSplitMsg(t *testing.T) { - - nc := &Conn{} - nc.ps = &parseState{} - - buf := []byte("MSG a\r\n") - err := nc.parse(buf) - if err == nil { - t.Fatal("Expected an error") - } - nc.ps = &parseState{} - - buf = []byte("MSG a b c\r\n") - err = nc.parse(buf) - if err == nil { - t.Fatal("Expected an error") - } - nc.ps = &parseState{} - - expectedCount := uint64(1) - expectedSize := uint64(3) - - buf = []byte("MSG a") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.argBuf == nil { - t.Fatal("Arg buffer should have been created") - } - - buf = []byte(" 1 3\r\nf") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.ma.size != 3 { - t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) - } - if nc.ps.ma.sid != 1 { - t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) - } - if string(nc.ps.ma.subject) != "a" { - t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) - } - if nc.ps.msgBuf == nil { - t.Fatal("Msg buffer should have been created") - } - - buf = []byte("oo\r\n") - err = nc.parse(buf) - if err != nil 
{ - t.Fatalf("Parser error: %v", err) - } - if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { - t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) - } - if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { - t.Fatal("Buffers should be nil now") - } - - buf = []byte("MSG a 1 3\r\nfo") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.ma.size != 3 { - t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) - } - if nc.ps.ma.sid != 1 { - t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) - } - if string(nc.ps.ma.subject) != "a" { - t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) - } - if nc.ps.argBuf == nil { - t.Fatal("Arg buffer should have been created") - } - if nc.ps.msgBuf == nil { - t.Fatal("Msg buffer should have been created") - } - - expectedCount++ - expectedSize += 3 - - buf = []byte("o\r\n") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { - t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) - } - if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { - t.Fatal("Buffers should be nil now") - } - - buf = []byte("MSG a 1 6\r\nfo") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.ma.size != 6 { - t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) - } - if nc.ps.ma.sid != 1 { - t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) - } - if string(nc.ps.ma.subject) != "a" { - t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) - } - if nc.ps.argBuf == nil { - t.Fatal("Arg buffer should have been created") - } - if nc.ps.msgBuf == nil { - t.Fatal("Msg buffer should have been created") - } - - buf = []byte("ob") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - - expectedCount++ - expectedSize += 6 - - buf = []byte("ar\r\n") - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { - t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) - } - if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { - t.Fatal("Buffers should be nil now") - } - - // Let's have a msg that is bigger than the parser's scratch size. - // Since we prepopulate the msg with 'foo', adding 3 to the size. 
- msgSize := cap(nc.ps.scratch) + 100 + 3 - buf = []byte(fmt.Sprintf("MSG a 1 b %d\r\nfoo", msgSize)) - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.ma.size != msgSize { - t.Fatalf("Wrong msg size: %d instead of %d", nc.ps.ma.size, msgSize) - } - if nc.ps.ma.sid != 1 { - t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) - } - if string(nc.ps.ma.subject) != "a" { - t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) - } - if string(nc.ps.ma.reply) != "b" { - t.Fatalf("Wrong reply: '%s' instead of 'b'", string(nc.ps.ma.reply)) - } - if nc.ps.argBuf == nil { - t.Fatal("Arg buffer should have been created") - } - if nc.ps.msgBuf == nil { - t.Fatal("Msg buffer should have been created") - } - - expectedCount++ - expectedSize += uint64(msgSize) - - bufSize := msgSize - 3 - - buf = make([]byte, bufSize) - for i := 0; i < bufSize; i++ { - buf[i] = byte('a' + (i % 26)) - } - - err = nc.parse(buf) - if err != nil { - t.Fatalf("Parser error: %v", err) - } - if nc.ps.state != MSG_PAYLOAD { - t.Fatalf("Wrong state: %v instead of %v", nc.ps.state, MSG_PAYLOAD) - } - if nc.ps.ma.size != msgSize { - t.Fatalf("Wrong (ma) msg size: %d instead of %d", nc.ps.ma.size, msgSize) - } - if len(nc.ps.msgBuf) != msgSize { - t.Fatalf("Wrong msg size: %d instead of %d", len(nc.ps.msgBuf), msgSize) - } - // Check content: - if string(nc.ps.msgBuf[0:3]) != "foo" { - t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf)) - } - for k := 3; k < nc.ps.ma.size; k++ { - if nc.ps.msgBuf[k] != byte('a'+((k-3)%26)) { - t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf)) - } - } - - buf = []byte("\r\n") - if err := nc.parse(buf); err != nil { - t.Fatalf("Unexpected error during parsing: %v", err) - } - if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { - t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) - } - if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { - t.Fatal("Buffers should be nil now") - } - if nc.ps.state != OP_START { - t.Fatalf("Wrong state: %v", nc.ps.state) - } -} - -func TestNormalizeError(t *testing.T) { - received := "Typical Error" - expected := strings.ToLower(received) - if s := normalizeErr("-ERR '" + received + "'"); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } - - received = "Trim Surrounding Spaces" - expected = strings.ToLower(received) - if s := normalizeErr("-ERR '" + received + "' "); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } - - received = "Trim Surrounding Spaces Without Quotes" - expected = strings.ToLower(received) - if s := normalizeErr("-ERR " + received + " "); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } - - received = "Error Without Quotes" - expected = strings.ToLower(received) - if s := normalizeErr("-ERR " + received); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } - - received = "Error With Quote Only On Left" - expected = strings.ToLower(received) - if s := normalizeErr("-ERR '" + received); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } - - received = "Error With Quote Only On Right" - expected = strings.ToLower(received) - if s := normalizeErr("-ERR " + received + "'"); s != expected { - t.Fatalf("Expected '%s', got '%s'", expected, s) - } -} - -func TestAsyncINFO(t *testing.T) { - opts := DefaultOptions - c := &Conn{Opts: opts} - - c.ps = &parseState{} - - if c.ps.state != 
OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - - info := []byte("INFO {}\r\n") - if c.ps.state != OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - err := c.parse(info[:1]) - if err != nil || c.ps.state != OP_I { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(info[1:2]) - if err != nil || c.ps.state != OP_IN { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(info[2:3]) - if err != nil || c.ps.state != OP_INF { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(info[3:4]) - if err != nil || c.ps.state != OP_INFO { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(info[4:5]) - if err != nil || c.ps.state != OP_INFO_SPC { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - err = c.parse(info[5:]) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - - // All at once - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - - // Server pool needs to be setup - c.setupServerPool() - - // Partials requiring argBuf - expectedServer := serverInfo{ - Id: "test", - Host: "localhost", - Port: 4222, - Version: "1.2.3", - AuthRequired: true, - TLSRequired: true, - MaxPayload: 2 * 1024 * 1024, - ConnectURLs: []string{"localhost:5222", "localhost:6222"}, - } - b, _ := json.Marshal(expectedServer) - info = []byte(fmt.Sprintf("INFO %s\r\n", b)) - if c.ps.state != OP_START { - t.Fatalf("Expected OP_START vs %d\n", c.ps.state) - } - err = c.parse(info[:9]) - if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil { - t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) - } - err = c.parse(info[9:11]) - if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil { - t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) - } - err = c.parse(info[11:]) - if err != nil || c.ps.state != OP_START || c.ps.argBuf != nil { - t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) - } - if !reflect.DeepEqual(c.info, expectedServer) { - t.Fatalf("Expected server info to be: %v, got: %v", expectedServer, c.info) - } - - // Good INFOs - good := []string{"INFO {}\r\n", "INFO {}\r\n", "INFO {} \r\n", "INFO { \"server_id\": \"test\" } \r\n", "INFO {\"connect_urls\":[]}\r\n"} - for _, gi := range good { - c.ps = &parseState{} - err = c.parse([]byte(gi)) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Protocol %q should be fine. 
Err=%v state=%v", gi, err, c.ps.state) - } - } - - // Wrong INFOs - wrong := []string{"IxNFO {}\r\n", "INxFO {}\r\n", "INFxO {}\r\n", "INFOx {}\r\n", "INFO{}\r\n", "INFO {}"} - for _, wi := range wrong { - c.ps = &parseState{} - err = c.parse([]byte(wi)) - if err == nil && c.ps.state == OP_START { - t.Fatalf("Protocol %q should have failed", wi) - } - } - - checkPool := func(urls ...string) { - // Check both pool and urls map - if len(c.srvPool) != len(urls) { - stackFatalf(t, "Pool should have %d elements, has %d", len(urls), len(c.srvPool)) - } - if len(c.urls) != len(urls) { - stackFatalf(t, "Map should have %d elements, has %d", len(urls), len(c.urls)) - } - for i, url := range urls { - if c.Opts.NoRandomize { - if c.srvPool[i].url.Host != url { - stackFatalf(t, "Pool should have %q at index %q, has %q", url, i, c.srvPool[i].url.Host) - } - } else { - if _, present := c.urls[url]; !present { - stackFatalf(t, "Pool should have %q", url) - } - } - } - } - - // Now test the decoding of "connect_urls" - - // No randomize for now - c.Opts.NoRandomize = true - // Reset the pool - c.setupServerPool() - // Reinitialize the parser - c.ps = &parseState{} - - info = []byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) and localhost:5222 - checkPool("localhost:4222", "localhost:5222") - - // Make sure that if client receives the same, it is not added again. - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool should still contain localhost:4222 (the default URL) and localhost:5222 - checkPool("localhost:4222", "localhost:5222") - - // Receive a new URL - info = []byte("INFO {\"connect_urls\":[\"localhost:6222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) localhost:5222 and localhost:6222 - checkPool("localhost:4222", "localhost:5222", "localhost:6222") - - // Receive more than 1 URL at once - info = []byte("INFO {\"connect_urls\":[\"localhost:7222\", \"localhost:8222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) localhost:5222, localhost:6222 - // localhost:7222 and localhost:8222 - checkPool("localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222") - - // Test with pool randomization now - c.Opts.NoRandomize = false - c.setupServerPool() - - info = []byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) and localhost:5222 - checkPool("localhost:4222", "localhost:5222") - - // Make sure that if client receives the same, it is not added again. 
- err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool should still contain localhost:4222 (the default URL) and localhost:5222 - checkPool("localhost:4222", "localhost:5222") - - // Receive a new URL - info = []byte("INFO {\"connect_urls\":[\"localhost:6222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) localhost:5222 and localhost:6222 - checkPool("localhost:4222", "localhost:5222", "localhost:6222") - - // Receive more than 1 URL at once - info = []byte("INFO {\"connect_urls\":[\"localhost:7222\", \"localhost:8222\"]}\r\n") - err = c.parse(info) - if err != nil || c.ps.state != OP_START { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Pool now should contain localhost:4222 (the default URL) localhost:5222, localhost:6222 - // localhost:7222 and localhost:8222 - checkPool("localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222") - - // Finally, check that the pool should be randomized. - allUrls := []string{"localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222"} - same := 0 - for i, url := range c.srvPool { - if url.url.Host == allUrls[i] { - same++ - } - } - if same == len(allUrls) { - t.Fatal("Pool does not seem to be randomized") - } -} - -func TestConnServers(t *testing.T) { - opts := DefaultOptions - c := &Conn{Opts: opts} - c.ps = &parseState{} - c.setupServerPool() - - validateURLs := func(serverUrls []string, expectedUrls ...string) { - var found bool - if len(serverUrls) != len(expectedUrls) { - stackFatalf(t, "Array should have %d elements, has %d", len(expectedUrls), len(serverUrls)) - } - - for _, ev := range expectedUrls { - found = false - for _, av := range serverUrls { - if ev == av { - found = true - break - } - } - if !found { - stackFatalf(t, "array is missing %q in %v", ev, serverUrls) - } - } - } - - // check the default url - validateURLs(c.Servers(), "nats://localhost:4222") - if len(c.DiscoveredServers()) != 0 { - t.Fatalf("Expected no discovered servers") - } - - // Add a new URL - err := c.parse([]byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n")) - if err != nil { - t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) - } - // Server list should now contain both the default and the new url. - validateURLs(c.Servers(), "nats://localhost:4222", "nats://localhost:5222") - // Discovered servers should only contain the new url. - validateURLs(c.DiscoveredServers(), "nats://localhost:5222") - - // verify user credentials are stripped out. - opts.Servers = []string{"nats://user:pass@localhost:4333", "nats://token@localhost:4444"} - c = &Conn{Opts: opts} - c.ps = &parseState{} - c.setupServerPool() - - validateURLs(c.Servers(), "nats://localhost:4333", "nats://localhost:4444") -} diff --git a/vendor/github.com/nats-io/go-nats/netchan.go b/vendor/github.com/nats-io/go-nats/netchan.go deleted file mode 100644 index 337674e04..000000000 --- a/vendor/github.com/nats-io/go-nats/netchan.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2013-2014 Apcera Inc. All rights reserved. - -package nats - -import ( - "errors" - "reflect" -) - -// This allows the functionality for network channels by binding send and receive Go chans -// to subjects and optionally queue groups. -// Data will be encoded and decoded via the EncodedConn and its associated encoders. 
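A minimal usage sketch of the channel binding described above, assuming a reachable gnatsd at nats.DefaultURL; the subject "updates" is illustrative, and DEFAULT_ENCODER is the builtin encoder these tests also use:

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	// EncodedConn wraps the Conn with an encoder for channel values.
	ec, err := nats.NewEncodedConn(nc, nats.DEFAULT_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	send := make(chan string)
	recv := make(chan string)
	// Values written to send are encoded and published on "updates".
	if err := ec.BindSendChan("updates", send); err != nil {
		log.Fatal(err)
	}
	// Messages arriving on "updates" are decoded and delivered on recv.
	if _, err := ec.BindRecvChan("updates", recv); err != nil {
		log.Fatal(err)
	}

	send <- "hello"
	fmt.Println(<-recv) // prints "hello"
}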
- -// BindSendChan binds a channel for send operations to NATS. -func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return ErrChanArg - } - go chPublish(c, chVal, subject) - return nil -} - -// Publish all values that arrive on the channel until it is closed or we -// encounter an error. -func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { - for { - val, ok := chVal.Recv() - if !ok { - // Channel has most likely been closed. - return - } - if e := c.Publish(subject, val.Interface()); e != nil { - // Do this under lock. - c.Conn.mu.Lock() - defer c.Conn.mu.Unlock() - - if c.Conn.Opts.AsyncErrorCB != nil { - // FIXME(dlc) - Not sure this is the right thing to do. - // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback - if c.Conn.isClosed() { - go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) - } else { - c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) } - } - } - return - } - } -} - -// BindRecvChan binds a channel for receive operations from NATS. -func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) { - return c.bindRecvChan(subject, _EMPTY_, channel) -} - -// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. -func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) { - return c.bindRecvChan(subject, queue, channel) -} - -// Internal function to bind receive operations for a channel. -func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return nil, ErrChanArg - } - argType := chVal.Type().Elem() - - cb := func(m *Msg) { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) } - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - // This is a bit hacky, but in this instance we may be trying to send to a closed channel, - // and the user does not know when it is safe to close the channel. - defer func() { - // If we have panicked, recover and close the subscription. - if r := recover(); r != nil { - m.Sub.Unsubscribe() - } - }() - // Actually do the send to the channel. - chVal.Send(oPtr) - } - - return c.Conn.subscribe(subject, queue, cb, nil) -} diff --git a/vendor/github.com/nats-io/go-nats/parser.go b/vendor/github.com/nats-io/go-nats/parser.go deleted file mode 100644 index 0911954a1..000000000 --- a/vendor/github.com/nats-io/go-nats/parser.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright 2012-2014 Apcera Inc. All rights reserved.
- -package nats - -import ( - "fmt" -) - -type msgArg struct { - subject []byte - reply []byte - sid int64 - size int -} - -const MAX_CONTROL_LINE_SIZE = 1024 - -type parseState struct { - state int - as int - drop int - ma msgArg - argBuf []byte - msgBuf []byte - scratch [MAX_CONTROL_LINE_SIZE]byte -} - -const ( - OP_START = iota - OP_PLUS - OP_PLUS_O - OP_PLUS_OK - OP_MINUS - OP_MINUS_E - OP_MINUS_ER - OP_MINUS_ERR - OP_MINUS_ERR_SPC - MINUS_ERR_ARG - OP_M - OP_MS - OP_MSG - OP_MSG_SPC - MSG_ARG - MSG_PAYLOAD - MSG_END - OP_P - OP_PI - OP_PIN - OP_PING - OP_PO - OP_PON - OP_PONG - OP_I - OP_IN - OP_INF - OP_INFO - OP_INFO_SPC - INFO_ARG -) - -// parse is the fast protocol parser engine. -func (nc *Conn) parse(buf []byte) error { - var i int - var b byte - - // Move to loop instead of range syntax to allow jumping of i - for i = 0; i < len(buf); i++ { - b = buf[i] - - switch nc.ps.state { - case OP_START: - switch b { - case 'M', 'm': - nc.ps.state = OP_M - case 'P', 'p': - nc.ps.state = OP_P - case '+': - nc.ps.state = OP_PLUS - case '-': - nc.ps.state = OP_MINUS - case 'I', 'i': - nc.ps.state = OP_I - default: - goto parseErr - } - case OP_M: - switch b { - case 'S', 's': - nc.ps.state = OP_MS - default: - goto parseErr - } - case OP_MS: - switch b { - case 'G', 'g': - nc.ps.state = OP_MSG - default: - goto parseErr - } - case OP_MSG: - switch b { - case ' ', '\t': - nc.ps.state = OP_MSG_SPC - default: - goto parseErr - } - case OP_MSG_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MSG_ARG - nc.ps.as = i - } - case MSG_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - if err := nc.processMsgArgs(arg); err != nil { - return err - } - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD - - // jump ahead with the index. If this overruns - // what is left we fall out and process split - // buffer. - i = nc.ps.as + nc.ps.ma.size - 1 - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case MSG_PAYLOAD: - if nc.ps.msgBuf != nil { - if len(nc.ps.msgBuf) >= nc.ps.ma.size { - nc.processMsg(nc.ps.msgBuf) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END - } else { - // copy as much as we can to the buffer and skip ahead. - toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) - avail := len(buf) - i - - if avail < toCopy { - toCopy = avail - } - - if toCopy > 0 { - start := len(nc.ps.msgBuf) - // This is needed for copy to work. 
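// Extending msgBuf's length within its existing capacity gives copy a
// destination region to write into; without the reslice, msgBuf[start:]
// would have length zero and copy would transfer nothing.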
- nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] - copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) - // Update our index - i = (i + toCopy) - 1 - } else { - nc.ps.msgBuf = append(nc.ps.msgBuf, b) - } - } - } else if i-nc.ps.as >= nc.ps.ma.size { - nc.processMsg(buf[nc.ps.as:i]) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END - } - case MSG_END: - switch b { - case '\n': - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - continue - } - case OP_PLUS: - switch b { - case 'O', 'o': - nc.ps.state = OP_PLUS_O - default: - goto parseErr - } - case OP_PLUS_O: - switch b { - case 'K', 'k': - nc.ps.state = OP_PLUS_OK - default: - goto parseErr - } - case OP_PLUS_OK: - switch b { - case '\n': - nc.processOK() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_MINUS: - switch b { - case 'E', 'e': - nc.ps.state = OP_MINUS_E - default: - goto parseErr - } - case OP_MINUS_E: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ER - default: - goto parseErr - } - case OP_MINUS_ER: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ERR - default: - goto parseErr - } - case OP_MINUS_ERR: - switch b { - case ' ', '\t': - nc.ps.state = OP_MINUS_ERR_SPC - default: - goto parseErr - } - case OP_MINUS_ERR_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MINUS_ERR_ARG - nc.ps.as = i - } - case MINUS_ERR_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processErr(string(arg)) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case OP_P: - switch b { - case 'I', 'i': - nc.ps.state = OP_PI - case 'O', 'o': - nc.ps.state = OP_PO - default: - goto parseErr - } - case OP_PO: - switch b { - case 'N', 'n': - nc.ps.state = OP_PON - default: - goto parseErr - } - case OP_PON: - switch b { - case 'G', 'g': - nc.ps.state = OP_PONG - default: - goto parseErr - } - case OP_PONG: - switch b { - case '\n': - nc.processPong() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_PI: - switch b { - case 'N', 'n': - nc.ps.state = OP_PIN - default: - goto parseErr - } - case OP_PIN: - switch b { - case 'G', 'g': - nc.ps.state = OP_PING - default: - goto parseErr - } - case OP_PING: - switch b { - case '\n': - nc.processPing() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_I: - switch b { - case 'N', 'n': - nc.ps.state = OP_IN - default: - goto parseErr - } - case OP_IN: - switch b { - case 'F', 'f': - nc.ps.state = OP_INF - default: - goto parseErr - } - case OP_INF: - switch b { - case 'O', 'o': - nc.ps.state = OP_INFO - default: - goto parseErr - } - case OP_INFO: - switch b { - case ' ', '\t': - nc.ps.state = OP_INFO_SPC - default: - goto parseErr - } - case OP_INFO_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = INFO_ARG - nc.ps.as = i - } - case INFO_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processAsyncInfo(arg) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - default: - goto parseErr - } - } - // Check for split buffer scenarios - if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { - 
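// A control line was split across reads: seed argBuf from the scratch array
// (no heap allocation for control lines up to MAX_CONTROL_LINE_SIZE) and
// carry the partial argument bytes over into the next read.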
nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) - // FIXME, check max len - } - // Check for split msg - if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { - // We need to clone the msgArg if it is still referencing the - // read buffer and we are not able to process the msg. - if nc.ps.argBuf == nil { - nc.cloneMsgArg() - } - - // If we will overflow the scratch buffer, just create a - // new buffer to hold the split message. - if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { - lrem := len(buf[nc.ps.as:]) - - nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) - copy(nc.ps.msgBuf, buf[nc.ps.as:]) - } else { - nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] - nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) - } - } - - return nil - -parseErr: - return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) -} - -// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but -// we need to hold onto it into the next read. -func (nc *Conn) cloneMsgArg() { - nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) - nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] - if nc.ps.ma.reply != nil { - nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] - } -} - -const argsLenMax = 4 - -func (nc *Conn) processMsgArgs(arg []byte) error { - // Unroll splitArgs to avoid runtime/heap issues - a := [argsLenMax][]byte{} - args := a[:0] - start := -1 - for i, b := range arg { - switch b { - case ' ', '\t', '\r', '\n': - if start >= 0 { - args = append(args, arg[start:i]) - start = -1 - } - default: - if start < 0 { - start = i - } - } - } - if start >= 0 { - args = append(args, arg[start:]) - } - - switch len(args) { - case 3: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = nil - nc.ps.ma.size = int(parseInt64(args[2])) - case 4: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = args[2] - nc.ps.ma.size = int(parseInt64(args[3])) - default: - return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) - } - if nc.ps.ma.sid < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) - } - if nc.ps.ma.size < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) - } - return nil -} - -// Ascii numbers 0-9 -const ( - ascii_0 = 48 - ascii_9 = 57 -) - -// parseInt64 expects decimal positive numbers. 
We -// return -1 to signal an error. -func parseInt64(d []byte) (n int64) { - if len(d) == 0 { - return -1 - } - for _, dec := range d { - if dec < ascii_0 || dec > ascii_9 { - return -1 - } - n = n*10 + (int64(dec) - ascii_0) - } - return n -} diff --git a/vendor/github.com/nats-io/go-nats/scripts/cov.sh b/vendor/github.com/nats-io/go-nats/scripts/cov.sh deleted file mode 100755 index 437b8d492..000000000 --- a/vendor/github.com/nats-io/go-nats/scripts/cov.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -e -# Run from directory above via ./scripts/cov.sh - -rm -rf ./cov -mkdir cov -go test -v -covermode=atomic -coverprofile=./cov/nats.out -go test -v -covermode=atomic -coverprofile=./cov/builtin.out ./encoders/builtin -go test -v -covermode=atomic -coverprofile=./cov/protobuf.out ./encoders/protobuf -go test -v -covermode=atomic -coverprofile=./cov/test.out -coverpkg=github.com/nats-io/go-nats ./test -gocovmerge ./cov/*.out > acc.out -rm -rf ./cov - -# If we have an arg, assume travis run and push to coveralls. Otherwise launch browser results -if [[ -n $1 ]]; then - $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci - rm -rf ./acc.out -else - go tool cover -html=acc.out -fi diff --git a/vendor/github.com/nats-io/go-nats/test/auth_test.go b/vendor/github.com/nats-io/go-nats/test/auth_test.go deleted file mode 100644 index d343bfb0c..000000000 --- a/vendor/github.com/nats-io/go-nats/test/auth_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package test - -import ( - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/gnatsd/auth" - "github.com/nats-io/go-nats" -) - -func TestAuth(t *testing.T) { - s := RunServerOnPort(8232) - - // Auth is pluggable, so need to set here. - auth := &auth.Plain{ - Username: "derek", - Password: "foo", - } - s.SetClientAuthMethod(auth) - - defer s.Shutdown() - - _, err := nats.Connect("nats://localhost:8232") - if err == nil { - t.Fatal("Should have received an error while trying to connect") - } - - // This test may be a bit too strict for the future, but for now makes - // sure that we correctly process the -ERR content on connect. - if err.Error() != nats.ErrAuthorization.Error() { - t.Fatalf("Expected error '%v', got '%v'", nats.ErrAuthorization, err) - } - - nc, err := nats.Connect("nats://derek:foo@localhost:8232") - if err != nil { - t.Fatal("Should have connected successfully with user/password in the URL") - } - nc.Close() - - // Use Options - nc, err = nats.Connect("nats://localhost:8232", nats.UserInfo("derek", "foo")) - if err != nil { - t.Fatalf("Should have connected successfully with the UserInfo option: %v", err) - } - nc.Close() - // Verify that credentials in URL take precedence. - nc, err = nats.Connect("nats://derek:foo@localhost:8232", nats.UserInfo("foo", "bar")) - if err != nil { - t.Fatalf("Should have connected successfully with the URL credentials: %v", err) - } - nc.Close() -} - -func TestAuthFailNoDisconnectCB(t *testing.T) { - s := RunServerOnPort(8232) - - // Auth is pluggable, so need to set here.
- auth := &auth.Plain{ - Username: "derek", - Password: "foo", - } - s.SetClientAuthMethod(auth) - - defer s.Shutdown() - - copts := nats.DefaultOptions - copts.Url = "nats://localhost:8232" - receivedDisconnectCB := int32(0) - copts.DisconnectedCB = func(nc *nats.Conn) { - atomic.AddInt32(&receivedDisconnectCB, 1) - } - - _, err := copts.Connect() - if err == nil { - t.Fatal("Should have received an error while trying to connect") - } - if atomic.LoadInt32(&receivedDisconnectCB) > 0 { - t.Fatal("Should not have received a disconnect callback on auth failure") - } -} - -func TestAuthFailAllowReconnect(t *testing.T) { - ts := RunServerOnPort(23232) - defer ts.Shutdown() - - var servers = []string{ - "nats://localhost:23232", - "nats://localhost:23233", - "nats://localhost:23234", - } - - ts2 := RunServerOnPort(23233) - // Auth is pluggable, so need to set here.. - auth := &auth.Plain{ - Username: "ivan", - Password: "foo", - } - ts2.SetClientAuthMethod(auth) - defer ts2.Shutdown() - - ts3 := RunServerOnPort(23234) - defer ts3.Shutdown() - - reconnectch := make(chan bool) - - opts := nats.DefaultOptions - opts.Servers = servers - opts.AllowReconnect = true - opts.NoRandomize = true - opts.MaxReconnect = 10 - opts.ReconnectWait = 100 * time.Millisecond - - opts.ReconnectedCB = func(_ *nats.Conn) { - reconnectch <- true - } - - // Connect - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - // Stop the server - ts.Shutdown() - - // The client will try to connect to the second server, and that - // should fail. It should then try to connect to the third and succeed. - - // Wait for the reconnect CB. - if e := Wait(reconnectch); e != nil { - t.Fatal("Reconnect callback should have been triggered") - } - - if nc.IsClosed() { - t.Fatal("Should have reconnected") - } - - if nc.ConnectedUrl() != servers[2] { - t.Fatalf("Should have reconnected to %s, reconnected to %s instead", servers[2], nc.ConnectedUrl()) - } -} - -func TestTokenAuth(t *testing.T) { - s := RunServerOnPort(8232) - - secret := "S3Cr3T0k3n!" - // Auth is pluggable, so need to set here.. - auth := &auth.Token{Token: secret} - s.SetClientAuthMethod(auth) - - defer s.Shutdown() - - _, err := nats.Connect("nats://localhost:8232") - if err == nil { - t.Fatal("Should have received an error while trying to connect") - } - - tokenURL := fmt.Sprintf("nats://%s@localhost:8232", secret) - nc, err := nats.Connect(tokenURL) - if err != nil { - t.Fatal("Should have connected successfully") - } - nc.Close() - - // Use Options - nc, err = nats.Connect("nats://localhost:8232", nats.Token(secret)) - if err != nil { - t.Fatalf("Should have connected successfully: %v", err) - } - nc.Close() - // Verify that token in the URL takes precedence. 
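// Both token sources are accepted on their own; when they disagree, the URL
// is expected to win, mirroring the credential precedence in TestAuth
// (a sketch):
//
//	nc, err := nats.Connect("nats://S3Cr3T0k3n!@localhost:8232")               // token in URL
//	nc, err = nats.Connect("nats://localhost:8232", nats.Token("S3Cr3T0k3n!")) // token option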
- nc, err = nats.Connect(tokenURL, nats.Token("badtoken")) - if err != nil { - t.Fatalf("Should have connected successfully: %v", err) - } - nc.Close() -} diff --git a/vendor/github.com/nats-io/go-nats/test/basic_test.go b/vendor/github.com/nats-io/go-nats/test/basic_test.go deleted file mode 100644 index d18f4cb51..000000000 --- a/vendor/github.com/nats-io/go-nats/test/basic_test.go +++ /dev/null @@ -1,797 +0,0 @@ -package test - -import ( - "bytes" - "math" - "regexp" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/go-nats" -) - -func TestCloseLeakingGoRoutines(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - base := runtime.NumGoroutine() - - nc := NewDefaultConnection(t) - - nc.Flush() - nc.Close() - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - delta := (runtime.NumGoroutine() - base) - if delta > 0 { - t.Fatalf("%d Go routines still exist post Close()", delta) - } - // Make sure we can call Close() multiple times - nc.Close() -} - -func TestLeakingGoRoutinesOnFailedConnect(t *testing.T) { - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - base := runtime.NumGoroutine() - - nc, err := nats.Connect(nats.DefaultURL) - if err == nil { - nc.Close() - t.Fatalf("Expected failure to connect") - } - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - delta := (runtime.NumGoroutine() - base) - if delta > 0 { - t.Fatalf("%d Go routines still exist post Close()", delta) - } -} - -func TestConnectedServer(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - u := nc.ConnectedUrl() - if u == "" || u != nats.DefaultURL { - t.Fatalf("Unexpected connected URL of %s\n", u) - } - srv := nc.ConnectedServerId() - if srv == "" { - t.Fatal("Expected a connected server id") - } - nc.Close() - u = nc.ConnectedUrl() - if u != "" { - t.Fatalf("Expected a nil connected URL, got %s\n", u) - } - srv = nc.ConnectedServerId() - if srv != "" { - t.Fatalf("Expected a nil connected server id, got %s\n", srv) - } -} - -func TestMultipleClose(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - nc.Close() - wg.Done() - }() - } - wg.Wait() -} - -func TestBadOptionTimeoutConnect(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - opts := nats.DefaultOptions - opts.Timeout = -1 - opts.Url = "nats://localhost:4222" - - _, err := opts.Connect() - if err == nil { - t.Fatal("Expected an error") - } - if err != nats.ErrNoServers { - t.Fatalf("Expected an ErrNoServers error: got %v\n", err) - } -} - -func TestSimplePublish(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - if err := nc.Publish("foo", []byte("Hello World")); err != nil { - t.Fatal("Failed to publish string message: ", err) - } -} - -func TestSimplePublishNoData(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - if err := nc.Publish("foo", nil); err != nil { - t.Fatal("Failed to publish empty message: ", err) - } -} - -func
TestPublishDoesNotFailOnSlowConsumer(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatalf("Unable to create subscription: %v", err) - } - - if err := sub.SetPendingLimits(1, 1000); err != nil { - t.Fatalf("Unable to set pending limits: %v", err) - } - - var pubErr error - - msg := []byte("Hello") - for i := 0; i < 10; i++ { - pubErr = nc.Publish("foo", msg) - if pubErr != nil { - break - } - nc.Flush() - } - - if pubErr != nil { - t.Fatalf("Publish() should not fail because of slow consumer. Got '%v'", pubErr) - } -} - -func TestAsyncSubscribe(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - omsg := []byte("Hello World") - ch := make(chan bool) - - // Callback is mandatory - if _, err := nc.Subscribe("foo", nil); err == nil { - t.Fatal("Creating subscription without callback should have failed") - } - - _, err := nc.Subscribe("foo", func(m *nats.Msg) { - if !bytes.Equal(m.Data, omsg) { - t.Fatal("Message received does not match") - } - if m.Sub == nil { - t.Fatal("Callback does not have a valid Subscription") - } - ch <- true - }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - nc.Publish("foo", omsg) - if e := Wait(ch); e != nil { - t.Fatal("Message not received for subscription") - } -} - -func TestAsyncSubscribeRoutineLeakOnUnsubscribe(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - ch := make(chan bool) - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - // Take the base once the connection is established, but before - // the subscriber is created. - base := runtime.NumGoroutine() - - sub, err := nc.Subscribe("foo", func(m *nats.Msg) { ch <- true }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - - // Send to ourself - nc.Publish("foo", []byte("hello")) - - // This ensures that the async delivery routine is up and running. - if err := Wait(ch); err != nil { - t.Fatal("Failed to receive message") - } - - // Make sure to give it time to go back into wait - time.Sleep(200 * time.Millisecond) - - // Explicit unsubscribe - sub.Unsubscribe() - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - delta := (runtime.NumGoroutine() - base) - if delta > 0 { - t.Fatalf("%d Go routines still exist post Unsubscribe()", delta) - } -} - -func TestAsyncSubscribeRoutineLeakOnClose(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ch := make(chan bool) - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - // Take the base before creating the connection, since we are going - // to close it before taking the delta. - base := runtime.NumGoroutine() - - nc := NewDefaultConnection(t) - defer nc.Close() - - _, err := nc.Subscribe("foo", func(m *nats.Msg) { ch <- true }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - - // Send to ourself - nc.Publish("foo", []byte("hello")) - - // This ensures that the async delivery routine is up and running. 
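// The goroutine-leak tests in this file all repeat the same settle-then-diff
// pattern; a hypothetical helper (not part of the original file) capturing it:
//
//	func goroutineDelta(base int) int {
//		time.Sleep(500 * time.Millisecond) // let the runtime settle
//		return runtime.NumGoroutine() - base
//	}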
- if err := Wait(ch); err != nil { - t.Fatal("Failed to receive message") - } - - // Make sure to give it time to go back into wait - time.Sleep(200 * time.Millisecond) - - // Close connection without explicit unsubscribe - nc.Close() - - // Give time for things to settle before capturing the number of - // go routines - time.Sleep(500 * time.Millisecond) - - delta := (runtime.NumGoroutine() - base) - if delta > 0 { - t.Fatalf("%d Go routines still exist post Close()", delta) - } -} - -func TestSyncSubscribe(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - omsg := []byte("Hello World") - nc.Publish("foo", omsg) - msg, err := sub.NextMsg(1 * time.Second) - if err != nil || !bytes.Equal(msg.Data, omsg) { - t.Fatal("Message received does not match") - } -} - -func TestPubSubWithReply(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - omsg := []byte("Hello World") - nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: "bar", Data: omsg}) - msg, err := sub.NextMsg(10 * time.Second) - if err != nil || !bytes.Equal(msg.Data, omsg) { - t.Fatal("Message received does not match") - } -} - -func TestFlush(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - omsg := []byte("Hello World") - for i := 0; i < 10000; i++ { - nc.Publish("flush", omsg) - } - if err := nc.FlushTimeout(0); err == nil { - t.Fatal("Calling FlushTimeout() with invalid timeout should fail") - } - if err := nc.Flush(); err != nil { - t.Fatalf("Received error from flush: %s\n", err) - } - if nb, _ := nc.Buffered(); nb > 0 { - t.Fatalf("Outbound buffer not empty: %d bytes\n", nb) - } - - nc.Close() - if _, err := nc.Buffered(); err == nil { - t.Fatal("Calling Buffered() on closed connection should fail") - } -} - -func TestQueueSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - s1, _ := nc.QueueSubscribeSync("foo", "bar") - s2, _ := nc.QueueSubscribeSync("foo", "bar") - omsg := []byte("Hello World") - nc.Publish("foo", omsg) - nc.Flush() - r1, _ := s1.QueuedMsgs() - r2, _ := s2.QueuedMsgs() - if (r1 + r2) != 1 { - t.Fatal("Received too many messages for multiple queue subscribers") - } - // Drain messages - s1.NextMsg(time.Second) - s2.NextMsg(time.Second) - - total := 1000 - for i := 0; i < total; i++ { - nc.Publish("foo", omsg) - } - nc.Flush() - v := uint(float32(total) * 0.15) - r1, _ = s1.QueuedMsgs() - r2, _ = s2.QueuedMsgs() - if r1+r2 != total { - t.Fatalf("Incorrect number of messages: %d vs %d", (r1 + r2), total) - } - expected := total / 2 - d1 := uint(math.Abs(float64(expected - r1))) - d2 := uint(math.Abs(float64(expected - r2))) - if d1 > v || d2 > v { - t.Fatalf("Too much variance in totals: %d, %d > %d", d1, d2, v) - } -} - -func TestReplyArg(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - ch := make(chan bool) - replyExpected := "bar" - - nc.Subscribe("foo", func(m *nats.Msg) { - if m.Reply != replyExpected { - t.Fatalf("Did not receive correct reply arg in callback: "+ - "('%s' vs '%s')", m.Reply, replyExpected) - } - ch <- true - }) - nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: 
replyExpected, Data: []byte("Hello")}) - if e := Wait(ch); e != nil { - t.Fatal("Did not receive callback") - } -} - -func TestSyncReplyArg(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - replyExpected := "bar" - sub, _ := nc.SubscribeSync("foo") - nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: replyExpected, Data: []byte("Hello")}) - msg, err := sub.NextMsg(1 * time.Second) - if err != nil { - t.Fatal("Received an err on NextMsg()") - } - if msg.Reply != replyExpected { - t.Fatalf("Did not receive correct reply arg in callback: "+ - "('%s' vs '%s')", msg.Reply, replyExpected) - } -} - -func TestUnsubscribe(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - received := int32(0) - max := int32(10) - ch := make(chan bool) - nc.Subscribe("foo", func(m *nats.Msg) { - atomic.AddInt32(&received, 1) - if received == max { - err := m.Sub.Unsubscribe() - if err != nil { - t.Fatal("Unsubscribe failed with err:", err) - } - ch <- true - } - }) - send := 20 - for i := 0; i < send; i++ { - nc.Publish("foo", []byte("hello")) - } - nc.Flush() - <-ch - - r := atomic.LoadInt32(&received) - if r != max { - t.Fatalf("Received wrong # of messages after unsubscribe: %d vs %d", - r, max) - } -} - -func TestDoubleUnsubscribe(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - if err = sub.Unsubscribe(); err != nil { - t.Fatal("Unsubscribe failed with err:", err) - } - if err = sub.Unsubscribe(); err == nil { - t.Fatal("Unsubscribe should have reported an error") - } -} - -func TestRequestTimeout(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - if _, err := nc.Request("foo", []byte("help"), 10*time.Millisecond); err == nil { - t.Fatalf("Expected to receive a timeout error") - } -} - -func TestRequest(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - response := []byte("I will help you") - nc.Subscribe("foo", func(m *nats.Msg) { - nc.Publish(m.Reply, response) - }) - msg, err := nc.Request("foo", []byte("help"), 500*time.Millisecond) - if err != nil { - t.Fatalf("Received an error on Request test: %s", err) - } - if !bytes.Equal(msg.Data, response) { - t.Fatalf("Received invalid response") - } -} - -func TestRequestNoBody(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - response := []byte("I will help you") - nc.Subscribe("foo", func(m *nats.Msg) { - nc.Publish(m.Reply, response) - }) - msg, err := nc.Request("foo", nil, 500*time.Millisecond) - if err != nil { - t.Fatalf("Received an error on Request test: %s", err) - } - if !bytes.Equal(msg.Data, response) { - t.Fatalf("Received invalid response") - } -} - -func TestFlushInCB(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - ch := make(chan bool) - - nc.Subscribe("foo", func(_ *nats.Msg) { - nc.Flush() - ch <- true - }) - nc.Publish("foo", []byte("Hello")) - if e := Wait(ch); e != nil { - t.Fatal("Flush did not return properly in callback") - } -} - -func TestReleaseFlush(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - - for i := 0; i < 1000; i++ { 
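// Queue up enough pending data that the Flush below is still in flight
// when the concurrent Close runs; the test appears to verify that Flush is
// released (returns) rather than deadlocking.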
- nc.Publish("foo", []byte("Hello")) - } - go nc.Close() - nc.Flush() -} - -func TestInbox(t *testing.T) { - inbox := nats.NewInbox() - if matched, _ := regexp.Match(`_INBOX.\S`, []byte(inbox)); !matched { - t.Fatal("Bad INBOX format") - } -} - -func TestStats(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - data := []byte("The quick brown fox jumped over the lazy dog") - iter := 10 - - for i := 0; i < iter; i++ { - nc.Publish("foo", data) - } - - if nc.OutMsgs != uint64(iter) { - t.Fatalf("Not properly tracking OutMsgs: received %d, wanted %d\n", nc.OutMsgs, iter) - } - obb := uint64(iter * len(data)) - if nc.OutBytes != obb { - t.Fatalf("Not properly tracking OutBytes: received %d, wanted %d\n", nc.OutBytes, obb) - } - - // Clear outbound - nc.OutMsgs, nc.OutBytes = 0, 0 - - // Test both sync and async versions of subscribe. - nc.Subscribe("foo", func(_ *nats.Msg) {}) - nc.SubscribeSync("foo") - - for i := 0; i < iter; i++ { - nc.Publish("foo", data) - } - nc.Flush() - - if nc.InMsgs != uint64(2*iter) { - t.Fatalf("Not properly tracking InMsgs: received %d, wanted %d\n", nc.InMsgs, 2*iter) - } - - ibb := 2 * obb - if nc.InBytes != ibb { - t.Fatalf("Not properly tracking InBytes: received %d, wanted %d\n", nc.InBytes, ibb) - } -} - -func TestRaceSafeStats(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - go nc.Publish("foo", []byte("Hello World")) - time.Sleep(200 * time.Millisecond) - - stats := nc.Stats() - - if stats.OutMsgs != uint64(1) { - t.Fatalf("Not properly tracking OutMsgs: received %d, wanted %d\n", nc.OutMsgs, 1) - } -} - -func TestBadSubject(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(t) - defer nc.Close() - - err := nc.Publish("", []byte("Hello World")) - if err == nil { - t.Fatalf("Expected an error on bad subject to publish") - } - if err != nats.ErrBadSubject { - t.Fatalf("Expected a ErrBadSubject error: Got %v\n", err) - } -} - -func TestOptions(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc, err := nats.Connect(nats.DefaultURL, nats.Name("myName"), nats.MaxReconnects(2), nats.ReconnectWait(50*time.Millisecond)) - if err != nil { - t.Fatalf("Failed to connect: %v", err) - } - defer nc.Close() - - rch := make(chan bool) - cch := make(chan bool) - - nc.SetReconnectHandler(func(_ *nats.Conn) { rch <- true }) - nc.SetClosedHandler(func(_ *nats.Conn) { cch <- true }) - - s.Shutdown() - - s = RunDefaultServer() - defer s.Shutdown() - - if err := Wait(rch); err != nil { - t.Fatal("Failed getting reconnected cb") - } - - nc.Close() - - if err := Wait(cch); err != nil { - t.Fatal("Failed getting closed cb") - } - - nc, err = nats.Connect(nats.DefaultURL, nats.NoReconnect()) - if err != nil { - t.Fatalf("Failed to connect: %v", err) - } - defer nc.Close() - - nc.SetReconnectHandler(func(_ *nats.Conn) { rch <- true }) - nc.SetClosedHandler(func(_ *nats.Conn) { cch <- true }) - - s.Shutdown() - - // We should not get a reconnect cb this time - if err := WaitTime(rch, time.Second); err == nil { - t.Fatal("Unexpected reconnect cb") - } - - nc.Close() - - if err := Wait(cch); err != nil { - t.Fatal("Failed getting closed cb") - } -} - -func TestNilConnection(t *testing.T) { - var nc *nats.Conn - data := []byte("ok") - - // Publish - if err := nc.Publish("foo", data); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - 
} - if err := nc.PublishMsg(nil); err == nil || err != nats.ErrInvalidMsg { - t.Fatalf("Expected ErrInvalidMsg error, got %v\n", err) - } - if err := nc.PublishMsg(&nats.Msg{}); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if err := nc.PublishRequest("foo", "reply", data); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - - // Subscribe - if _, err := nc.Subscribe("foo", nil); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if _, err := nc.SubscribeSync("foo"); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if _, err := nc.QueueSubscribe("foo", "bar", nil); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - ch := make(chan *nats.Msg) - if _, err := nc.ChanSubscribe("foo", ch); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if _, err := nc.ChanQueueSubscribe("foo", "bar", ch); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if _, err := nc.QueueSubscribeSyncWithChan("foo", "bar", ch); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - - // Flush - if err := nc.Flush(); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - if err := nc.FlushTimeout(time.Millisecond); err == nil || err != nats.ErrInvalidConnection { - t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) - } - - // Nil Subscribers - var sub *nats.Subscription - if sub.Type() != nats.NilSubscription { - t.Fatalf("Got wrong type for nil subscription, %v\n", sub.Type()) - } - if sub.IsValid() { - t.Fatalf("Expected IsValid() to return false") - } - if err := sub.Unsubscribe(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected Unsubscribe to return proper error, got %v\n", err) - } - if err := sub.AutoUnsubscribe(1); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, err := sub.NextMsg(time.Millisecond); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, err := sub.QueuedMsgs(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, _, err := sub.Pending(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, _, err := sub.MaxPending(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if err := sub.ClearMaxPending(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, _, err := sub.PendingLimits(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if err := sub.SetPendingLimits(1, 1); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } - if _, err := sub.Delivered(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected 
ErrBadSubscription error, got %v\n", err) - } - if _, err := sub.Dropped(); err == nil || err != nats.ErrBadSubscription { - t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) - } -} diff --git a/vendor/github.com/nats-io/go-nats/test/bench_test.go b/vendor/github.com/nats-io/go-nats/test/bench_test.go deleted file mode 100644 index 5deb12775..000000000 --- a/vendor/github.com/nats-io/go-nats/test/bench_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package test - -import ( - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/go-nats" -) - -func BenchmarkPublishSpeed(b *testing.B) { - b.StopTimer() - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(b) - defer nc.Close() - b.StartTimer() - - msg := []byte("Hello World") - - for i := 0; i < b.N; i++ { - if err := nc.Publish("foo", msg); err != nil { - b.Fatalf("Error in benchmark during Publish: %v\n", err) - } - } - // Make sure they are all processed. - nc.Flush() - b.StopTimer() -} - -func BenchmarkPubSubSpeed(b *testing.B) { - b.StopTimer() - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(b) - defer nc.Close() - - ch := make(chan bool) - - nc.SetErrorHandler(func(nc *nats.Conn, s *nats.Subscription, err error) { - b.Fatalf("Error : %v\n", err) - }) - - received := int32(0) - - nc.Subscribe("foo", func(m *nats.Msg) { - if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) { - ch <- true - } - }) - - msg := []byte("Hello World") - - b.StartTimer() - - for i := 0; i < b.N; i++ { - if err := nc.Publish("foo", msg); err != nil { - b.Fatalf("Error in benchmark during Publish: %v\n", err) - } - // Don't overrun ourselves and be a slow consumer, server will cut us off - if int32(i)-atomic.LoadInt32(&received) > 32768 { - time.Sleep(100 * time.Nanosecond) - } - } - - // Make sure they are all processed. 
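// The publish loop above throttles itself whenever it runs more than 32768
// messages ahead of the subscriber; per its own comment, a slow consumer
// would otherwise be cut off by the server. Generic form of the guard
// (a sketch; sent and maxOutstanding stand in for the loop index and the
// hard-coded 32768):
//
//	if int32(sent)-atomic.LoadInt32(&received) > maxOutstanding {
//		time.Sleep(100 * time.Nanosecond)
//	}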
- err := WaitTime(ch, 10*time.Second) - if err != nil { - b.Fatal("Timed out waiting for messages") - } else if atomic.LoadInt32(&received) != int32(b.N) { - b.Fatalf("Received: %d, err:%v", received, nc.LastError()) - } - b.StopTimer() -} - -func BenchmarkAsyncSubscriptionCreationSpeed(b *testing.B) { - b.StopTimer() - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(b) - defer nc.Close() - b.StartTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - nc.Subscribe("foo", func(m *nats.Msg) {}) - } -} - -func BenchmarkSyncSubscriptionCreationSpeed(b *testing.B) { - b.StopTimer() - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(b) - defer nc.Close() - b.StartTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - nc.SubscribeSync("foo") - } -} - -func BenchmarkInboxCreation(b *testing.B) { - for i := 0; i < b.N; i++ { - nats.NewInbox() - } -} - -func BenchmarkRequest(b *testing.B) { - b.StopTimer() - s := RunDefaultServer() - defer s.Shutdown() - nc := NewDefaultConnection(b) - defer nc.Close() - ok := []byte("ok") - nc.Subscribe("req", func(m *nats.Msg) { - nc.Publish(m.Reply, ok) - }) - b.StartTimer() - b.ReportAllocs() - q := []byte("q") - for i := 0; i < b.N; i++ { - _, err := nc.Request("req", q, 1*time.Second) - if err != nil { - b.Fatalf("Err %v\n", err) - } - } -} diff --git a/vendor/github.com/nats-io/go-nats/test/cluster_test.go b/vendor/github.com/nats-io/go-nats/test/cluster_test.go deleted file mode 100644 index a5d793254..000000000 --- a/vendor/github.com/nats-io/go-nats/test/cluster_test.go +++ /dev/null @@ -1,605 +0,0 @@ -package test - -import ( - "math" - "regexp" - "runtime" - "strings" - "sync" - "testing" - "time" - - "github.com/nats-io/gnatsd/auth" - "github.com/nats-io/go-nats" -) - -var testServers = []string{ - "nats://localhost:1222", - "nats://localhost:1223", - "nats://localhost:1224", - "nats://localhost:1225", - "nats://localhost:1226", - "nats://localhost:1227", - "nats://localhost:1228", -} - -var servers = strings.Join(testServers, ",") - -func TestServersOption(t *testing.T) { - opts := nats.DefaultOptions - opts.NoRandomize = true - - _, err := opts.Connect() - if err != nats.ErrNoServers { - t.Fatalf("Wrong error: '%v'\n", err) - } - opts.Servers = testServers - _, err = opts.Connect() - if err == nil || err != nats.ErrNoServers { - t.Fatalf("Did not receive proper error: %v\n", err) - } - - // Make sure we can connect to first server if running - s1 := RunServerOnPort(1222) - // Do this in case some failure occurs before explicit shutdown - defer s1.Shutdown() - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Could not connect: %v\n", err) - } - if nc.ConnectedUrl() != "nats://localhost:1222" { - nc.Close() - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } - nc.Close() - s1.Shutdown() - - // Make sure we can connect to a non first server if running - s2 := RunServerOnPort(1223) - // Do this in case some failure occurs before explicit shutdown - defer s2.Shutdown() - - nc, err = opts.Connect() - if err != nil { - t.Fatalf("Could not connect: %v\n", err) - } - defer nc.Close() - if nc.ConnectedUrl() != "nats://localhost:1223" { - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } -} - -func TestNewStyleServersOption(t *testing.T) { - _, err := nats.Connect(nats.DefaultURL, nats.DontRandomize()) - if err != nats.ErrNoServers { - t.Fatalf("Wrong error: '%v'\n", err) - } - servers := strings.Join(testServers, ",") - - _, err = 
nats.Connect(servers, nats.DontRandomize()) - if err == nil || err != nats.ErrNoServers { - t.Fatalf("Did not receive proper error: %v\n", err) - } - - // Make sure we can connect to first server if running - s1 := RunServerOnPort(1222) - // Do this in case some failure occurs before explicit shutdown - defer s1.Shutdown() - - nc, err := nats.Connect(servers, nats.DontRandomize()) - if err != nil { - t.Fatalf("Could not connect: %v\n", err) - } - if nc.ConnectedUrl() != "nats://localhost:1222" { - nc.Close() - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } - nc.Close() - s1.Shutdown() - - // Make sure we can connect to a non-first server if running - s2 := RunServerOnPort(1223) - // Do this in case some failure occurs before explicit shutdown - defer s2.Shutdown() - - nc, err = nats.Connect(servers, nats.DontRandomize()) - if err != nil { - t.Fatalf("Could not connect: %v\n", err) - } - defer nc.Close() - if nc.ConnectedUrl() != "nats://localhost:1223" { - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } -} - -func TestAuthServers(t *testing.T) { - var plainServers = []string{ - "nats://localhost:1222", - "nats://localhost:1224", - } - - auth := &auth.Plain{ - Username: "derek", - Password: "foo", - } - - as1 := RunServerOnPort(1222) - as1.SetClientAuthMethod(auth) - defer as1.Shutdown() - as2 := RunServerOnPort(1224) - as2.SetClientAuthMethod(auth) - defer as2.Shutdown() - - pservers := strings.Join(plainServers, ",") - nc, err := nats.Connect(pservers, nats.DontRandomize(), nats.Timeout(5*time.Second)) - if err == nil { - nc.Close() - t.Fatalf("Expect Auth failure, got no error\n") - } - - if matched, _ := regexp.Match(`authorization`, []byte(err.Error())); !matched { - t.Fatalf("Wrong error, wanted Auth failure, got '%s'\n", err) - } - - // Test that we can connect to a subsequent correct server. - var authServers = []string{ - "nats://localhost:1222", - "nats://derek:foo@localhost:1224", - } - aservers := strings.Join(authServers, ",") - nc, err = nats.Connect(aservers, nats.DontRandomize(), nats.Timeout(5*time.Second)) - if err != nil { - t.Fatalf("Expected to connect properly: %v\n", err) - } - defer nc.Close() - if nc.ConnectedUrl() != authServers[1] { - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } -} - -func TestBasicClusterReconnect(t *testing.T) { - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - s2 := RunServerOnPort(1224) - defer s2.Shutdown() - - dch := make(chan bool) - rch := make(chan bool) - - dcbCalled := false - - opts := []nats.Option{nats.DontRandomize(), - nats.DisconnectHandler(func(nc *nats.Conn) { - // Suppress any additional callbacks - if dcbCalled { - return - } - dcbCalled = true - dch <- true - }), - nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }), - } - - nc, err := nats.Connect(servers, opts...) - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - s1.Shutdown() - - // wait for disconnect - if e := WaitTime(dch, 2*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - reconnectTimeStart := time.Now() - - // wait for reconnect - if e := WaitTime(rch, 2*time.Second); e != nil { - t.Fatal("Did not receive a reconnect callback message") - } - - if nc.ConnectedUrl() != testServers[2] { - t.Fatalf("Does not report correct connection: %s\n", - nc.ConnectedUrl()) - } - - // Make sure we did not wait on reconnect for default time. 
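// ("Default time" here is assumed to mean the client's standard
// ReconnectWait, 2s in go-nats defaults of this era.)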
- // Reconnect should be fast since it will be a switch to the - // second server and not be dependent on server restart time. - - // On Windows, a failed connect takes more than a second, so - // account for that. - maxDuration := 100 * time.Millisecond - if runtime.GOOS == "windows" { - maxDuration = 1100 * time.Millisecond - } - reconnectTime := time.Since(reconnectTimeStart) - if reconnectTime > maxDuration { - t.Fatalf("Took longer than expected to reconnect: %v\n", reconnectTime) - } -} - -func TestHotSpotReconnect(t *testing.T) { - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - var srvrs string - if runtime.GOOS == "windows" { - srvrs = strings.Join(testServers[:5], ",") - } else { - srvrs = servers - } - - numClients := 32 - clients := []*nats.Conn{} - - wg := &sync.WaitGroup{} - wg.Add(numClients) - - opts := []nats.Option{ - nats.ReconnectWait(50 * time.Millisecond), - nats.ReconnectHandler(func(_ *nats.Conn) { wg.Done() }), - } - - for i := 0; i < numClients; i++ { - nc, err := nats.Connect(srvrs, opts...) - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - if nc.ConnectedUrl() != testServers[0] { - t.Fatalf("Connected to incorrect server: %v\n", nc.ConnectedUrl()) - } - clients = append(clients, nc) - } - - s2 := RunServerOnPort(1224) - defer s2.Shutdown() - s3 := RunServerOnPort(1226) - defer s3.Shutdown() - - s1.Shutdown() - - numServers := 2 - - // Wait on all reconnects - wg.Wait() - - // Walk the clients and calculate how many of each.. - cs := make(map[string]int) - for _, nc := range clients { - cs[nc.ConnectedUrl()]++ - nc.Close() - } - if len(cs) != numServers { - t.Fatalf("Wrong number of reported servers: %d vs %d\n", len(cs), numServers) - } - expected := numClients / numServers - v := uint(float32(expected) * 0.40) - - // Check that each item is within acceptable range - for s, total := range cs { - delta := uint(math.Abs(float64(expected - total))) - if delta > v { - t.Fatalf("Connected clients to server: %s out of range: %d\n", s, total) - } - } -} - -func TestProperReconnectDelay(t *testing.T) { - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - var srvs string - opts := nats.DefaultOptions - if runtime.GOOS == "windows" { - srvs = strings.Join(testServers[:2], ",") - } else { - srvs = strings.Join(testServers, ",") - } - opts.NoRandomize = true - - dcbCalled := false - closedCbCalled := false - dch := make(chan bool) - - dcb := func(nc *nats.Conn) { - // Suppress any additional calls - if dcbCalled { - return - } - dcbCalled = true - dch <- true - } - - ccb := func(_ *nats.Conn) { - closedCbCalled = true - } - - nc, err := nats.Connect(srvs, nats.DontRandomize(), nats.DisconnectHandler(dcb), nats.ClosedHandler(ccb)) - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - s1.Shutdown() - - // wait for disconnect - if e := WaitTime(dch, 2*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - // Wait, want to make sure we don't spin on reconnect to non-existent servers. - time.Sleep(1 * time.Second) - - // Make sure we are still reconnecting.. 
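// With the assumed go-nats defaults still in effect here (MaxReconnect 60,
// ReconnectWait 2s), one second is nowhere near enough to exhaust the retry
// budget, so the connection should still report nats.RECONNECTING below.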
- if closedCbCalled { - t.Fatal("Closed CB was triggered, should not have been.") - } - if status := nc.Status(); status != nats.RECONNECTING { - t.Fatalf("Wrong status: %d\n", status) - } -} - -func TestProperFalloutAfterMaxAttempts(t *testing.T) { - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - opts := nats.DefaultOptions - // Reduce the list of servers for Windows tests - if runtime.GOOS == "windows" { - opts.Servers = testServers[:2] - opts.MaxReconnect = 2 - } else { - opts.Servers = testServers - opts.MaxReconnect = 5 - } - opts.NoRandomize = true - opts.ReconnectWait = (25 * time.Millisecond) - - dch := make(chan bool) - opts.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - - closedCbCalled := false - cch := make(chan bool) - - opts.ClosedCB = func(_ *nats.Conn) { - closedCbCalled = true - cch <- true - } - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - s1.Shutdown() - - // On Windows, creating a TCP connection to a server not running takes more than - // a second. So be generous with the WaitTime. - - // wait for disconnect - if e := WaitTime(dch, 5*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - // Wait for ClosedCB - if e := WaitTime(cch, 5*time.Second); e != nil { - t.Fatal("Did not receive a closed callback message") - } - - // Make sure we are not still reconnecting.. - if !closedCbCalled { - t.Logf("%+v\n", nc) - t.Fatal("Closed CB was not triggered, should have been.") - } - - // Expect connection to be closed... - if !nc.IsClosed() { - t.Fatalf("Wrong status: %d\n", nc.Status()) - } -} - -func TestProperFalloutAfterMaxAttemptsWithAuthMismatch(t *testing.T) { - var myServers = []string{ - "nats://localhost:1222", - "nats://localhost:4443", - } - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - s2, _ := RunServerWithConfig("./configs/tlsverify.conf") - defer s2.Shutdown() - - opts := nats.DefaultOptions - opts.Servers = myServers - opts.NoRandomize = true - if runtime.GOOS == "windows" { - opts.MaxReconnect = 2 - } else { - opts.MaxReconnect = 5 - } - opts.ReconnectWait = (25 * time.Millisecond) - - dch := make(chan bool) - opts.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - - closedCbCalled := false - cch := make(chan bool) - - opts.ClosedCB = func(_ *nats.Conn) { - closedCbCalled = true - cch <- true - } - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - s1.Shutdown() - - // On Windows, creating a TCP connection to a server not running takes more than - // a second. So be generous with the WaitTime. - - // wait for disconnect - if e := WaitTime(dch, 5*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - // Wait for ClosedCB - if e := WaitTime(cch, 5*time.Second); e != nil { - t.Fatalf("Did not receive a closed callback message, #reconnects: %v", nc.Reconnects) - } - - // Make sure we have not exceeded MaxReconnect - if nc.Reconnects != uint64(opts.MaxReconnect) { - t.Fatalf("Num reconnects was %v, expected %v", nc.Reconnects, opts.MaxReconnect) - } - - // Make sure we are not still reconnecting.. - if !closedCbCalled { - t.Logf("%+v\n", nc) - t.Fatal("Closed CB was not triggered, should have been.") - } - - // Expect connection to be closed... 
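// Rough bound (a sketch): MaxReconnect(5) * ReconnectWait(25ms) is roughly
// 125ms of deliberate waiting, so the generous 5s WaitTime above is dominated
// by TCP connect timeouts, not by the reconnect delay itself.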
- if !nc.IsClosed() { - t.Fatalf("Wrong status: %d\n", nc.Status()) - } -} - -func TestTimeoutOnNoServers(t *testing.T) { - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - opts := nats.DefaultOptions - if runtime.GOOS == "windows" { - opts.Servers = testServers[:2] - opts.MaxReconnect = 2 - opts.ReconnectWait = (100 * time.Millisecond) - } else { - opts.Servers = testServers - // 1 second total time wait - opts.MaxReconnect = 10 - opts.ReconnectWait = (100 * time.Millisecond) - } - opts.NoRandomize = true - - dch := make(chan bool) - opts.DisconnectedCB = func(nc *nats.Conn) { - // Suppress any additional calls - nc.SetDisconnectHandler(nil) - dch <- true - } - - cch := make(chan bool) - opts.ClosedCB = func(_ *nats.Conn) { - cch <- true - } - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - s1.Shutdown() - - // On Windows, creating a connection to a non-running server takes - // more than a second. So be generous with WaitTime - - // wait for disconnect - if e := WaitTime(dch, 5*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - startWait := time.Now() - - // Wait for ClosedCB - if e := WaitTime(cch, 5*time.Second); e != nil { - t.Fatal("Did not receive a closed callback message") - } - - if runtime.GOOS != "windows" { - timeWait := time.Since(startWait) - - // Use 500ms as variable time delta - variable := (500 * time.Millisecond) - expected := (time.Duration(opts.MaxReconnect) * opts.ReconnectWait) - - if timeWait > (expected + variable) { - t.Fatalf("Waited too long for Closed state: %d\n", timeWait/time.Millisecond) - } - } -} - -func TestPingReconnect(t *testing.T) { - RECONNECTS := 4 - s1 := RunServerOnPort(1222) - defer s1.Shutdown() - - opts := nats.DefaultOptions - opts.Servers = testServers - opts.NoRandomize = true - opts.ReconnectWait = 200 * time.Millisecond - opts.PingInterval = 50 * time.Millisecond - opts.MaxPingsOut = -1 - - var wg sync.WaitGroup - wg.Add(1) - rch := make(chan time.Time, RECONNECTS) - dch := make(chan time.Time, RECONNECTS) - - opts.DisconnectedCB = func(_ *nats.Conn) { - d := dch - select { - case d <- time.Now(): - default: - d = nil - } - } - - opts.ReconnectedCB = func(c *nats.Conn) { - r := rch - select { - case r <- time.Now(): - default: - r = nil - wg.Done() - } - } - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected to connect, got err: %v\n", err) - } - defer nc.Close() - - wg.Wait() - s1.Shutdown() - - <-dch - for i := 0; i < RECONNECTS-1; i++ { - disconnectedAt := <-dch - reconnectAt := <-rch - pingCycle := disconnectedAt.Sub(reconnectAt) - if pingCycle > 2*opts.PingInterval { - t.Fatalf("Reconnect due to ping took %s", pingCycle.String()) - } - } -} diff --git a/vendor/github.com/nats-io/go-nats/test/configs/certs/ca.pem b/vendor/github.com/nats-io/go-nats/test/configs/certs/ca.pem deleted file mode 100644 index 17447f945..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/certs/ca.pem +++ /dev/null @@ -1,38 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGjzCCBHegAwIBAgIJAKT2W9SKY7o4MA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD -VQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzAR -BgNVBAoTCkFwY2VyYSBJbmMxEDAOBgNVBAsTB25hdHMuaW8xEjAQBgNVBAMTCWxv -Y2FsaG9zdDEcMBoGCSqGSIb3DQEJARYNZGVyZWtAbmF0cy5pbzAeFw0xNTExMDUy -MzA2MTdaFw0xOTExMDQyMzA2MTdaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC -Q0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzARBgNVBAoTCkFwY2VyYSBJbmMx 
-EDAOBgNVBAsTB25hdHMuaW8xEjAQBgNVBAMTCWxvY2FsaG9zdDEcMBoGCSqGSIb3 -DQEJARYNZGVyZWtAbmF0cy5pbzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC -ggIBAJOyBvFaREbmO/yaw8UD8u5vSk+Qrwdkfa0iHMo11nkcVtynHNKcgRUTkZBC -xEZILVsuPa+WSUcUc0ej0TmuimrtOjXGn+LD0TrDVz6dd6lBufLXjo1fbUnKUjml -TBYB2h7StDksrBPFnbEOVKN+qb1No4YxfvbJ6EK3xfnsm3dvamnetJugrmQ2EUlu -glPNZDIShu9Fcsiq2hjw+dJ2Erl8kx2/PE8nOdcDG9I4wAM71pw9L1dHGmMOnTsq -opLDVkMNjeIgMPxj5aIhvS8Tcnj16ZNi4h10587vld8fIdz+OgTDFMNi91PgZQmX -9puXraBGi5UEn0ly57IIY+aFkx74jPWgnVYz8w8G+W2GTFYQEVgHcPTJ4aIPjyRd -m/cLelV34TMNCoTXmpIKVBkJY01t2awUYN0AcauhmD1L+ihY2lVk330lxQR11ZQ/ -rjSRpG6jzb6diVK5wpNjsRRt5zJgZr6BMp0LYwJESGjt0sF0zZxixvHu8EctVle4 -zX6NHDic7mf4Wvo4rfnUyCGr7Y3OxB2vakq1fDZ1Di9OzpW/k8i/TE+mPRI5GTZt -lR+c8mBxdV595EKHDxj0gY7PCM3Pe35p3oScWtfbpesTX6a7IL801ZwKKtN+4DOV -mZhwiefztb/9IFPNXiuQnNh7mf7W2ob7SiGYct8iCLLjT64DAgMBAAGjgfMwgfAw -HQYDVR0OBBYEFPDMEiYb7Np2STbm8j9qNj1aAvz2MIHABgNVHSMEgbgwgbWAFPDM -EiYb7Np2STbm8j9qNj1aAvz2oYGRpIGOMIGLMQswCQYDVQQGEwJVUzELMAkGA1UE -CBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzARBgNVBAoTCkFwY2VyYSBJ -bmMxEDAOBgNVBAsTB25hdHMuaW8xEjAQBgNVBAMTCWxvY2FsaG9zdDEcMBoGCSqG -SIb3DQEJARYNZGVyZWtAbmF0cy5pb4IJAKT2W9SKY7o4MAwGA1UdEwQFMAMBAf8w -DQYJKoZIhvcNAQELBQADggIBAIkoO+svWiudydr4sQNv/XhDvH0GiWMjaI738fAB -sGUKWXarXM9rsRtoQ78iwEBZmusEv0fmJ9hX275aZdduTJt4AnCBVptnSyMJS6K5 -RZF4ZQ3zqT3QOeWepLqszqRZHf+xNfl9JiXZc3pqNhoh1YXPubCgY+TY1XFSrL+u -Wmbs3n56Cede5+dKwMpT9SfQ7nL1pwKihx16vlBGTjjvJ0RE5Tx+0VRcDgbtIF52 -pNlvjg9DL+UqP3S1WR0PcsUss/ygiC1NDegZr+I/04/wEG9Drwk1yPSshWsH90W0 -7TmLDoWf5caAX62jOJtXbsA9JZ16RnIWy2iZYwg4YdE0rEeMbnDzrRucbyBahMX0 -mKc8C+rroW0TRTrqxYDQTE5gmAghCa9EixcwSTgMH/U6zsRbbY62m9WA5fKfu3n0 -z82+c36ijScHLgppTVosq+kkr/YE84ct56RMsg9esEKTxGxje812OSdHp/i2RzqW -J59yo7KUn1nX7HsFvBVh9D8147J5BxtPztc0GtCQTXFT73nQapJjAd5J+AC5AB4t -ShE+MRD+XIlPB/aMgtzz9Th8UCktVKoPOpFMC0SvFbbINWL/JO1QGhuZLMTKLjQN -QBzjrETAOA9PICpI5hcPtTXz172X+I8/tIEFrZfew0Fdt/oAVcnb659zKiR8EuAq -+Svp ------END CERTIFICATE----- diff --git a/vendor/github.com/nats-io/go-nats/test/configs/certs/client-cert.pem b/vendor/github.com/nats-io/go-nats/test/configs/certs/client-cert.pem deleted file mode 100644 index 549c9b389..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/certs/client-cert.pem +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFPDCCAySgAwIBAgIJAO+k4G7bNTypMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD -VQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzAR -BgNVBAoTCkFwY2VyYSBJbmMxEDAOBgNVBAsTB25hdHMuaW8xEjAQBgNVBAMTCWxv -Y2FsaG9zdDEcMBoGCSqGSIb3DQEJARYNZGVyZWtAbmF0cy5pbzAeFw0xNTExMDUy -MzEwNDdaFw0xOTExMDQyMzEwNDdaMBYxFDASBgNVBAMTC25hdHMtY2xpZW50MIIC -IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEArgLxszD5/vDrDUwwIEgQx9I0 -J/H6MXPO0Tj9D2BnR+nwjCe9M03fsq4Il96BVzoaAiAQD1r4NyAX2adKydlnE3/m -bUFiSVHErJceEi9aSs+WlLdmKEgU2qrsIal9KzthlI786qtjb7OFSCxP14R4xYA5 -dlZXhJ9oUuFhVTdaVmRMzWuWj8RbBx8VptSZ0f7Q+Uv8GuB0kyiVkv6GYcH/IWuI -7jnM0QcVWBmxJfWmqd0yx/FLlX/LRXqdiyoFSIlMaP0VOwto3uEhAoBk83Z+/zrZ -Brymx1Nnz3qzTCf8/mdMjPuWibXDTLbo0/Kf6neHs6wxx8irb1ZfIwhn8grXTcgd -rg9bfcyyUOBey7QXiedpU0xFqoH26E+Aq+CV4R56i1sJKsSYEGu8O69H8zu5dgan -LZRhcCHcZhMe7Nbiu5BcuOW4r3rGDMTLXSugEX91iy5jJaYmRjtPN5imQIJtf+GK -Vq7YLv4MQV6R3xRiZXaocCae1qzIMc4kxCKvZTmxuJsvIUPjNnGumwbjV/a2fLFX -9tMqUKyEmiPtFtqNH/kmkHCQ5FGYIIj3wGuD5yWfK5Tr3iHOdNJoNNPgPBg9tMRw -j3+W8+uyBxc+FUEb8a9m3R4VmAYyiqgzCA0DWZBF1fOYLWfRnwS5OBKiP4OUlUEb -YZUEzfvDbLOwQrb123cCAwEAAaMXMBUwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJ -KoZIhvcNAQELBQADggIBACNKPbvaXwl5rRTqFw37Am1r6e+LkUg9dFogSwXDnuT/ -RRZJi5MHsC5MUOkHB28lTmPwkAogs+LBmKrM0Npzk6OPkT/LCgKqpVoz2Tc1nGMI 
-Jy8jxPYogMmDCOhoEoC7zsWABMLiX5KDAuKommk61w7AwKu4kK198ngwbfF2fzdH -1DUGID7iV4fyPGI+pCU3Ullv51c5xkhqjVy1JYdYc0+s6rFyVTibSABa7PfHE2ML -A+cNFWoKQhugVHQU7qYvuWvnEqZro2T6nmSmpK3oOaUgVnDuY2q4JwiMbZAtuyD7 -8LFwCim49WzgYcfs/BwKlUrTV/QBYurruHWjElZzwA39/ZlbnOjJJ85j/YqxR+4S -fK/KktegyrPJU3fxdl2+77zVlfgzxaQ//58vx5LgXWhl2KeHyakeD0jQFVn1R7GD -bynAlHlSOr+nGkwP2WVqXKf+l/gb/gUEY7bC8fCVRCctkcK+smEl+sIKH3O9JY8l -rBWjOXkMY91ZDh77hfTNni/s2/DGAoNrEft8rgu3/NPxhCTfQH3ranCryth9mF6I -qsOFr5/81WGKqU+Kec8st/RSU2vBjBp41HILAEEhUiB6prhc9B3+exwkvQSPz22W -PIvhkzqeOYRoEDE2bWGC1ukd818qvQp618eLBmJSvwGh4YfUcmgqHaEk2NjoPIMV ------END CERTIFICATE----- diff --git a/vendor/github.com/nats-io/go-nats/test/configs/certs/client-key.pem b/vendor/github.com/nats-io/go-nats/test/configs/certs/client-key.pem deleted file mode 100644 index bb44aa5a5..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/certs/client-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEArgLxszD5/vDrDUwwIEgQx9I0J/H6MXPO0Tj9D2BnR+nwjCe9 -M03fsq4Il96BVzoaAiAQD1r4NyAX2adKydlnE3/mbUFiSVHErJceEi9aSs+WlLdm -KEgU2qrsIal9KzthlI786qtjb7OFSCxP14R4xYA5dlZXhJ9oUuFhVTdaVmRMzWuW -j8RbBx8VptSZ0f7Q+Uv8GuB0kyiVkv6GYcH/IWuI7jnM0QcVWBmxJfWmqd0yx/FL -lX/LRXqdiyoFSIlMaP0VOwto3uEhAoBk83Z+/zrZBrymx1Nnz3qzTCf8/mdMjPuW -ibXDTLbo0/Kf6neHs6wxx8irb1ZfIwhn8grXTcgdrg9bfcyyUOBey7QXiedpU0xF -qoH26E+Aq+CV4R56i1sJKsSYEGu8O69H8zu5dganLZRhcCHcZhMe7Nbiu5BcuOW4 -r3rGDMTLXSugEX91iy5jJaYmRjtPN5imQIJtf+GKVq7YLv4MQV6R3xRiZXaocCae -1qzIMc4kxCKvZTmxuJsvIUPjNnGumwbjV/a2fLFX9tMqUKyEmiPtFtqNH/kmkHCQ -5FGYIIj3wGuD5yWfK5Tr3iHOdNJoNNPgPBg9tMRwj3+W8+uyBxc+FUEb8a9m3R4V -mAYyiqgzCA0DWZBF1fOYLWfRnwS5OBKiP4OUlUEbYZUEzfvDbLOwQrb123cCAwEA -AQKCAgAQUkBfYVGhgvFZDvNYo8nHJEU2FfE0oDsezqyVu6IUUbH5Q2TwofZAaShv -LjSNfOqhlmZLOmobqYvzI0jVg+myH4X6a26Pl/bNhWMRq5VZfP0Pt+ACGTizheKe -Caqu2mP9rie0zxyFhp4Ste1LNqapR6ycF98flmAPngomFwoHHmNBxTybAXzUPysl -ub0vwCnTqDfeQX1NrDnTTsJF+w82EEMIrS0z0elDmS1PdSoLtq6jqFNBk3n6a1TJ -j8htFEuxcUODhT9x4EXbWTWezFd/EwL2Kc2u1njfMhANLZcCOagpdROamQzXbjSK -ZLBxKoL07ErDBWRnDf/gZlJxlmi5QFgy3LFvmZ93sbedzRaTDsjXEpbTse/l36QY -6YCjSnb2zUX2AElKmyC/QwR8BZ9afRQM7x3eqLkE1q4jkLsk3+W3VroyaoOfQxiB -k+xtL5cxoa9SiTgETNHpFQhiTNyX7FlH1ykoJzTryLsbccTd1iP7DF5ZPt8DfgIZ -PLzwh7PDiK5cpitm8g6TdvuLA9FT+bEtd/78odN++VDhkcCmSQMWKk3Xt8wznNcY -8Ye5JC/4aHRueWCziWaJYJHi6ZNCt4CR5wzEGBmPlf0562UpQpfEuDOQDRX3FaMs -qYbCrRVeQL3wXcu3sVToj9zSES2R+kQfTwaqdypgS79y0Dp6eQKCAQEA2BAu0Cqn -xmjuqn/qpPXtW3kryHPP7eyzt53o8Xg7RqQ0oT+FNiO3o4aGoVlxkMjBW+NOpWo1 -VtsTrsB+RxIiuugb9/D2dy1z5BK2x4bvurxkyOovU3J2WHSNIUsbQ5FSN8w5sAcl -+1QFNcM5ooBa7VahRV2vJcGe9P+QFR75c4xSCvG6AOu8WzZNUNOw97s/N24NevU5 -26Ql20zwn+E0avd3yuFU7bKrvXh9v6lNqWhjkJePk8eTh/5O4cTuF/cB3wPcgjiC -24uyNI29lAVHS/+h0nVTdm0F1Fel8nwPkOLyRJUyEzWm8SX2rnwI3EegWaRyDohp -a1hmjHsCcpoxhQKCAQEAzizucnHqwxEQiMaJPUKBi3v3j+a/me3PfsY1760LdLVY -AcMuGr+wg2/e9d7jMvEIxlACng4aU2kKG0fOxS0G0e7AefB9DiwzexJ+pHu0R49p -PmkAoPl2+mAlfeqvwEJ4gQEH8hKoIEkU0XAPZfWMTlshCJgAyYYpsLlJl0f8ooa3 -4VRg3hjfWj+Z5pQryojN/Pfl4XRoM11xdaa79odvtptpN3KWxs9IhesM1o4mi4kC -Dd996iQpNau1bF6LHmEXJhbkEJ+SDXUDvEx6d3HYAFNPyWLe4DtJn38qb1gtuesZ -vGntToaAN12z4vJIj75vuduSJei8ceXcixYo1WZrywKCAQEAiz9avERRXpjwAChy -lB/++i4MnqKtBjy/0n3NzBndsfhQBwAGHU9FofkoOUKI43PO0iab4BWkDLciZ0Sd -3bX9dhHzPIcqgMJlZz78V3lKdUHHfokXOSOSzA1Ji4R5LMGyiE1xfFYPD3wl43FP -asBoWX+0bh0jrSStCl7OgB43TFXJ5k3Fv6Qt/2buy0GzUuV1p4ag33a99CVFVKGw -jom4m5ujs7gnYQ3+ixzlhilZ6O1jBaP4H5jHJyUpt22QuRczOISnj7FV/KJ6lk4n -OQdx3LQCmb2NrcwzrpdSVwXHjmwFEVhKLoEsd0wtQGSl3Tm4SS2naGBX+Ju/c5gv -iqZ/dQKCAQAzDJcByUkKgZgpdZcXjvcKdWhnvgek8mgVCLjkHmGexSQEU7J/twTa 
-loGLOWPiAiJdEASF5BIKoxB4jsAYvDxbEJWh27TrJHCewYaP7X1G1rCFXnRkZ0BZ -YCMIWWqo3Qx/TKUOACaWz+GStf9qDHFwGUpFmXVgcJK0Cjy5c36PM3ImHcFaXKg4 -7VSK7hclr9fpEexedXczeKiWK/GQahp0CWj07K9+jGZ1mix0l3/dvs++ZZ8EsW1u -t5RVP9eMbxfPO42+u/Pq1xVUs08DcjG8auRvhcaPmL5y+oakSR4RUa/uof+7GLx4 -eQAIalsjFFEPoNk//69hODvySEtWA2UfAoIBACGXYc0SuE9m2KxnxLiy4yEvDbw1 -3KO9Gwv+0iRaeCizdCTwaSu/weQrw9ddpfmeqdGhwsvH1S5WyFqtwsjS7abdj4cg -KJ3nuR1EDInFQcu9ii+T8MSTc64cPkJVIYHwYiwE2Whj+6F7KFc1mf33/zrivruT -6Mm1YJv11KkBDAaM4Bj37DQfCrYh6quxczCT827YX7Wuw9YGQZYZh/xzss0Tkfzm -LgHriX+8U7+rL24Fi+merhDhjO95NVkRSIDmg+pULaWkeDOyVxfLCIMmy7JByHW4 -fyDr/w1dfkx/yiV0xvkrfT+sOFmnMjfgMwmit3tfm7zkmkzNfmASugDPWjA= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/nats-io/go-nats/test/configs/certs/key.pem b/vendor/github.com/nats-io/go-nats/test/configs/certs/key.pem deleted file mode 100644 index 113a87e1a..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/certs/key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAtgHLcgRjeSqV/mHa8S2T0IHhWe0AP55pVzdj3G4UcniTRJyy -PCtgfdhzBBbR8Ok5AIjTXTZihBPu08IFP6sLTDWYzzbRlIpL/LZIgr1wzosdaRRt -BxZ95ov67PYcHeNMSby2YQQVMsEkUxsylSy+MDkYuoZRGzCw2NgSXwz3BLUERPDZ -754IVrjDGr2gYen8OCHS9mCUfNAvmiwSlFy3VppCjo6NbNlzUKDHhLGYw6gxYXwF -DOU7tqKRtkQnGTTdMgU2mH9rMm3ua+Iyx5bvaY/5tf2yb/xuwg2JiAkwzYcDKMiA -VUxdfwBh8QULjCjNiWguqfTLL1N2OHIZuxSODTJN3iUD0uQYqugF1jV2s9J6Tk2P -1uvbtQYYZ9TZ10APnFgEh54Vj7eepJPzryghcH+bU/vWny2mSC6PH9Goqvee86oE -eLOahBpZmw8Ldf8lzg29UeKGm43M3+7UPmbEaHGzH5GqesiSFLQio2uiSCA9lrO6 -CYee133keBNvcmmNjdEYRhcBA2v6ZkZQJz4JW7SaEVfEAxlx9WnmcODiEoeJpG/Q -pxqoGaefwAHnDkWJOmnNRtE/TPPsaTCt26XBHpzYRvnvn7/TbZNuALHwH1IfjMlF -OPma2srnp4WBNye5cH5idZo/v/uqYohnPGt3dQO+fNpuGcyKIgru8vyqI5MCAwEA -AQKCAgEAl6zBNUAxAW2a2AYGZgx8bTt/Z+hY16uUz8jqIG1f/tE6sOgApKHlZJp3 -pwW5aRGCnk5oDfrfeH///Fpo81kALj9QHAbr+uSRVIU3wjRLCOTn2oTaIxj8TJ+E -ueqTHdko3x4zwn+bhtNsCRHWQnip+hfq4q5Ccu1Nwze1f56XUEXly+oHRGenPVX1 -yZgTSuWqecC+RPHRbH413T4zMY5efv5IzvI/K2G/doa2Hn+99fd5R2sJ7mguLhIm -agU7rAbg+ulbSRSOadUw5pj3hlrjI06HY8GK7UYpqu+LGGHIWM7VtCv6vprII6lW -9Xsl12S9fG/ky1+j38mm8H0tsjj78t2L6ZDS2Fb9usbM5VhdQfQpTBTSfAEZPeus -X2QTpTXnp5oHM7CzcQuGE25CruSHEJPy/Y0hTaunNBQ9VY6M/Pcq0sB0xAa0hN5H -PqOae1/fNKR/7iwdptesNGguZoLnNd1yeVBdZ55SZw7+9hjIPAjn3iLNqfieSpXL -5lG+Z0JEUMW0f1MRmU9AsR2x4Dlpvulrn39Oc5vgc0JP+r7+MMpY5BpWS5WhTxqm -tx1qh49yXFXIIEXqxjIIxQ3NO1del8QNDUGROnqlh5gFRADIcJpZMv8uAhSHEXm3 -+3PndJoCIfNv9gE8zNsB3r3PPgelG3wagy/eDe59PH0JvUmTWZkCggEBANxBkHAT -LB5hkp3hAwmop62HgkG8k6Ht11q2qGgkO/EhfsgsZXTpI3LZZ3Nrf+5IZiwStloW -iZwY/xocGL6tIFcuXHRqDDDPNRFUVxhSdcQd2mL7R6uin9eJ4ccQdaOXplQXOXFG -G7wAIhfGR7JnyzS1+eKItdFYrU63BeavPLltE4GV4pFJIFXEXc3v87j/Ba9uIop1 -/zytEn37yzDxdptH0HYtCm4Ve17n0STwvf9Le7b3ZFbs/cj3akAoSOTy/bYKNZl4 -EtaT0T7AGr8qJIaAlUYtva30+sQ2ytXHOdjkKD38xTN2oXoHgAfn7wIinzM+rbGi -d6FFIiARlp1g0O0CggEBANOLMJSvNeMxlM+8LJ0xo2J20Lk+1EGyb0+Ltp6jkrRW -SPCvnNC7Ww6L6tRfCvatnb0qTvfR/HfM1oE2e2Q2QL+hZoZyxXEiZHd/ERyAj398 -uImSz8bkRPWzPZU0wqYO621MEdY+fPcQfZDMBlcA25cFlvuiCRoeRQ1DIREDKMMG -Cnhbvv0f2J7e9rVAIqrTRtxKaRAIwU4YVIG2ymwWA+P/3/NFlYC344MGfoeum0NI -qazULaAVKE99jV3sYC2twcrGgXel/OSGCX33WCVsQKIhIOGDib1KzyJHTBr+D8Tu -rbO4fmyJtUpKC+XCIXto7ebbo0sVE2+7dp5ofBhCtn8CggEBALvBABkpnsA/OLZw -qyA+rsET9IuI7uhoUN25OxGbYaWJggOtJMdmPZuXi8It7x32hXIoeV2OPLvd6wgc -z1MrTZhDovhxtfadi4U8Ogo3sL//Grypq0y6EjuwA9CnTUCo81ZXfdX7h4TZMDbI -BTIlnGlQfrUHCMZuKz4gcl1VIBSI0Mn0NPDYP0IdZEE6vK4EZppG7hbNw0e72Tmf -vHP6QbrYmvFCL9PraAFc50HwHmZTuCAd/2DCIQyBLAeIz6qrIG9fgJVUb+qOkx5E -sAgpKn2lepoaP8jcPi+o7XsSm1MyGsPMh2X5SGk3n4IdyfYuATuzwGjeL9A/mHlx 
-xMxfTXkCggEAGYuTYEEQNtFD8Rn+ITVfT4KdjeEibJSJkIeEk/+YtaI9yKLMQwB8 -7HLE9sRLZKJui+tSAecfn6/ir1PO7rkGdJ2e7dlqMlE+5Jc5j8GOkoyTFDngUVo7 -YZg1dZEbeEYQ8+/dr4t4N7WMFDIvCc6WtdP8+YIFq1vAZuuWUKGbCIHwPbyGgbaY -yAaQsC6AgTRmOC/cJA2Kmk2h1tAl/YtjCONbPdtHRHXwSWA9Y1EYerWJl88/ezdS -2NaGfbMPojR7VGtIMxSeR1JQTx/RSyOZYnqxp8nkljE0diU58YCAkv1niG5dBepT -NBdg/GvG80omgFxBic2PvUxb9KEVazCTLQKCAQEAwx3aNk2lMovLzuMRqj2O7rqs -4usiHDllR1S7vAySUqhBaL8l+y1lsulgCDExClt3SQpsaM5xep1sK5jN8REzKsE9 -xBgXkNRgy+/1VGa1Tx0DR6xLoAIYT7Ttm27kellAFLE1tEFsSdZP9ZcfwjYKQEuu -Bsm4zf5duDb+hLraxK9ISqcc8ZUSlCLkj9GdhLwf+/8C81LXkS2ScR8Edumn8qe7 -IYqqWSYqKhaoqmx6sr8E0SIn6PKd7uXZnXTTxTf6AR1RNzFcStIL5lC06V6Savpa -tSX2voU3DgUIDYrYUhDweukR8i+0nrkR8wRUUjxaAeegUIRHN5ffpk57lQNaNg== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/nats-io/go-nats/test/configs/certs/server.pem b/vendor/github.com/nats-io/go-nats/test/configs/certs/server.pem deleted file mode 100644 index 46bc9133c..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/certs/server.pem +++ /dev/null @@ -1,31 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFQTCCAymgAwIBAgIJAO+k4G7bNTyoMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD -VQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzAR -BgNVBAoTCkFwY2VyYSBJbmMxEDAOBgNVBAsTB25hdHMuaW8xEjAQBgNVBAMTCWxv -Y2FsaG9zdDEcMBoGCSqGSIb3DQEJARYNZGVyZWtAbmF0cy5pbzAeFw0xNTExMDUy -MzA2MzRaFw0xOTExMDQyMzA2MzRaMBQxEjAQBgNVBAMTCWxvY2FsaG9zdDCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALYBy3IEY3kqlf5h2vEtk9CB4Vnt -AD+eaVc3Y9xuFHJ4k0ScsjwrYH3YcwQW0fDpOQCI0102YoQT7tPCBT+rC0w1mM82 -0ZSKS/y2SIK9cM6LHWkUbQcWfeaL+uz2HB3jTEm8tmEEFTLBJFMbMpUsvjA5GLqG -URswsNjYEl8M9wS1BETw2e+eCFa4wxq9oGHp/Dgh0vZglHzQL5osEpRct1aaQo6O -jWzZc1Cgx4SxmMOoMWF8BQzlO7aikbZEJxk03TIFNph/azJt7mviMseW72mP+bX9 -sm/8bsINiYgJMM2HAyjIgFVMXX8AYfEFC4wozYloLqn0yy9TdjhyGbsUjg0yTd4l -A9LkGKroBdY1drPSek5Nj9br27UGGGfU2ddAD5xYBIeeFY+3nqST868oIXB/m1P7 -1p8tpkgujx/RqKr3nvOqBHizmoQaWZsPC3X/Jc4NvVHihpuNzN/u1D5mxGhxsx+R -qnrIkhS0IqNrokggPZazugmHntd95HgTb3JpjY3RGEYXAQNr+mZGUCc+CVu0mhFX -xAMZcfVp5nDg4hKHiaRv0KcaqBmnn8AB5w5FiTppzUbRP0zz7GkwrdulwR6c2Eb5 -75+/022TbgCx8B9SH4zJRTj5mtrK56eFgTcnuXB+YnWaP7/7qmKIZzxrd3UDvnza -bhnMiiIK7vL8qiOTAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA -ATANBgkqhkiG9w0BAQsFAAOCAgEAOrh8XfW6quwBAcCxHf6/uvu/iNq4yHCg2qH6 -VtWs/x38I2t3BRSNsLsJGieh6yLlZDzOus+XYui4uDE50XmcwaIsY0VcXnvdyZVZ -w9+lMyfp00kRF1o3B6eVxq0pRE5VB0cai7XI7tyfpRwGzA+oNLF4vBvxAHm9Ony5 -Q57DC/HFzyUogdkMYciO/kd9oa4HosDEXwaE8UvZUL8OVl/dptMXLL/GGwzZsUAE -1sLAbgm044YChLUDzgBAtDTkB/HNkcPzSKwULuskhe7ndoaEQNXVZuP7quGiZ/W1 -1lE59gnmnyG8ySFCL05jHrKLtFAJe88gQjgDK65ZJv4W/k7ocmT+HhCxWyQWcX6v -abJ0EssqeSQuzRMuZebMJJ8s46d6RcYuMdIX3RDXq+1moJDFopE7lgNrlRhWgaky -Og8f/u8s1j75tk1YaYcY9uBKjKk7f681R9wMumkd6IEmEvkUwHNFsctxi4fGI7h1 -PRdKL0DlhVmnpHlKs6Kvm2sJ3twSAGSrC4u0LuxACeR3XbiBfyhFV/291LSuw/y1 -JtWOW5koh0g1k9xtkiu3/ePVdG/CLp796IyRhdB1jP/vD7W5RLLG/VAlomfjsPsB -AnwFYbVZ8KrmMKYUpTJOH31CRzFdOB6nWqXu5tk3nOtLKo1nIOuVtmp9XLz3VtHe -NiZPnqA= ------END CERTIFICATE----- diff --git a/vendor/github.com/nats-io/go-nats/test/configs/tls.conf b/vendor/github.com/nats-io/go-nats/test/configs/tls.conf deleted file mode 100644 index 06394b1b9..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/tls.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# Simple TLS config file - -port: 4443 -net: localhost # net interface - -tls { - cert_file: "./configs/certs/server.pem" - key_file: "./configs/certs/key.pem" - timeout: 2 -} - -authorization { - user: derek - password: buckley - timeout: 1 -} diff --git 
a/vendor/github.com/nats-io/go-nats/test/configs/tlsverify.conf b/vendor/github.com/nats-io/go-nats/test/configs/tlsverify.conf deleted file mode 100644 index 29e6a77f7..000000000 --- a/vendor/github.com/nats-io/go-nats/test/configs/tlsverify.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# Simple TLS config file - -port: 4443 -net: localhost - -tls { - cert_file: "./configs/certs/server.pem" - key_file: "./configs/certs/key.pem" - timeout: 2 - - # Optional certificate authority for clients - ca_file: "./configs/certs/ca.pem" - - # Require a client certificate - verify: true -} diff --git a/vendor/github.com/nats-io/go-nats/test/conn_test.go b/vendor/github.com/nats-io/go-nats/test/conn_test.go deleted file mode 100644 index 68b824751..000000000 --- a/vendor/github.com/nats-io/go-nats/test/conn_test.go +++ /dev/null @@ -1,1297 +0,0 @@ -package test - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "runtime" - "strconv" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/go-nats" -) - -func TestDefaultConnection(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - nc.Close() -} - -func TestConnectionStatus(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - if nc.Status() != nats.CONNECTED { - t.Fatal("Should have status set to CONNECTED") - } - if !nc.IsConnected() { - t.Fatal("Should have status set to CONNECTED") - } - nc.Close() - if nc.Status() != nats.CLOSED { - t.Fatal("Should have status set to CLOSED") - } - if !nc.IsClosed() { - t.Fatal("Should have status set to CLOSED") - } -} - -func TestConnClosedCB(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ch := make(chan bool) - o := nats.DefaultOptions - o.Url = nats.DefaultURL - o.ClosedCB = func(_ *nats.Conn) { - ch <- true - } - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - nc.Close() - if e := Wait(ch); e != nil { - t.Fatalf("Closed callback not triggered\n") - } -} - -func TestCloseDisconnectedCB(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ch := make(chan bool) - o := nats.DefaultOptions - o.Url = nats.DefaultURL - o.AllowReconnect = false - o.DisconnectedCB = func(_ *nats.Conn) { - ch <- true - } - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - nc.Close() - if e := Wait(ch); e != nil { - t.Fatal("Disconnected callback not triggered") - } -} - -func TestServerStopDisconnectedCB(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ch := make(chan bool) - o := nats.DefaultOptions - o.Url = nats.DefaultURL - o.AllowReconnect = false - o.DisconnectedCB = func(nc *nats.Conn) { - ch <- true - } - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - s.Shutdown() - if e := Wait(ch); e != nil { - t.Fatalf("Disconnected callback not triggered\n") - } -} - -func TestServerSecureConnections(t *testing.T) { - s, opts := RunServerWithConfig("./configs/tls.conf") - defer s.Shutdown() - - endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) - secureURL := fmt.Sprintf("nats://%s:%s@%s/", opts.Username, opts.Password, endpoint) - - // Make sure this succeeds - nc, err := nats.Connect(secureURL, nats.Secure()) - if err != nil { - t.Fatalf("Failed to create secure (TLS) connection: %v", err) - } - defer nc.Close() - - omsg := []byte("Hello World") - 
checkRecv := make(chan bool) - - received := 0 - nc.Subscribe("foo", func(m *nats.Msg) { - received += 1 - if !bytes.Equal(m.Data, omsg) { - t.Fatal("Message received does not match") - } - checkRecv <- true - }) - err = nc.Publish("foo", omsg) - if err != nil { - t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) - } - nc.Flush() - - if err := Wait(checkRecv); err != nil { - t.Fatal("Failed receiving message") - } - - nc.Close() - - // Server required, but not requested. - nc, err = nats.Connect(secureURL) - if err == nil || nc != nil || err != nats.ErrSecureConnRequired { - if nc != nil { - nc.Close() - } - t.Fatal("Should have failed to create secure (TLS) connection") - } - - // Test flag mismatch - // Wanted but not available.. - ds := RunDefaultServer() - defer ds.Shutdown() - - nc, err = nats.Connect(nats.DefaultURL, nats.Secure()) - if err == nil || nc != nil || err != nats.ErrSecureConnWanted { - if nc != nil { - nc.Close() - } - t.Fatalf("Should have failed to create connection: %v", err) - } - - // Let's be more TLS correct and verify servername, endpoint etc. - // Now do more advanced checking, verifying servername and using rootCA. - // Setup our own TLSConfig using RootCA from our self signed cert. - rootPEM, err := ioutil.ReadFile("./configs/certs/ca.pem") - if err != nil || rootPEM == nil { - t.Fatalf("failed to read root certificate") - } - pool := x509.NewCertPool() - ok := pool.AppendCertsFromPEM([]byte(rootPEM)) - if !ok { - t.Fatal("failed to parse root certificate") - } - - tls1 := &tls.Config{ - ServerName: opts.Host, - RootCAs: pool, - MinVersion: tls.VersionTLS12, - } - - nc, err = nats.Connect(secureURL, nats.Secure(tls1)) - if err != nil { - t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) - } - defer nc.Close() - - tls2 := &tls.Config{ - ServerName: "OtherHostName", - RootCAs: pool, - MinVersion: tls.VersionTLS12, - } - - nc2, err := nats.Connect(secureURL, nats.Secure(tls1, tls2)) - if err == nil { - nc2.Close() - t.Fatal("Was expecting an error!") - } -} - -func TestClientCertificate(t *testing.T) { - - s, opts := RunServerWithConfig("./configs/tlsverify.conf") - defer s.Shutdown() - - endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) - secureURL := fmt.Sprintf("nats://%s", endpoint) - - // Make sure this fails - nc, err := nats.Connect(secureURL, nats.Secure()) - if err == nil { - nc.Close() - t.Fatal("Should have failed (TLS) connection without client certificate") - } - - // Check parameters validity - nc, err = nats.Connect(secureURL, nats.ClientCert("", "")) - if err == nil { - nc.Close() - t.Fatal("Should have failed due to invalid parameters") - } - - // Should fail because wrong key - nc, err = nats.Connect(secureURL, - nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/key.pem")) - if err == nil { - nc.Close() - t.Fatal("Should have failed due to invalid key") - } - - // Should fail because no CA - nc, err = nats.Connect(secureURL, - nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")) - if err == nil { - nc.Close() - t.Fatal("Should have failed due to missing ca") - } - - nc, err = nats.Connect(secureURL, - nats.RootCAs("./configs/certs/ca.pem"), - nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")) - if err != nil { - t.Fatalf("Failed to create (TLS) connection: %v", err) - } - defer nc.Close() - - omsg := []byte("Hello!") - checkRecv := make(chan bool) - - received := 0 - nc.Subscribe("foo", func(m *nats.Msg) { - received += 1 - if 
!bytes.Equal(m.Data, omsg) { - t.Fatal("Message received does not match") - } - checkRecv <- true - }) - err = nc.Publish("foo", omsg) - if err != nil { - t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) - } - nc.Flush() - - if err := Wait(checkRecv); err != nil { - t.Fatal("Failed to receive message") - } -} - -func TestServerTLSHintConnections(t *testing.T) { - s, opts := RunServerWithConfig("./configs/tls.conf") - defer s.Shutdown() - - endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) - secureURL := fmt.Sprintf("tls://%s:%s@%s/", opts.Username, opts.Password, endpoint) - - nc, err := nats.Connect(secureURL, nats.RootCAs("./configs/certs/badca.pem")) - if err == nil { - t.Fatal("Expected an error from bad RootCA file") - } - - nc, err = nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem")) - if err != nil { - t.Fatal("Failed to create secure (TLS) connection", err) - } - defer nc.Close() -} - -func TestClosedConnections(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, _ := nc.SubscribeSync("foo") - if sub == nil { - t.Fatal("Failed to create valid subscription") - } - - // Test all API endpoints do the right thing with a closed connection. - nc.Close() - if err := nc.Publish("foo", nil); err != nats.ErrConnectionClosed { - t.Fatalf("Publish on closed conn did not fail properly: %v\n", err) - } - if err := nc.PublishMsg(&nats.Msg{Subject: "foo"}); err != nats.ErrConnectionClosed { - t.Fatalf("PublishMsg on closed conn did not fail properly: %v\n", err) - } - if err := nc.Flush(); err != nats.ErrConnectionClosed { - t.Fatalf("Flush on closed conn did not fail properly: %v\n", err) - } - _, err := nc.Subscribe("foo", nil) - if err != nats.ErrConnectionClosed { - t.Fatalf("Subscribe on closed conn did not fail properly: %v\n", err) - } - _, err = nc.SubscribeSync("foo") - if err != nats.ErrConnectionClosed { - t.Fatalf("SubscribeSync on closed conn did not fail properly: %v\n", err) - } - _, err = nc.QueueSubscribe("foo", "bar", nil) - if err != nats.ErrConnectionClosed { - t.Fatalf("QueueSubscribe on closed conn did not fail properly: %v\n", err) - } - _, err = nc.Request("foo", []byte("help"), 10*time.Millisecond) - if err != nats.ErrConnectionClosed { - t.Fatalf("Request on closed conn did not fail properly: %v\n", err) - } - if _, err = sub.NextMsg(10); err != nats.ErrConnectionClosed { - t.Fatalf("NextMessage on closed conn did not fail properly: %v\n", err) - } - if err = sub.Unsubscribe(); err != nats.ErrConnectionClosed { - t.Fatalf("Unsubscribe on closed conn did not fail properly: %v\n", err) - } -} - -func TestErrOnConnectAndDeadlock(t *testing.T) { - // We will hand run a fake server that will timeout and not return a proper - // INFO proto. This is to test that we do not deadlock. Issue #18 - - l, e := net.Listen("tcp", ":0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - - go func() { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - // Send back a mal-formed INFO. 
- conn.Write([]byte("INFOZ \r\n")) - }() - - // Used to synchronize - ch := make(chan bool) - - go func() { - natsURL := fmt.Sprintf("nats://localhost:%d/", addr.Port) - nc, err := nats.Connect(natsURL) - if err == nil { - nc.Close() - t.Fatal("Expected bad INFO err, got none") - } - ch <- true - }() - - // Setup a timer to watch for deadlock - select { - case <-ch: - break - case <-time.After(time.Second): - t.Fatalf("Connect took too long, deadlock?") - } -} - -func TestMoreErrOnConnect(t *testing.T) { - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - - done := make(chan bool) - case1 := make(chan bool) - case2 := make(chan bool) - case3 := make(chan bool) - case4 := make(chan bool) - - go func() { - for i := 0; i < 5; i++ { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - switch i { - case 0: - // Send back a partial INFO and close the connection. - conn.Write([]byte("INFO")) - case 1: - // Send just INFO - conn.Write([]byte("INFO\r\n")) - // Stick around a bit - <-case1 - case 2: - info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n", addr.IP, addr.Port) - // Send complete INFO - conn.Write([]byte(info)) - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - // Client expect +OK, send it but then something else than PONG - conn.Write([]byte("+OK\r\n")) - // Stick around a bit - <-case2 - case 3: - info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n", addr.IP, addr.Port) - // Send complete INFO - conn.Write([]byte(info)) - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - // Client expect +OK, send it but then something else than PONG - conn.Write([]byte("+OK\r\nXXX\r\n")) - // Stick around a bit - <-case3 - case 4: - info := fmt.Sprintf("INFO {'x'}\r\n") - // Send INFO with JSON marshall error - conn.Write([]byte(info)) - // Stick around a bit - <-case4 - } - - conn.Close() - } - - // Hang around until asked to quit - <-done - }() - - natsURL := fmt.Sprintf("nats://localhost:%d", addr.Port) - - if nc, err := nats.Connect(natsURL, nats.Timeout(20*time.Millisecond)); err == nil { - nc.Close() - t.Fatal("Expected error, got none") - } - - if nc, err := nats.Connect(natsURL, nats.Timeout(20*time.Millisecond)); err == nil { - close(case1) - nc.Close() - t.Fatal("Expected error, got none") - } - - close(case1) - - opts := nats.DefaultOptions - opts.Servers = []string{natsURL} - opts.Timeout = 20 * time.Millisecond - opts.Verbose = true - - if nc, err := opts.Connect(); err == nil { - close(case2) - nc.Close() - t.Fatal("Expected error, got none") - } - - close(case2) - - if nc, err := 
opts.Connect(); err == nil { - close(case3) - nc.Close() - t.Fatal("Expected error, got none") - } - - close(case3) - - if nc, err := opts.Connect(); err == nil { - close(case4) - nc.Close() - t.Fatal("Expected error, got none") - } - - close(case4) - - close(done) -} - -func TestErrOnMaxPayloadLimit(t *testing.T) { - expectedMaxPayload := int64(10) - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.6.6\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":%d}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - - // Send back an INFO message with custom max payload size on connect. - var conn net.Conn - var err error - - go func() { - conn, err = l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port, expectedMaxPayload) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - line := make([]byte, 111) - _, err := conn.Read(line) - if err != nil { - t.Fatalf("Expected CONNECT and PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - // Hang around a bit to not err on EOF in client. - time.Sleep(250 * time.Millisecond) - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - got := nc.MaxPayload() - if got != expectedMaxPayload { - t.Fatalf("Expected MaxPayload to be %d, got: %d", expectedMaxPayload, got) - } - err = nc.Publish("hello", []byte("hello world")) - if err != nats.ErrMaxPayload { - t.Fatalf("Expected to fail trying to send more than max payload, got: %s", err) - } - err = nc.Publish("hello", []byte("a")) - if err != nil { - t.Fatalf("Expected to succeed trying to send less than max payload, got: %s", err) - } -} - -func TestConnectVerbose(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - o := nats.DefaultOptions - o.Verbose = true - - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - nc.Close() -} - -func isRunningInAsyncCBDispatcher() error { - var stacks []byte - - stacksSize := 10000 - - for { - stacks = make([]byte, stacksSize) - n := runtime.Stack(stacks, false) - if n == stacksSize { - stacksSize *= stacksSize - continue - } - break - } - - strStacks := string(stacks) - - if strings.Contains(strStacks, "asyncDispatch") { - return nil - } - - return errors.New(fmt.Sprintf("Callback not executed from dispatcher:\n %s\n", strStacks)) -} - -func TestCallbacksOrder(t *testing.T) { - authS, authSOpts := RunServerWithConfig("./configs/tls.conf") - defer authS.Shutdown() - - s := RunDefaultServer() - defer s.Shutdown() - - firstDisconnect := true - dtime1 := time.Time{} - dtime2 := time.Time{} - rtime := time.Time{} - atime1 := time.Time{} - atime2 := time.Time{} - ctime := time.Time{} - - cbErrors := make(chan error, 20) - - reconnected := make(chan bool) - closed := make(chan bool) - asyncErr := make(chan bool, 2) - recvCh := make(chan bool, 2) - recvCh1 := make(chan bool) - recvCh2 := make(chan bool) - - dch := func(nc *nats.Conn) { - if err := 
isRunningInAsyncCBDispatcher(); err != nil { - cbErrors <- err - return - } - time.Sleep(100 * time.Millisecond) - if firstDisconnect { - firstDisconnect = false - dtime1 = time.Now() - } else { - dtime2 = time.Now() - } - } - - rch := func(nc *nats.Conn) { - if err := isRunningInAsyncCBDispatcher(); err != nil { - cbErrors <- err - reconnected <- true - return - } - time.Sleep(50 * time.Millisecond) - rtime = time.Now() - reconnected <- true - } - - ech := func(nc *nats.Conn, sub *nats.Subscription, err error) { - if err := isRunningInAsyncCBDispatcher(); err != nil { - cbErrors <- err - asyncErr <- true - return - } - if sub.Subject == "foo" { - time.Sleep(20 * time.Millisecond) - atime1 = time.Now() - } else { - atime2 = time.Now() - } - asyncErr <- true - } - - cch := func(nc *nats.Conn) { - if err := isRunningInAsyncCBDispatcher(); err != nil { - cbErrors <- err - closed <- true - return - } - ctime = time.Now() - closed <- true - } - - url := net.JoinHostPort(authSOpts.Host, strconv.Itoa(authSOpts.Port)) - url = "nats://" + url + "," + nats.DefaultURL - - nc, err := nats.Connect(url, - nats.DisconnectHandler(dch), - nats.ReconnectHandler(rch), - nats.ClosedHandler(cch), - nats.ErrorHandler(ech), - nats.ReconnectWait(50*time.Millisecond), - nats.DontRandomize()) - if err != nil { - t.Fatalf("Unable to connect: %v\n", err) - } - defer nc.Close() - - ncp, err := nats.Connect(nats.DefaultURL, - nats.ReconnectWait(50*time.Millisecond)) - if err != nil { - t.Fatalf("Unable to connect: %v\n", err) - } - defer ncp.Close() - - // Wait to make sure that if we have closed (incorrectly) the - // asyncCBDispatcher during the connect process, this is caught here. - time.Sleep(time.Second) - - s.Shutdown() - - s = RunDefaultServer() - defer s.Shutdown() - - if err := Wait(reconnected); err != nil { - t.Fatal("Did not get the reconnected callback") - } - - var sub1 *nats.Subscription - var sub2 *nats.Subscription - - recv := func(m *nats.Msg) { - // Signal that one message is received - recvCh <- true - - // We will now block - if m.Subject == "foo" { - <-recvCh1 - } else { - <-recvCh2 - } - m.Sub.Unsubscribe() - } - - sub1, err = nc.Subscribe("foo", recv) - if err != nil { - t.Fatalf("Unable to create subscription: %v\n", err) - } - sub1.SetPendingLimits(1, 100000) - - sub2, err = nc.Subscribe("bar", recv) - if err != nil { - t.Fatalf("Unable to create subscription: %v\n", err) - } - sub2.SetPendingLimits(1, 100000) - - nc.Flush() - - ncp.Publish("foo", []byte("test")) - ncp.Publish("bar", []byte("test")) - ncp.Flush() - - // Wait for notification that messages were received - err = Wait(recvCh) - if err == nil { - err = Wait(recvCh) - } - if err != nil { - t.Fatal("Did not receive message") - } - - for i := 0; i < 2; i++ { - ncp.Publish("foo", []byte("test")) - ncp.Publish("bar", []byte("test")) - } - ncp.Flush() - - if err := Wait(asyncErr); err != nil { - t.Fatal("Did not get the async callback") - } - if err := Wait(asyncErr); err != nil { - t.Fatal("Did not get the async callback") - } - - close(recvCh1) - close(recvCh2) - - nc.Close() - - if err := Wait(closed); err != nil { - t.Fatal("Did not get the close callback") - } - - if len(cbErrors) > 0 { - t.Fatalf("%v", <-cbErrors) - } - - if (dtime1 == time.Time{}) || (dtime2 == time.Time{}) || (rtime == time.Time{}) || (atime1 == time.Time{}) || (atime2 == time.Time{}) || (ctime == time.Time{}) { - t.Fatalf("Some callbacks did not fire:\n%v\n%v\n%v\n%v\n%v\n%v", dtime1, rtime, atime1, atime2, dtime2, ctime) - } - - if rtime.Before(dtime1) || 
dtime2.Before(rtime) || atime2.Before(atime1) || ctime.Before(atime2) { - t.Fatalf("Wrong callback order:\n%v\n%v\n%v\n%v\n%v\n%v", dtime1, rtime, atime1, atime2, dtime2, ctime) - } -} - -func TestFlushReleaseOnClose(t *testing.T) { - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - done := make(chan bool) - - go func() { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - - // Hang around until asked to quit - <-done - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.AllowReconnect = false - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - // First try a FlushTimeout() and make sure we timeout - if err := nc.FlushTimeout(50 * time.Millisecond); err == nil || err != nats.ErrTimeout { - t.Fatalf("Expected a timeout error, got: %v", err) - } - - go func() { - time.Sleep(50 * time.Millisecond) - nc.Close() - }() - - if err := nc.Flush(); err == nil { - t.Fatal("Expected error on Flush() released by Close()") - } - - close(done) -} - -func TestMaxPendingOut(t *testing.T) { - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - done := make(chan bool) - cch := make(chan bool) - - go func() { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - - // Hang around until asked to quit - <-done - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.PingInterval = 20 * time.Millisecond - opts.MaxPingsOut = 2 - opts.AllowReconnect = false - opts.ClosedCB = func(_ *nats.Conn) { cch <- true } - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - 
if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - // After 60 ms, we should have closed the connection - time.Sleep(100 * time.Millisecond) - - if err := Wait(cch); err != nil { - t.Fatal("Failed to get ClosedCB") - } - if nc.LastError() != nats.ErrStaleConnection { - t.Fatalf("Expected to get %v, got %v", nats.ErrStaleConnection, nc.LastError()) - } - - close(done) -} - -func TestErrInReadLoop(t *testing.T) { - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - done := make(chan bool) - cch := make(chan bool) - - go func() { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - - // Read (and ignore) the SUB from the client - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected SUB from client, got: %s", err) - } - - // Send something that should make the subscriber fail. - conn.Write([]byte("Ivan")) - - // Hang around until asked to quit - <-done - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.AllowReconnect = false - opts.ClosedCB = func(_ *nats.Conn) { cch <- true } - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - received := int64(0) - - nc.Subscribe("foo", func(_ *nats.Msg) { - atomic.AddInt64(&received, 1) - }) - - if err := Wait(cch); err != nil { - t.Fatal("Failed to get ClosedCB") - } - - recv := int(atomic.LoadInt64(&received)) - if recv != 0 { - t.Fatalf("Should not have received messages, got: %d", recv) - } - - close(done) -} - -func TestErrStaleConnection(t *testing.T) { - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - done := make(chan bool) - dch := make(chan bool) - rch := make(chan bool) - cch := make(chan bool) - sch := make(chan bool) - - firstDisconnect := true - - go func() { - for i := 0; i < 2; i++ { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { 
- t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - - if i == 0 { - // Wait a tiny, and simulate a Stale Connection - time.Sleep(50 * time.Millisecond) - conn.Write([]byte("-ERR 'Stale Connection'\r\n")) - - // The client should try to reconnect. When getting the - // disconnected callback, it will close this channel. - <-sch - - // Close the connection and go back to accept the new - // connection. - conn.Close() - } else { - // Hang around a bit - <-done - } - } - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.AllowReconnect = true - opts.DisconnectedCB = func(_ *nats.Conn) { - // Interested only in the first disconnect cb - if firstDisconnect { - firstDisconnect = false - close(sch) - dch <- true - } - } - opts.ReconnectedCB = func(_ *nats.Conn) { rch <- true } - opts.ClosedCB = func(_ *nats.Conn) { cch <- true } - opts.ReconnectWait = 20 * time.Millisecond - opts.MaxReconnect = 100 - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - // We should first gets disconnected - if err := Wait(dch); err != nil { - t.Fatal("Failed to get DisconnectedCB") - } - - // Then reconneted.. - if err := Wait(rch); err != nil { - t.Fatal("Failed to get ReconnectedCB") - } - - // Now close the connection - nc.Close() - - // We should get the closed cb - if err := Wait(cch); err != nil { - t.Fatal("Failed to get ClosedCB") - } - - close(done) -} - -func TestServerErrorClosesConnection(t *testing.T) { - serverInfo := "INFO {\"server_id\":\"foobar\",\"version\":\"0.7.3\",\"go\":\"go1.5.1\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"ssl_required\":false,\"max_payload\":1048576}\r\n" - - l, e := net.Listen("tcp", "127.0.0.1:0") - if e != nil { - t.Fatal("Could not listen on an ephemeral port") - } - tl := l.(*net.TCPListener) - defer tl.Close() - - addr := tl.Addr().(*net.TCPAddr) - done := make(chan bool) - dch := make(chan bool) - cch := make(chan bool) - - serverSentError := "Any Error" - reconnected := int64(0) - - go func() { - conn, err := l.Accept() - if err != nil { - t.Fatalf("Error accepting client connection: %v\n", err) - } - defer conn.Close() - info := fmt.Sprintf(serverInfo, addr.IP, addr.Port) - conn.Write([]byte(info)) - - // Read connect and ping commands sent from the client - br := bufio.NewReaderSize(conn, 1024) - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected CONNECT from client, got: %s", err) - } - if _, err := br.ReadString('\n'); err != nil { - t.Fatalf("Expected PING from client, got: %s", err) - } - conn.Write([]byte("PONG\r\n")) - - // Wait a tiny, and simulate a Stale Connection - time.Sleep(50 * time.Millisecond) - conn.Write([]byte("-ERR '" + serverSentError + "'\r\n")) - - // Hang around a bit - <-done - }() - - // Wait for server mock to start - time.Sleep(100 * time.Millisecond) - - natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port) - opts := nats.DefaultOptions - opts.AllowReconnect = true - opts.DisconnectedCB = func(_ *nats.Conn) { dch <- true } - opts.ReconnectedCB = func(_ *nats.Conn) { atomic.AddInt64(&reconnected, 1) } - opts.ClosedCB = func(_ *nats.Conn) { cch <- true } - opts.ReconnectWait = 20 * time.Millisecond - 
opts.MaxReconnect = 100 - opts.Servers = []string{natsURL} - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Expected INFO message with custom max payload, got: %s", err) - } - defer nc.Close() - - // The server sends an error that should cause the client to simply close - // the connection. - - // We should first get disconnected - if err := Wait(dch); err != nil { - t.Fatal("Failed to get DisconnectedCB") - } - - // We should get the closed cb - if err := Wait(cch); err != nil { - t.Fatal("Failed to get ClosedCB") - } - - // We should not have been reconnected - if atomic.LoadInt64(&reconnected) != 0 { - t.Fatal("ReconnectedCB should not have been invoked") - } - - // Check LastError(), it should be "nats: " - lastErr := nc.LastError().Error() - expectedErr := "nats: " + strings.ToLower(serverSentError) - if lastErr != expectedErr { - t.Fatalf("Expected error: '%v', got '%v'", expectedErr, lastErr) - } - - close(done) -} - -func TestUseDefaultTimeout(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - opts := &nats.Options{ - Servers: []string{nats.DefaultURL}, - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Unexpected error on connect: %v", err) - } - defer nc.Close() - if nc.Opts.Timeout != nats.DefaultTimeout { - t.Fatalf("Expected Timeout to be set to %v, got %v", nats.DefaultTimeout, nc.Opts.Timeout) - } -} - -func TestNoRaceOnLastError(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - dch := func(c *nats.Conn) { - // Just access LastError to make sure that there is no race - if c.LastError() != nil { - if c.LastError().Error() == "" { - } - } - } - nc, err := nats.Connect(nats.DefaultURL, - nats.DisconnectHandler(dch), - nats.ReconnectWait(5*time.Millisecond)) - if err != nil { - t.Fatalf("Unable to connect: %v\n", err) - } - defer nc.Close() - - for i := 0; i < 10; i++ { - s.Shutdown() - time.Sleep(10 * time.Millisecond) - s = RunDefaultServer() - } - s.Shutdown() -} - -func TestUseCustomDialer(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - dialer := &net.Dialer{ - Timeout: 10 * time.Second, - DualStack: true, - } - opts := &nats.Options{ - Servers: []string{nats.DefaultURL}, - Dialer: dialer, - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Unexpected error on connect: %v", err) - } - defer nc.Close() - if nc.Opts.Dialer != dialer { - t.Fatalf("Expected Dialer to be set to %v, got %v", dialer, nc.Opts.Dialer) - } - - // Should be possible to set via variadic func based Option setter - dialer2 := &net.Dialer{ - Timeout: 5 * time.Second, - DualStack: true, - } - nc2, err := nats.Connect(nats.DefaultURL, nats.Dialer(dialer2)) - if err != nil { - t.Fatalf("Unexpected error on connect: %v", err) - } - defer nc2.Close() - if !nc2.Opts.Dialer.DualStack { - t.Fatalf("Expected for dialer to be customized to use dual stack support") - } - - // By default, dialer still uses the DefaultTimeout - nc3, err := nats.Connect(nats.DefaultURL) - if err != nil { - t.Fatalf("Unexpected error on connect: %v", err) - } - defer nc3.Close() - if nc3.Opts.Dialer.Timeout != nats.DefaultTimeout { - t.Fatalf("Expected DialTimeout to be set to %v, got %v", nats.DefaultTimeout, nc.Opts.Dialer.Timeout) - } -} diff --git a/vendor/github.com/nats-io/go-nats/test/netchan_test.go b/vendor/github.com/nats-io/go-nats/test/netchan_test.go deleted file mode 100644 index 88b66cb60..000000000 --- a/vendor/github.com/nats-io/go-nats/test/netchan_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package test - -import ( - "runtime" - "testing" - 
"time" - - "github.com/nats-io/go-nats" -) - -func TestBadChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - if err := ec.BindSendChan("foo", "not a chan"); err == nil { - t.Fatalf("Expected an Error when sending a non-channel\n") - } - - if _, err := ec.BindRecvChan("foo", "not a chan"); err == nil { - t.Fatalf("Expected an Error when sending a non-channel\n") - } - - if err := ec.BindSendChan("foo", "not a chan"); err != nats.ErrChanArg { - t.Fatalf("Expected an ErrChanArg when sending a non-channel\n") - } - - if _, err := ec.BindRecvChan("foo", "not a chan"); err != nats.ErrChanArg { - t.Fatalf("Expected an ErrChanArg when sending a non-channel\n") - } -} - -func TestSimpleSendChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - recv := make(chan bool) - - numSent := int32(22) - ch := make(chan int32) - - if err := ec.BindSendChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - ec.Subscribe("foo", func(num int32) { - if num != numSent { - t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) - } - recv <- true - }) - - // Send to 'foo' - ch <- numSent - - if e := Wait(recv); e != nil { - if ec.LastError() != nil { - e = ec.LastError() - } - t.Fatalf("Did not receive the message: %s", e) - } - close(ch) -} - -func TestFailedChannelSend(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - nc := ec.Conn - ch := make(chan bool) - wch := make(chan bool) - - nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { - wch <- true - } - - if err := ec.BindSendChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a receive channel: %v\n", err) - } - - nc.Flush() - - go func() { - time.Sleep(100 * time.Millisecond) - nc.Close() - }() - - func() { - for { - select { - case ch <- true: - case <-wch: - return - case <-time.After(time.Second): - t.Fatal("Failed to get async error cb") - } - } - }() - - ec = NewEConn(t) - defer ec.Close() - - nc = ec.Conn - bch := make(chan []byte) - - nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { - wch <- true - } - - if err := ec.BindSendChan("foo", bch); err != nil { - t.Fatalf("Failed to bind to a receive channel: %v\n", err) - } - - buf := make([]byte, 2*1024*1024) - bch <- buf - - if e := Wait(wch); e != nil { - t.Fatal("Failed to call async err handler") - } -} - -func TestSimpleRecvChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - numSent := int32(22) - ch := make(chan int32) - - if _, err := ec.BindRecvChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a receive channel: %v\n", err) - } - - ec.Publish("foo", numSent) - - // Receive from 'foo' - select { - case num := <-ch: - if num != numSent { - t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) - } - case <-time.After(1 * time.Second): - t.Fatalf("Failed to receive a value, timed-out\n") - } - close(ch) -} - -func TestQueueRecvChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - numSent := int32(22) - ch := make(chan int32) - - if _, err := ec.BindRecvQueueChan("foo", "bar", ch); err != nil { - t.Fatalf("Failed to bind to a queue receive channel: %v\n", err) - } - - ec.Publish("foo", numSent) - - // Receive from 'foo' - select { - case num := <-ch: - if num != numSent { - 
t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) - } - case <-time.After(1 * time.Second): - t.Fatalf("Failed to receive a value, timed-out\n") - } - close(ch) -} - -func TestDecoderErrRecvChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - nc := ec.Conn - wch := make(chan bool) - - nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { - wch <- true - } - - ch := make(chan *int32) - - if _, err := ec.BindRecvChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - ec.Publish("foo", "Hello World") - - if e := Wait(wch); e != nil { - t.Fatal("Failed to call async err handler") - } -} - -func TestRecvChanPanicOnClosedChan(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - ch := make(chan int) - - if _, err := ec.BindRecvChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - close(ch) - ec.Publish("foo", 22) - ec.Flush() -} - -func TestRecvChanAsyncLeakGoRoutines(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - // Call this to make sure that we have everything setup connection wise - ec.Flush() - - before := runtime.NumGoroutine() - - ch := make(chan int) - - if _, err := ec.BindRecvChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - // Close the receive Channel - close(ch) - - // The publish will trigger the close and shutdown of the Go routines - ec.Publish("foo", 22) - ec.Flush() - - time.Sleep(100 * time.Millisecond) - - delta := (runtime.NumGoroutine() - before) - - if delta > 0 { - t.Fatalf("Leaked Go routine(s) : %d, closing channel should have closed them\n", delta) - } -} - -func TestRecvChanLeakGoRoutines(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - // Call this to make sure that we have everything setup connection wise - ec.Flush() - - before := runtime.NumGoroutine() - - ch := make(chan int) - - sub, err := ec.BindRecvChan("foo", ch) - if err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - sub.Unsubscribe() - - // Sleep a bit to wait for the Go routine to exit. - time.Sleep(500 * time.Millisecond) - - delta := (runtime.NumGoroutine() - before) - - if delta > 0 { - t.Fatalf("Leaked Go routine(s) : %d, closing channel should have closed them\n", delta) - } -} - -func TestRecvChanMultipleMessages(t *testing.T) { - // Make sure we can receive more than one message. - // In response to #25, which is a bug from fixing #22. - - s := RunDefaultServer() - defer s.Shutdown() - - ec := NewEConn(t) - defer ec.Close() - - // Num to send, should == len of messages queued. 
- size := 10 - - ch := make(chan int, size) - - if _, err := ec.BindRecvChan("foo", ch); err != nil { - t.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - for i := 0; i < size; i++ { - ec.Publish("foo", 22) - } - ec.Flush() - time.Sleep(10 * time.Millisecond) - - if lch := len(ch); lch != size { - t.Fatalf("Expected %d messages queued, got %d.", size, lch) - } -} - -func BenchmarkPublishSpeedViaChan(b *testing.B) { - b.StopTimer() - - s := RunDefaultServer() - defer s.Shutdown() - - nc, err := nats.Connect(nats.DefaultURL) - if err != nil { - b.Fatalf("Could not connect: %v\n", err) - } - ec, err := nats.NewEncodedConn(nc, nats.DEFAULT_ENCODER) - if err != nil { - b.Fatalf("Failed creating encoded connection: %v\n", err) - } - defer ec.Close() - - ch := make(chan int32, 1024) - if err := ec.BindSendChan("foo", ch); err != nil { - b.Fatalf("Failed to bind to a send channel: %v\n", err) - } - - b.StartTimer() - - num := int32(22) - - for i := 0; i < b.N; i++ { - ch <- num - } - // Make sure they are all processed. - nc.Flush() - b.StopTimer() -} diff --git a/vendor/github.com/nats-io/go-nats/test/reconnect_test.go b/vendor/github.com/nats-io/go-nats/test/reconnect_test.go deleted file mode 100644 index 6e11b80f8..000000000 --- a/vendor/github.com/nats-io/go-nats/test/reconnect_test.go +++ /dev/null @@ -1,623 +0,0 @@ -package test - -import ( - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/gnatsd/server" - "github.com/nats-io/go-nats" -) - -func startReconnectServer(t *testing.T) *server.Server { - return RunServerOnPort(22222) -} - -func TestReconnectTotalTime(t *testing.T) { - opts := nats.DefaultOptions - totalReconnectTime := time.Duration(opts.MaxReconnect) * opts.ReconnectWait - if totalReconnectTime < (2 * time.Minute) { - t.Fatalf("Total reconnect time should be at least 2 mins: Currently %v\n", - totalReconnectTime) - } -} - -func TestReconnectDisallowedFlags(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - ch := make(chan bool) - opts := nats.DefaultOptions - opts.Url = "nats://localhost:22222" - opts.AllowReconnect = false - opts.ClosedCB = func(_ *nats.Conn) { - ch <- true - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - ts.Shutdown() - - if e := Wait(ch); e != nil { - t.Fatal("Did not trigger ClosedCB correctly") - } -} - -func TestReconnectAllowedFlags(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - ch := make(chan bool) - dch := make(chan bool) - opts := nats.DefaultOptions - opts.Url = "nats://localhost:22222" - opts.AllowReconnect = true - opts.MaxReconnect = 2 - opts.ReconnectWait = 1 * time.Second - - opts.ClosedCB = func(_ *nats.Conn) { - ch <- true - } - opts.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - ts.Shutdown() - - // We want wait to timeout here, and the connection - // should not trigger the Close CB. - if e := WaitTime(ch, 500*time.Millisecond); e == nil { - t.Fatal("Triggered ClosedCB incorrectly") - } - - // We should wait to get the disconnected callback to ensure - // that we are in the process of reconnecting. 
- if e := Wait(dch); e != nil { - t.Fatal("DisconnectedCB should have been triggered") - } - - if !nc.IsReconnecting() { - t.Fatal("Expected to be in a reconnecting state") - } - - // clear the CloseCB since ch will block - nc.Opts.ClosedCB = nil -} - -var reconnectOpts = nats.Options{ - Url: "nats://localhost:22222", - AllowReconnect: true, - MaxReconnect: 10, - ReconnectWait: 100 * time.Millisecond, - Timeout: nats.DefaultTimeout, -} - -func TestConnCloseBreaksReconnectLoop(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - cch := make(chan bool) - - opts := reconnectOpts - // Bump the max reconnect attempts - opts.MaxReconnect = 100 - opts.ClosedCB = func(_ *nats.Conn) { - cch <- true - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - nc.Flush() - - // Shutdown the server - ts.Shutdown() - - // Wait a second, then close the connection - time.Sleep(time.Second) - - // Close the connection, this should break the reconnect loop. - // Do this in a go routine since the issue was that Close() - // would block until the reconnect loop is done. - go nc.Close() - - // Even on Windows (where a createConn takes more than a second) - // we should be able to break the reconnect loop with the following - // timeout. - if err := WaitTime(cch, 3*time.Second); err != nil { - t.Fatal("Did not get a closed callback") - } -} - -func TestBasicReconnectFunctionality(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - ch := make(chan bool) - dch := make(chan bool) - - opts := reconnectOpts - - opts.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v\n", err) - } - defer nc.Close() - ec, err := nats.NewEncodedConn(nc, nats.DEFAULT_ENCODER) - if err != nil { - t.Fatalf("Failed to create an encoded connection: %v\n", err) - } - - testString := "bar" - ec.Subscribe("foo", func(s string) { - if s != testString { - t.Fatal("String doesn't match") - } - ch <- true - }) - ec.Flush() - - ts.Shutdown() - // server is stopped here... 
- - if err := Wait(dch); err != nil { - t.Fatalf("Did not get the disconnected callback on time\n") - } - - if err := ec.Publish("foo", testString); err != nil { - t.Fatalf("Failed to publish message: %v\n", err) - } - - ts = startReconnectServer(t) - defer ts.Shutdown() - - if err := ec.FlushTimeout(5 * time.Second); err != nil { - t.Fatalf("Error on Flush: %v", err) - } - - if e := Wait(ch); e != nil { - t.Fatal("Did not receive our message") - } - - expectedReconnectCount := uint64(1) - reconnectCount := ec.Conn.Stats().Reconnects - - if reconnectCount != expectedReconnectCount { - t.Fatalf("Reconnect count incorrect: %d vs %d\n", - reconnectCount, expectedReconnectCount) - } -} - -func TestExtendedReconnectFunctionality(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - opts := reconnectOpts - dch := make(chan bool) - opts.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - rch := make(chan bool) - opts.ReconnectedCB = func(_ *nats.Conn) { - rch <- true - } - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - ec, err := nats.NewEncodedConn(nc, nats.DEFAULT_ENCODER) - if err != nil { - t.Fatalf("Failed to create an encoded connection: %v\n", err) - } - testString := "bar" - received := int32(0) - - ec.Subscribe("foo", func(s string) { - atomic.AddInt32(&received, 1) - }) - - sub, _ := ec.Subscribe("foobar", func(s string) { - atomic.AddInt32(&received, 1) - }) - - ec.Publish("foo", testString) - ec.Flush() - - ts.Shutdown() - // server is stopped here.. - - // wait for disconnect - if e := WaitTime(dch, 2*time.Second); e != nil { - t.Fatal("Did not receive a disconnect callback message") - } - - // Sub while disconnected - ec.Subscribe("bar", func(s string) { - atomic.AddInt32(&received, 1) - }) - - // Unsub foobar while disconnected - sub.Unsubscribe() - - if err = ec.Publish("foo", testString); err != nil { - t.Fatalf("Received an error after disconnect: %v\n", err) - } - - if err = ec.Publish("bar", testString); err != nil { - t.Fatalf("Received an error after disconnect: %v\n", err) - } - - ts = startReconnectServer(t) - defer ts.Shutdown() - - // server is restarted here.. - // wait for reconnect - if e := WaitTime(rch, 2*time.Second); e != nil { - t.Fatal("Did not receive a reconnect callback message") - } - - if err = ec.Publish("foobar", testString); err != nil { - t.Fatalf("Received an error after server restarted: %v\n", err) - } - - if err = ec.Publish("foo", testString); err != nil { - t.Fatalf("Received an error after server restarted: %v\n", err) - } - - ch := make(chan bool) - ec.Subscribe("done", func(b bool) { - ch <- true - }) - ec.Publish("done", true) - - if e := Wait(ch); e != nil { - t.Fatal("Did not receive our message") - } - - // Sleep a bit to guarantee scheduler runs and process all subs. - time.Sleep(50 * time.Millisecond) - - if atomic.LoadInt32(&received) != 4 { - t.Fatalf("Received != %d, equals %d\n", 4, received) - } -} - -func TestQueueSubsOnReconnect(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - opts := reconnectOpts - - // Allow us to block on reconnect complete. 
- reconnectsDone := make(chan bool) - opts.ReconnectedCB = func(nc *nats.Conn) { - reconnectsDone <- true - } - - // Create connection - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v\n", err) - } - defer nc.Close() - - ec, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) - if err != nil { - t.Fatalf("Failed to create an encoded connection: %v\n", err) - } - - // To hold results. - results := make(map[int]int) - var mu sync.Mutex - - // Make sure we got what we needed, 1 msg only and all seqnos accounted for.. - checkResults := func(numSent int) { - mu.Lock() - defer mu.Unlock() - - for i := 0; i < numSent; i++ { - if results[i] != 1 { - t.Fatalf("Received incorrect number of messages, [%d] for seq: %d\n", results[i], i) - } - } - - // Auto reset results map - results = make(map[int]int) - } - - subj := "foo.bar" - qgroup := "workers" - - cb := func(seqno int) { - mu.Lock() - defer mu.Unlock() - results[seqno] = results[seqno] + 1 - } - - // Create Queue Subscribers - ec.QueueSubscribe(subj, qgroup, cb) - ec.QueueSubscribe(subj, qgroup, cb) - - ec.Flush() - - // Helper function to send messages and check results. - sendAndCheckMsgs := func(numToSend int) { - for i := 0; i < numToSend; i++ { - ec.Publish(subj, i) - } - // Wait for processing. - ec.Flush() - time.Sleep(50 * time.Millisecond) - - // Check Results - checkResults(numToSend) - } - - // Base Test - sendAndCheckMsgs(10) - - // Stop and restart server - ts.Shutdown() - ts = startReconnectServer(t) - defer ts.Shutdown() - - if err := Wait(reconnectsDone); err != nil { - t.Fatal("Did not get the ReconnectedCB!") - } - - // Reconnect Base Test - sendAndCheckMsgs(10) -} - -func TestIsClosed(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - nc := NewConnection(t, 22222) - defer nc.Close() - - if nc.IsClosed() { - t.Fatalf("IsClosed returned true when the connection is still open.") - } - ts.Shutdown() - if nc.IsClosed() { - t.Fatalf("IsClosed returned true when the connection is still open.") - } - ts = startReconnectServer(t) - defer ts.Shutdown() - if nc.IsClosed() { - t.Fatalf("IsClosed returned true when the connection is still open.") - } - nc.Close() - if !nc.IsClosed() { - t.Fatalf("IsClosed returned false after Close() was called.") - } -} - -func TestIsReconnectingAndStatus(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - disconnectedch := make(chan bool) - reconnectch := make(chan bool) - opts := nats.DefaultOptions - opts.Url = "nats://localhost:22222" - opts.AllowReconnect = true - opts.MaxReconnect = 10000 - opts.ReconnectWait = 100 * time.Millisecond - - opts.DisconnectedCB = func(_ *nats.Conn) { - disconnectedch <- true - } - opts.ReconnectedCB = func(_ *nats.Conn) { - reconnectch <- true - } - - // Connect, verify initial reconnecting state check, then stop the server - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - if nc.IsReconnecting() { - t.Fatalf("IsReconnecting returned true when the connection is still open.") - } - if status := nc.Status(); status != nats.CONNECTED { - t.Fatalf("Status returned %d when connected instead of CONNECTED", status) - } - ts.Shutdown() - - // Wait until we get the disconnected callback - if e := Wait(disconnectedch); e != nil { - t.Fatalf("Disconnect callback wasn't triggered: %v", e) - } - if !nc.IsReconnecting() { - t.Fatalf("IsReconnecting returned false when the client is reconnecting.") - } - if status := nc.Status(); status 
!= nats.RECONNECTING { - t.Fatalf("Status returned %d when reconnecting instead of RECONNECTING", status) - } - - ts = startReconnectServer(t) - defer ts.Shutdown() - - // Wait until we get the reconnect callback - if e := Wait(reconnectch); e != nil { - t.Fatalf("Reconnect callback wasn't triggered: %v", e) - } - if nc.IsReconnecting() { - t.Fatalf("IsReconnecting returned true after the connection was reconnected.") - } - if status := nc.Status(); status != nats.CONNECTED { - t.Fatalf("Status returned %d when reconnected instead of CONNECTED", status) - } - - // Close the connection, reconnecting should still be false - nc.Close() - if nc.IsReconnecting() { - t.Fatalf("IsReconnecting returned true after Close() was called.") - } - if status := nc.Status(); status != nats.CLOSED { - t.Fatalf("Status returned %d after Close() was called instead of CLOSED", status) - } -} - -func TestFullFlushChanDuringReconnect(t *testing.T) { - ts := startReconnectServer(t) - defer ts.Shutdown() - - reconnectch := make(chan bool) - - opts := nats.DefaultOptions - opts.Url = "nats://localhost:22222" - opts.AllowReconnect = true - opts.MaxReconnect = 10000 - opts.ReconnectWait = 100 * time.Millisecond - - opts.ReconnectedCB = func(_ *nats.Conn) { - reconnectch <- true - } - - // Connect - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - // Channel used to stop the go routine that sends messages. - stop := make(chan bool) - - // While connected, publish as fast as we can - go func() { - for i := 0; ; i++ { - _ = nc.Publish("foo", []byte("hello")) - - // Make sure we are sending at least flushChanSize (1024) messages - // before potentially pausing. - if i%2000 == 0 { - select { - case <-stop: - return - default: - time.Sleep(100 * time.Millisecond) - } - } - } - }() - - // Send a bit... - time.Sleep(500 * time.Millisecond) - - // Shut down the server - ts.Shutdown() - - // Continue sending while we are disconnected - time.Sleep(time.Second) - - // Restart the server - ts = startReconnectServer(t) - defer ts.Shutdown() - - // Wait for the reconnect CB to be invoked (but not for too long) - if e := WaitTime(reconnectch, 5*time.Second); e != nil { - t.Fatalf("Reconnect callback wasn't triggered: %v", e) - } -} - -func TestReconnectVerbose(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - o := nats.DefaultOptions - o.Verbose = true - rch := make(chan bool) - o.ReconnectedCB = func(_ *nats.Conn) { - rch <- true - } - - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - err = nc.Flush() - if err != nil { - t.Fatalf("Error during flush: %v", err) - } - - s.Shutdown() - s = RunDefaultServer() - defer s.Shutdown() - - if e := Wait(rch); e != nil { - t.Fatal("Should have reconnected ok") - } - - err = nc.Flush() - if err != nil { - t.Fatalf("Error during flush: %v", err) - } -} - -func TestReconnectBufSize(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - o := nats.DefaultOptions - o.ReconnectBufSize = 32 // 32 bytes - - dch := make(chan bool) - o.DisconnectedCB = func(_ *nats.Conn) { - dch <- true - } - - nc, err := o.Connect() - if err != nil { - t.Fatalf("Should have connected ok: %v", err) - } - defer nc.Close() - - err = nc.Flush() - if err != nil { - t.Fatalf("Error during flush: %v", err) - } - - // Force disconnected state. 
- s.Shutdown() - - if e := Wait(dch); e != nil { - t.Fatal("DisconnectedCB should have been triggered") - } - - msg := []byte("food") // 4 bytes payload, total proto is 16 bytes - // These should work, 2X16 = 32 - if err := nc.Publish("foo", msg); err != nil { - t.Fatalf("Failed to publish message: %v\n", err) - } - if err := nc.Publish("foo", msg); err != nil { - t.Fatalf("Failed to publish message: %v\n", err) - } - - // This should fail since we have exhausted the backing buffer. - if err := nc.Publish("foo", msg); err == nil { - // t.Fatalf("Expected to fail to publish message: got no error\n") - } - nc.Buffered() -} diff --git a/vendor/github.com/nats-io/go-nats/test/sub_test.go b/vendor/github.com/nats-io/go-nats/test/sub_test.go deleted file mode 100644 index 80fea9c95..000000000 --- a/vendor/github.com/nats-io/go-nats/test/sub_test.go +++ /dev/null @@ -1,1473 +0,0 @@ -package test - -import ( - "fmt" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/nats-io/go-nats" -) - -// More advanced tests on subscriptions - -func TestServerAutoUnsub(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - received := int32(0) - max := int32(10) - - // Call this to make sure that we have everything set up connection-wise - nc.Flush() - - base := runtime.NumGoroutine() - - sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { - atomic.AddInt32(&received, 1) - }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - sub.AutoUnsubscribe(int(max)) - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - time.Sleep(100 * time.Millisecond) - - if atomic.LoadInt32(&received) != max { - t.Fatalf("Received %d msgs, wanted only %d\n", received, max) - } - if sub.IsValid() { - t.Fatal("Expected subscription to be invalid after hitting max") - } - if err := sub.AutoUnsubscribe(10); err == nil { - t.Fatal("Calling AutoUnsubscribe() on closed subscription should fail") - } - delta := (runtime.NumGoroutine() - base) - if delta > 0 { - t.Fatalf("%d Go routines still exist post max subscriptions hit", delta) - } -} - -func TestClientSyncAutoUnsub(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - received := 0 - max := 10 - sub, _ := nc.SubscribeSync("foo") - sub.AutoUnsubscribe(max) - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - for { - _, err := sub.NextMsg(10 * time.Millisecond) - if err != nil { - if err != nats.ErrMaxMessages { - t.Fatalf("Expected '%v', but got: '%v'\n", nats.ErrMaxMessages, err.Error()) - } - break - } - received++ - } - if received != max { - t.Fatalf("Received %d msgs, wanted only %d\n", received, max) - } - if sub.IsValid() { - t.Fatal("Expected subscription to be invalid after hitting max") - } - if err := sub.AutoUnsubscribe(10); err == nil { - t.Fatal("Calling AutoUnsubscribe() on closed subscription should fail") - } -} - -func TestClientASyncAutoUnsub(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - received := int32(0) - max := int32(10) - sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { - atomic.AddInt32(&received, 1) - }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - sub.AutoUnsubscribe(int(max)) - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - time.Sleep(10 * time.Millisecond)
- - if atomic.LoadInt32(&received) != max { - t.Fatalf("Received %d msgs, wanted only %d\n", received, max) - } - if err := sub.AutoUnsubscribe(10); err == nil { - t.Fatal("Calling AutoUnsubscribe() on closed subscription should fail") - } -} - -func TestAutoUnsubAndReconnect(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - rch := make(chan bool) - - nc, err := nats.Connect(nats.DefaultURL, - nats.ReconnectWait(50*time.Millisecond), - nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true })) - if err != nil { - t.Fatalf("Unable to connect: %v", err) - } - defer nc.Close() - - received := int32(0) - max := int32(10) - sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { - atomic.AddInt32(&received, 1) - }) - if err != nil { - t.Fatalf("Failed to subscribe: %v", err) - } - sub.AutoUnsubscribe(int(max)) - - // Send less than the max - total := int(max / 2) - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - - // Restart the server - s.Shutdown() - s = RunDefaultServer() - defer s.Shutdown() - - // and wait to reconnect - if err := Wait(rch); err != nil { - t.Fatal("Failed to get the reconnect cb") - } - - // Now send more than the total max. - total = int(3 * max) - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - - // Wait a bit before checking. - time.Sleep(50 * time.Millisecond) - - // We should have received only up-to-max messages. - if atomic.LoadInt32(&received) != max { - t.Fatalf("Received %d msgs, wanted only %d\n", received, max) - } -} - -func TestAutoUnsubWithParallelNextMsgCalls(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - rch := make(chan bool, 1) - - nc, err := nats.Connect(nats.DefaultURL, - nats.ReconnectWait(50*time.Millisecond), - nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true })) - if err != nil { - t.Fatalf("Unable to connect: %v", err) - } - defer nc.Close() - - numRoutines := 3 - max := 100 - total := max * 2 - received := int64(0) - - var wg sync.WaitGroup - - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatalf("Failed to subscribe: %v", err) - } - sub.AutoUnsubscribe(int(max)) - nc.Flush() - - wg.Add(numRoutines) - - for i := 0; i < numRoutines; i++ { - go func(s *nats.Subscription, idx int) { - for { - // The first to reach the max delivered will cause the - // subscription to be removed, which will kick out all - // other calls to NextMsg. So don't be afraid of the long - // timeout. 
- _, err := s.NextMsg(3 * time.Second) - if err != nil { - break - } - atomic.AddInt64(&received, 1) - } - wg.Done() - }(sub, i) - } - - msg := []byte("Hello") - for i := 0; i < max/2; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - s.Shutdown() - s = RunDefaultServer() - defer s.Shutdown() - - // Make sure we got the reconnected cb - if err := Wait(rch); err != nil { - t.Fatal("Failed to get reconnected cb") - } - - for i := 0; i < total; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - wg.Wait() - if atomic.LoadInt64(&received) != int64(max) { - t.Fatalf("Wrong number of received msgs: %v instead of %v", atomic.LoadInt64(&received), max) - } -} - -func TestAutoUnsubscribeFromCallback(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc, err := nats.Connect(nats.DefaultURL) - if err != nil { - t.Fatalf("Unable to connect: %v", err) - } - defer nc.Close() - - max := 10 - resetUnsubMark := int64(max / 2) - limit := int64(100) - received := int64(0) - - msg := []byte("Hello") - - // Auto-unsubscribe within the callback with a value lower - // than what was already received. - - sub, err := nc.Subscribe("foo", func(m *nats.Msg) { - r := atomic.AddInt64(&received, 1) - if r == resetUnsubMark { - m.Sub.AutoUnsubscribe(int(r - 1)) - nc.Flush() - } - if r == limit { - // Something went wrong... fail now - t.Fatal("Got more messages than expected") - } - nc.Publish("foo", msg) - }) - if err != nil { - t.Fatalf("Failed to subscribe: %v", err) - } - sub.AutoUnsubscribe(int(max)) - nc.Flush() - - // Trigger the first message, the others are sent from the callback. - nc.Publish("foo", msg) - nc.Flush() - - time.Sleep(100 * time.Millisecond) - - recv := atomic.LoadInt64(&received) - if recv != resetUnsubMark { - t.Fatalf("Wrong number of received messages. Original max was %v reset to %v, actual received: %v", - max, resetUnsubMark, recv) - } - - // Now check AutoUnsubscribe with a higher value than the original - received = int64(0) - newMax := int64(2 * max) - - sub, err = nc.Subscribe("foo", func(m *nats.Msg) { - r := atomic.AddInt64(&received, 1) - if r == resetUnsubMark { - m.Sub.AutoUnsubscribe(int(newMax)) - nc.Flush() - } - if r == limit { - // Something went wrong... fail now - t.Fatal("Got more messages than expected") - } - nc.Publish("foo", msg) - }) - if err != nil { - t.Fatalf("Failed to subscribe: %v", err) - } - sub.AutoUnsubscribe(int(max)) - nc.Flush() - - // Trigger the first message, the others are sent from the callback. - nc.Publish("foo", msg) - nc.Flush() - - time.Sleep(100 * time.Millisecond) - - recv = atomic.LoadInt64(&received) - if recv != newMax { - t.Fatalf("Wrong number of received messages. Original max was %v reset to %v, actual received: %v", - max, newMax, recv) - } -} - -func TestCloseSubRelease(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, _ := nc.SubscribeSync("foo") - start := time.Now() - go func() { - time.Sleep(5 * time.Millisecond) - nc.Close() - }() - _, err := sub.NextMsg(50 * time.Millisecond) - if err == nil { - t.Fatalf("Expected an error from NextMsg") - } - elapsed := time.Since(start) - - // On Windows, the minimum waitTime is at least 15ms.
- if elapsed > 20*time.Millisecond { - t.Fatalf("Too much time has elapsed to release NextMsg: %dms", - (elapsed / time.Millisecond)) - } -} - -func TestIsValidSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, err := nc.SubscribeSync("foo") - if !sub.IsValid() { - t.Fatalf("Subscription should be valid") - } - for i := 0; i < 10; i++ { - nc.Publish("foo", []byte("Hello")) - } - nc.Flush() - _, err = sub.NextMsg(200 * time.Millisecond) - if err != nil { - t.Fatalf("NextMsg returned an error") - } - sub.Unsubscribe() - _, err = sub.NextMsg(200 * time.Millisecond) - if err == nil { - t.Fatalf("NextMsg should have returned an error") - } -} - -func TestSlowSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, _ := nc.SubscribeSync("foo") - sub.SetPendingLimits(100, 1024) - - for i := 0; i < 200; i++ { - nc.Publish("foo", []byte("Hello")) - } - timeout := 5 * time.Second - start := time.Now() - nc.FlushTimeout(timeout) - elapsed := time.Since(start) - if elapsed >= timeout { - t.Fatalf("Flush did not return before timeout: %d > %d", elapsed, timeout) - } - // Make sure NextMsg returns an error to indicate slow consumer - _, err := sub.NextMsg(200 * time.Millisecond) - if err == nil { - t.Fatalf("NextMsg did not return an error") - } -} - -func TestSlowChanSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - ch := make(chan *nats.Msg, 64) - sub, _ := nc.ChanSubscribe("foo", ch) - sub.SetPendingLimits(100, 1024) - - for i := 0; i < 200; i++ { - nc.Publish("foo", []byte("Hello")) - } - timeout := 5 * time.Second - start := time.Now() - nc.FlushTimeout(timeout) - elapsed := time.Since(start) - if elapsed >= timeout { - t.Fatalf("Flush did not return before timeout: %d > %d", elapsed, timeout) - } -} - -func TestSlowAsyncSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - bch := make(chan bool) - - sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) { - // block to back us up.. - <-bch - }) - // Make sure these are the defaults - pm, pb, _ := sub.PendingLimits() - if pm != nats.DefaultSubPendingMsgsLimit { - t.Fatalf("Pending limit for number of msgs incorrect, expected %d, got %d\n", nats.DefaultSubPendingMsgsLimit, pm) - } - if pb != nats.DefaultSubPendingBytesLimit { - t.Fatalf("Pending limit for number of bytes incorrect, expected %d, got %d\n", nats.DefaultSubPendingBytesLimit, pb) - } - - // Set new limits - pml := 100 - pbl := 1024 * 1024 - - sub.SetPendingLimits(pml, pbl) - - // Make sure the set is correct - pm, pb, _ = sub.PendingLimits() - if pm != pml { - t.Fatalf("Pending limit for number of msgs incorrect, expected %d, got %d\n", pml, pm) - } - if pb != pbl { - t.Fatalf("Pending limit for number of bytes incorrect, expected %d, got %d\n", pbl, pb) - } - - for i := 0; i < (int(pml) + 100); i++ { - nc.Publish("foo", []byte("Hello")) - } - - timeout := 5 * time.Second - start := time.Now() - err := nc.FlushTimeout(timeout) - elapsed := time.Since(start) - if elapsed >= timeout { - t.Fatalf("Flush did not return before timeout") - } - // We want flush to work, so expect no error for it. 
- if err != nil { - t.Fatalf("Expected no error from Flush()\n") - } - if nc.LastError() != nats.ErrSlowConsumer { - t.Fatal("Expected LastError to indicate slow consumer") - } - // release the sub - bch <- true -} - -func TestAsyncErrHandler(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - opts := nats.DefaultOptions - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Could not connect to server: %v\n", err) - } - defer nc.Close() - - subj := "async_test" - bch := make(chan bool) - - sub, err := nc.Subscribe(subj, func(_ *nats.Msg) { - // block to back us up.. - <-bch - }) - if err != nil { - t.Fatalf("Could not subscribe: %v\n", err) - } - - limit := 10 - toSend := 100 - - // Limit internal subchan length to trip condition easier. - sub.SetPendingLimits(limit, 1024) - - ch := make(chan bool) - - aeCalled := int64(0) - - nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) { - atomic.AddInt64(&aeCalled, 1) - - if s != sub { - t.Fatal("Did not receive proper subscription") - } - if e != nats.ErrSlowConsumer { - t.Fatalf("Did not receive proper error: %v vs %v\n", e, nats.ErrSlowConsumer) - } - // Suppress additional calls - if atomic.LoadInt64(&aeCalled) == 1 { - // release the sub - defer close(bch) - // release the test - ch <- true - } - }) - - b := []byte("Hello World!") - // First one trips the ch wait in subscription callback. - nc.Publish(subj, b) - nc.Flush() - for i := 0; i < toSend; i++ { - nc.Publish(subj, b) - } - if err := nc.Flush(); err != nil { - t.Fatalf("Got an error on Flush:%v\n", err) - } - - if e := Wait(ch); e != nil { - t.Fatal("Failed to call async err handler") - } - // Make sure dropped stats is correct. - if d, _ := sub.Dropped(); d != toSend-limit { - t.Fatalf("Expected Dropped to be %d, got %d\n", toSend-limit, d) - } - if ae := atomic.LoadInt64(&aeCalled); ae != 1 { - t.Fatalf("Expected err handler to be called once, got %d\n", ae) - } - - sub.Unsubscribe() - if _, err := sub.Dropped(); err == nil { - t.Fatal("Calling Dropped() on closed subscription should fail") - } -} - -func TestAsyncErrHandlerChanSubscription(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - opts := nats.DefaultOptions - - nc, err := opts.Connect() - if err != nil { - t.Fatalf("Could not connect to server: %v\n", err) - } - defer nc.Close() - - subj := "chan_test" - - limit := 10 - toSend := 100 - - // Create our own channel. - mch := make(chan *nats.Msg, limit) - sub, err := nc.ChanSubscribe(subj, mch) - if err != nil { - t.Fatalf("Could not subscribe: %v\n", err) - } - ch := make(chan bool) - aeCalled := int64(0) - - nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) { - atomic.AddInt64(&aeCalled, 1) - if e != nats.ErrSlowConsumer { - t.Fatalf("Did not receive proper error: %v vs %v\n", - e, nats.ErrSlowConsumer) - } - // Suppress additional calls - if atomic.LoadInt64(&aeCalled) == 1 { - // release the test - ch <- true - } - }) - - b := []byte("Hello World!") - for i := 0; i < toSend; i++ { - nc.Publish(subj, b) - } - nc.Flush() - - if e := Wait(ch); e != nil { - t.Fatal("Failed to call async err handler") - } - // Make sure dropped stats is correct. 
- if d, _ := sub.Dropped(); d != toSend-limit { - t.Fatalf("Expected Dropped to be %d, got %d\n", toSend-limit, d) - } - if ae := atomic.LoadInt64(&aeCalled); ae != 1 { - t.Fatalf("Expected err handler to be called once, got %d\n", ae) - } - - sub.Unsubscribe() - if _, err := sub.Dropped(); err == nil { - t.Fatal("Calling Dropped() on closed subscription should fail") - } -} - -// Test to make sure that we can send and async receive messages on -// different subjects within a callback. -func TestAsyncSubscriberStarvation(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Helper - nc.Subscribe("helper", func(m *nats.Msg) { - nc.Publish(m.Reply, []byte("Hello")) - }) - - ch := make(chan bool) - - // Kickoff - nc.Subscribe("start", func(m *nats.Msg) { - // Helper Response - response := nats.NewInbox() - nc.Subscribe(response, func(_ *nats.Msg) { - ch <- true - }) - nc.PublishRequest("helper", response, []byte("Help Me!")) - }) - - nc.Publish("start", []byte("Begin")) - nc.Flush() - - if e := Wait(ch); e != nil { - t.Fatal("Was stalled inside of callback waiting on another callback") - } -} - -func TestAsyncSubscribersOnClose(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - toSend := 10 - callbacks := int32(0) - ch := make(chan bool, toSend) - - nc.Subscribe("foo", func(_ *nats.Msg) { - atomic.AddInt32(&callbacks, 1) - <-ch - }) - - for i := 0; i < toSend; i++ { - nc.Publish("foo", []byte("Hello World!")) - } - nc.Flush() - time.Sleep(10 * time.Millisecond) - nc.Close() - - // Release callbacks - for i := 1; i < toSend; i++ { - ch <- true - } - - // Wait for some time. - time.Sleep(10 * time.Millisecond) - seen := atomic.LoadInt32(&callbacks) - if seen != 1 { - t.Fatalf("Expected only one callback, received %d callbacks\n", seen) - } -} - -func TestNextMsgCallOnAsyncSub(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { - }) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - _, err = sub.NextMsg(time.Second) - if err == nil { - t.Fatal("Expected an error calling NextMsg() on AsyncSubscriber") - } -} - -func TestNextMsgCallOnClosedSub(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - - if err = sub.Unsubscribe(); err != nil { - t.Fatal("Unsubscribe failed with err:", err) - } - - _, err = sub.NextMsg(time.Second) - if err == nil { - t.Fatal("Expected an error calling NextMsg() on closed subscription") - } else if err != nats.ErrBadSubscription { - t.Fatalf("Expected '%v', but got: '%v'\n", nats.ErrBadSubscription, err.Error()) - } -} - -func TestChanSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Create our own channel. - ch := make(chan *nats.Msg, 128) - - // Channel is mandatory - if _, err := nc.ChanSubscribe("foo", nil); err == nil { - t.Fatal("Creating subscription without channel should have failed") - } - - _, err := nc.ChanSubscribe("foo", ch) - if err != nil { - t.Fatal("Failed to subscribe: ", err) - } - - // Send some messages to ourselves.
- total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - - received := 0 - tm := time.NewTimer(5 * time.Second) - defer tm.Stop() - - // Go ahead and receive - for { - select { - case _, ok := <-ch: - if !ok { - t.Fatalf("Got an error reading from channel") - } - case <-tm.C: - t.Fatalf("Timed out waiting on messages") - } - received++ - if received >= total { - return - } - } -} - -func TestChanQueueSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Create our own channel. - ch1 := make(chan *nats.Msg, 64) - ch2 := make(chan *nats.Msg, 64) - - nc.ChanQueueSubscribe("foo", "bar", ch1) - nc.ChanQueueSubscribe("foo", "bar", ch2) - - // Send some messages to ourselves. - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - - received := 0 - tm := time.NewTimer(5 * time.Second) - defer tm.Stop() - - chk := func(ok bool) { - if !ok { - t.Fatalf("Got an error reading from channel") - } else { - received++ - } - } - - // Go ahead and receive - for { - select { - case _, ok := <-ch1: - chk(ok) - case _, ok := <-ch2: - chk(ok) - case <-tm.C: - t.Fatalf("Timed out waiting on messages") - } - if received >= total { - return - } - } -} - -func TestChanSubscriberPendingLimits(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // There was a defect that prevented to receive more than - // the default pending message limit. Trying to send more - // than this limit. - total := nats.DefaultSubPendingMsgsLimit + 100 - - for typeSubs := 0; typeSubs < 3; typeSubs++ { - - func() { - // Create our own channel. - ch := make(chan *nats.Msg, total) - - var err error - var sub *nats.Subscription - switch typeSubs { - case 0: - sub, err = nc.ChanSubscribe("foo", ch) - case 1: - sub, err = nc.ChanQueueSubscribe("foo", "bar", ch) - case 2: - sub, err = nc.QueueSubscribeSyncWithChan("foo", "bar", ch) - } - if err != nil { - t.Fatalf("Unexpected error on subscribe: %v", err) - } - defer sub.Unsubscribe() - - // Send some messages to ourselves. - go func() { - for i := 0; i < total; i++ { - if err := nc.Publish("foo", []byte("Hello")); err != nil { - t.Fatalf("Unexpected error on publish: %v", err) - } - } - }() - - received := 0 - tm := time.NewTimer(5 * time.Second) - defer tm.Stop() - - chk := func(ok bool) { - if !ok { - t.Fatalf("Got an error reading from channel") - } else { - received++ - } - } - - // Go ahead and receive - for { - select { - case _, ok := <-ch: - chk(ok) - case <-tm.C: - t.Fatalf("Timed out waiting on messages") - } - if received >= total { - return - } - } - }() - } -} - -func TestQueueChanQueueSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Create our own channel. - ch1 := make(chan *nats.Msg, 64) - ch2 := make(chan *nats.Msg, 64) - - nc.QueueSubscribeSyncWithChan("foo", "bar", ch1) - nc.QueueSubscribeSyncWithChan("foo", "bar", ch2) - - // Send some messages to ourselves. 
- total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - - recv1 := 0 - recv2 := 0 - tm := time.NewTimer(5 * time.Second) - defer tm.Stop() - runTimer := time.NewTimer(500 * time.Millisecond) - defer runTimer.Stop() - - chk := func(ok bool, which int) { - if !ok { - t.Fatalf("Got an error reading from channel") - } else { - if which == 1 { - recv1++ - } else { - recv2++ - } - } - } - - // Go ahead and receive -recvLoop: - for { - select { - case _, ok := <-ch1: - chk(ok, 1) - case _, ok := <-ch2: - chk(ok, 2) - case <-tm.C: - t.Fatalf("Timed out waiting on messages") - case <-runTimer.C: - break recvLoop - } - } - - if recv1+recv2 > total { - t.Fatalf("Received more messages than expected: %v vs %v", (recv1 + recv2), total) - } -} - -func TestUnsubscribeChanOnSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Create our own channel. - ch := make(chan *nats.Msg, 8) - sub, _ := nc.ChanSubscribe("foo", ch) - - // Send some messages to ourselves. - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - - sub.Unsubscribe() - for len(ch) > 0 { - <-ch - } - // Make sure we can send to the channel still. - // Test that we do not close it. - ch <- &nats.Msg{} -} - -func TestCloseChanOnSubscriber(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Create our own channel. - ch := make(chan *nats.Msg, 8) - nc.ChanSubscribe("foo", ch) - - // Send some messages to ourselves. - total := 100 - for i := 0; i < total; i++ { - nc.Publish("foo", []byte("Hello")) - } - - nc.Close() - for len(ch) > 0 { - <-ch - } - // Make sure we can send to the channel still. - // Test that we do not close it. - ch <- &nats.Msg{} -} - -func TestAsyncSubscriptionPending(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Send some messages to ourselves. - total := 100 - msg := []byte("0123456789") - - inCb := make(chan bool) - block := make(chan bool) - - sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) { - inCb <- true - <-block - }) - defer sub.Unsubscribe() - - for i := 0; i < total; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - // Wait that a message is received, so checks are safe - if err := Wait(inCb); err != nil { - t.Fatal("No message received") - } - - // Test old way - q, _ := sub.QueuedMsgs() - if q != total && q != total-1 { - t.Fatalf("Expected %d or %d, got %d\n", total, total-1, q) - } - - // New way, make sure the same and check bytes. - m, b, _ := sub.Pending() - mlen := len(msg) - totalSize := total * mlen - - if m != total && m != total-1 { - t.Fatalf("Expected msgs of %d or %d, got %d\n", total, total-1, m) - } - if b != totalSize && b != totalSize-mlen { - t.Fatalf("Expected bytes of %d or %d, got %d\n", - totalSize, totalSize-mlen, b) - } - - // Make sure max has been set. Since we block after the first message is - // received, MaxPending should be >= total - 1 and <= total - mm, bm, _ := sub.MaxPending() - if mm < total-1 || mm > total { - t.Fatalf("Expected max msgs (%d) to be between %d and %d\n", - mm, total-1, total) - } - if bm < totalSize-mlen || bm > totalSize { - t.Fatalf("Expected max bytes (%d) to be between %d and %d\n", - bm, totalSize, totalSize-mlen) - } - // Check that clear works. 
- sub.ClearMaxPending() - mm, bm, _ = sub.MaxPending() - if mm != 0 { - t.Fatalf("Expected max msgs to be 0 vs %d after clearing\n", mm) - } - if bm != 0 { - t.Fatalf("Expected max bytes to be 0 vs %d after clearing\n", bm) - } - - close(block) - sub.Unsubscribe() - - // These calls should fail once the subscription is closed. - if _, _, err := sub.Pending(); err == nil { - t.Fatal("Calling Pending() on closed subscription should fail") - } - if _, _, err := sub.MaxPending(); err == nil { - t.Fatal("Calling MaxPending() on closed subscription should fail") - } - if err := sub.ClearMaxPending(); err == nil { - t.Fatal("Calling ClearMaxPending() on closed subscription should fail") - } -} - -func TestAsyncSubscriptionPendingDrain(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Send some messages to ourselves. - total := 100 - msg := []byte("0123456789") - - sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) {}) - defer sub.Unsubscribe() - - for i := 0; i < total; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - // Wait for all delivered. - for d, _ := sub.Delivered(); d != int64(total); d, _ = sub.Delivered() { - time.Sleep(10 * time.Millisecond) - } - - m, b, _ := sub.Pending() - if m != 0 { - t.Fatalf("Expected msgs of 0, got %d\n", m) - } - if b != 0 { - t.Fatalf("Expected bytes of 0, got %d\n", b) - } - - sub.Unsubscribe() - if _, err := sub.Delivered(); err == nil { - t.Fatal("Calling Delivered() on closed subscription should fail") - } -} - -func TestSyncSubscriptionPendingDrain(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - // Send some messages to ourselves. - total := 100 - msg := []byte("0123456789") - - sub, _ := nc.SubscribeSync("foo") - defer sub.Unsubscribe() - - for i := 0; i < total; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - // Wait for all delivered. - for d, _ := sub.Delivered(); d != int64(total); d, _ = sub.Delivered() { - sub.NextMsg(10 * time.Millisecond) - } - - m, b, _ := sub.Pending() - if m != 0 { - t.Fatalf("Expected msgs of 0, got %d\n", m) - } - if b != 0 { - t.Fatalf("Expected bytes of 0, got %d\n", b) - } - - sub.Unsubscribe() - if _, err := sub.Delivered(); err == nil { - t.Fatal("Calling Delivered() on closed subscription should fail") - } -} - -func TestSyncSubscriptionPending(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, _ := nc.SubscribeSync("foo") - defer sub.Unsubscribe() - - // Send some messages to ourselves. - total := 100 - msg := []byte("0123456789") - for i := 0; i < total; i++ { - nc.Publish("foo", msg) - } - nc.Flush() - - // Test old way - q, _ := sub.QueuedMsgs() - if q != total && q != total-1 { - t.Fatalf("Expected %d or %d, got %d\n", total, total-1, q) - } - - // New way, make sure the same and check bytes. 
- m, b, _ := sub.Pending() - mlen := len(msg) - - if m != total { - t.Fatalf("Expected msgs of %d, got %d\n", total, m) - } - if b != total*mlen { - t.Fatalf("Expected bytes of %d, got %d\n", total*mlen, b) - } - - // Now drain some down and make sure pending is correct - for i := 0; i < total-1; i++ { - sub.NextMsg(10 * time.Millisecond) - } - m, b, _ = sub.Pending() - if m != 1 { - t.Fatalf("Expected msgs of 1, got %d\n", m) - } - if b != mlen { - t.Fatalf("Expected bytes of %d, got %d\n", mlen, b) - } -} - -func TestSetPendingLimits(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - payload := []byte("hello") - payloadLen := len(payload) - toSend := 100 - - var sub *nats.Subscription - - // Check for invalid values - invalid := func() error { - if err := sub.SetPendingLimits(0, 1); err == nil { - return fmt.Errorf("Setting limit with 0 should fail") - } - if err := sub.SetPendingLimits(1, 0); err == nil { - return fmt.Errorf("Setting limit with 0 should fail") - } - return nil - } - // function to send messages - send := func(subject string, count int) { - for i := 0; i < count; i++ { - if err := nc.Publish(subject, payload); err != nil { - t.Fatalf("Unexpected error on publish: %v", err) - } - } - nc.Flush() - } - - // Check pending vs expected values - var limitCount, limitBytes int - var expectedCount, expectedBytes int - checkPending := func() error { - lc, lb, err := sub.PendingLimits() - if err != nil { - return err - } - if lc != limitCount || lb != limitBytes { - return fmt.Errorf("Unexpected limits, expected %v msgs %v bytes, got %v msgs %v bytes", - limitCount, limitBytes, lc, lb) - } - msgs, bytes, err := sub.Pending() - if err != nil { - return fmt.Errorf("Unexpected error getting pending counts: %v", err) - } - if (msgs != expectedCount && msgs != expectedCount-1) || - (bytes != expectedBytes && bytes != expectedBytes-payloadLen) { - return fmt.Errorf("Unexpected counts, expected %v msgs %v bytes, got %v msgs %v bytes", - expectedCount, expectedBytes, msgs, bytes) - } - return nil - } - - recv := make(chan bool) - block := make(chan bool) - cb := func(m *nats.Msg) { - recv <- true - <-block - m.Sub.Unsubscribe() - } - subj := "foo" - sub, err := nc.Subscribe(subj, cb) - if err != nil { - t.Fatalf("Unexpected error on subscribe: %v", err) - } - defer sub.Unsubscribe() - if err := invalid(); err != nil { - t.Fatalf("%v", err) - } - // Check we apply limit only for size - limitCount = -1 - limitBytes = (toSend / 2) * payloadLen - if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { - t.Fatalf("Unexpected error setting limits: %v", err) - } - // Send messages - send(subj, toSend) - // Wait for message to be received - if err := Wait(recv); err != nil { - t.Fatal("Did not get our message") - } - expectedBytes = limitBytes - expectedCount = limitBytes / payloadLen - if err := checkPending(); err != nil { - t.Fatalf("%v", err) - } - // Release callback - block <- true - - subj = "bar" - sub, err = nc.Subscribe(subj, cb) - if err != nil { - t.Fatalf("Unexpected error on subscribe: %v", err) - } - defer sub.Unsubscribe() - // Check we apply limit only for count - limitCount = toSend / 4 - limitBytes = -1 - if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { - t.Fatalf("Unexpected error setting limits: %v", err) - } - // Send messages - send(subj, toSend) - // Wait for message to be received - if err := Wait(recv); err != nil { - t.Fatal("Did not get our message") - } - expectedCount 
= limitCount - expectedBytes = limitCount * payloadLen - if err := checkPending(); err != nil { - t.Fatalf("%v", err) - } - // Release callback - block <- true - - subj = "baz" - sub, err = nc.SubscribeSync(subj) - if err != nil { - t.Fatalf("Unexpected error on subscribe: %v", err) - } - defer sub.Unsubscribe() - if err := invalid(); err != nil { - t.Fatalf("%v", err) - } - // Check we apply limit only for size - limitCount = -1 - limitBytes = (toSend / 2) * payloadLen - if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { - t.Fatalf("Unexpected error setting limits: %v", err) - } - // Send messages - send(subj, toSend) - expectedBytes = limitBytes - expectedCount = limitBytes / payloadLen - if err := checkPending(); err != nil { - t.Fatalf("%v", err) - } - sub.Unsubscribe() - nc.Flush() - - subj = "boz" - sub, err = nc.SubscribeSync(subj) - if err != nil { - t.Fatalf("Unexpected error on subscribe: %v", err) - } - defer sub.Unsubscribe() - // Check we apply limit only for count - limitCount = toSend / 4 - limitBytes = -1 - if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { - t.Fatalf("Unexpected error setting limits: %v", err) - } - // Send messages - send(subj, toSend) - expectedCount = limitCount - expectedBytes = limitCount * payloadLen - if err := checkPending(); err != nil { - t.Fatalf("%v", err) - } - sub.Unsubscribe() - nc.Flush() -} - -func TestSubscriptionTypes(t *testing.T) { - s := RunDefaultServer() - defer s.Shutdown() - - nc := NewDefaultConnection(t) - defer nc.Close() - - sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) {}) - defer sub.Unsubscribe() - if st := sub.Type(); st != nats.AsyncSubscription { - t.Fatalf("Expected AsyncSubscription, got %v\n", st) - } - // Check Pending - if err := sub.SetPendingLimits(1, 100); err != nil { - t.Fatalf("We should be able to SetPendingLimits()") - } - if _, _, err := sub.Pending(); err != nil { - t.Fatalf("We should be able to call Pending()") - } - sub.Unsubscribe() - if err := sub.SetPendingLimits(1, 100); err == nil { - t.Fatal("Calling SetPendingLimits() on closed subscription should fail") - } - if _, _, err := sub.PendingLimits(); err == nil { - t.Fatal("Calling PendingLimits() on closed subscription should fail") - } - - sub, _ = nc.SubscribeSync("foo") - defer sub.Unsubscribe() - if st := sub.Type(); st != nats.SyncSubscription { - t.Fatalf("Expected SyncSubscription, got %v\n", st) - } - // Check Pending - if err := sub.SetPendingLimits(1, 100); err != nil { - t.Fatalf("We should be able to SetPendingLimits()") - } - if _, _, err := sub.Pending(); err != nil { - t.Fatalf("We should be able to call Pending()") - } - sub.Unsubscribe() - if err := sub.SetPendingLimits(1, 100); err == nil { - t.Fatal("Calling SetPendingLimits() on closed subscription should fail") - } - if _, _, err := sub.PendingLimits(); err == nil { - t.Fatal("Calling PendingLimits() on closed subscription should fail") - } - - sub, _ = nc.ChanSubscribe("foo", make(chan *nats.Msg)) - defer sub.Unsubscribe() - if st := sub.Type(); st != nats.ChanSubscription { - t.Fatalf("Expected ChanSubscription, got %v\n", st) - } - // Check Pending - if err := sub.SetPendingLimits(1, 100); err == nil { - t.Fatalf("We should NOT be able to SetPendingLimits() on ChanSubscriber") - } - if _, _, err := sub.Pending(); err == nil { - t.Fatalf("We should NOT be able to call Pending() on ChanSubscriber") - } - if _, _, err := sub.MaxPending(); err == nil { - t.Fatalf("We should NOT be able to call MaxPending() on ChanSubscriber") - } - if err := 
sub.ClearMaxPending(); err == nil { - t.Fatalf("We should NOT be able to call ClearMaxPending() on ChanSubscriber") - } - if _, _, err := sub.PendingLimits(); err == nil { - t.Fatalf("We should NOT be able to call PendingLimits() on ChanSubscriber") - } - -} diff --git a/vendor/github.com/nats-io/go-nats/test/test.go b/vendor/github.com/nats-io/go-nats/test/test.go deleted file mode 100644 index ec4fd1db8..000000000 --- a/vendor/github.com/nats-io/go-nats/test/test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 Apcera Inc. All rights reserved. - -package test - -import ( - "errors" - "fmt" - "time" - - "github.com/nats-io/gnatsd/server" - "github.com/nats-io/go-nats" - - gnatsd "github.com/nats-io/gnatsd/test" -) - -// So that we can pass tests and benchmarks... -type tLogger interface { - Fatalf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) -} - -// TestLogger -type TestLogger tLogger - -// Dumb wait program to sync on callbacks, etc... Will timeout -func Wait(ch chan bool) error { - return WaitTime(ch, 5*time.Second) -} - -// Wait for a chan with a timeout. -func WaitTime(ch chan bool, timeout time.Duration) error { - select { - case <-ch: - return nil - case <-time.After(timeout): - } - return errors.New("timeout") -} - -//////////////////////////////////////////////////////////////////////////////// -// Creating client connections -//////////////////////////////////////////////////////////////////////////////// - -// NewDefaultConnection -func NewDefaultConnection(t tLogger) *nats.Conn { - return NewConnection(t, nats.DefaultPort) -} - -// NewConnection forms connection on a given port. -func NewConnection(t tLogger, port int) *nats.Conn { - url := fmt.Sprintf("nats://localhost:%d", port) - nc, err := nats.Connect(url) - if err != nil { - t.Fatalf("Failed to create default connection: %v\n", err) - return nil - } - return nc -} - -// NewEConn -func NewEConn(t tLogger) *nats.EncodedConn { - ec, err := nats.NewEncodedConn(NewDefaultConnection(t), nats.DEFAULT_ENCODER) - if err != nil { - t.Fatalf("Failed to create an encoded connection: %v\n", err) - } - return ec -} - -//////////////////////////////////////////////////////////////////////////////// -// Running gnatsd server in separate Go routines -//////////////////////////////////////////////////////////////////////////////// - -// RunDefaultServer will run a server on the default port. -func RunDefaultServer() *server.Server { - return RunServerOnPort(nats.DefaultPort) -} - -// RunServerOnPort will run a server on the given port. -func RunServerOnPort(port int) *server.Server { - opts := gnatsd.DefaultTestOptions - opts.Port = port - return RunServerWithOptions(opts) -} - -// RunServerWithOptions will run a server with the given options. -func RunServerWithOptions(opts server.Options) *server.Server { - return gnatsd.RunServer(&opts) -} - -// RunServerWithConfig will run a server with the given configuration file. -func RunServerWithConfig(configFile string) (*server.Server, *server.Options) { - return gnatsd.RunServerWithConfig(configFile) -} diff --git a/vendor/github.com/nats-io/go-nats/util/tls.go b/vendor/github.com/nats-io/go-nats/util/tls.go deleted file mode 100644 index 51da0b88c..000000000 --- a/vendor/github.com/nats-io/go-nats/util/tls.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Apcera Inc. All rights reserved. -// +build go1.7 - -package util - -import ( - "crypto/tls" -) - -// CloneTLSConfig returns a copy of c. Only the exported fields are copied. 
-// This is temporary, until this is provided by the language. -// https://go-review.googlesource.com/#/c/28075/ -func CloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/nats-io/go-nats/util/tls_pre17.go b/vendor/github.com/nats-io/go-nats/util/tls_pre17.go deleted file mode 100644 index db198ae31..000000000 --- a/vendor/github.com/nats-io/go-nats/util/tls_pre17.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 Apcera Inc. All rights reserved. -// +build go1.5,!go1.7 - -package util - -import ( - "crypto/tls" -) - -// CloneTLSConfig returns a copy of c. Only the exported fields are copied. -// This is temporary, until this is provided by the language. -// https://go-review.googlesource.com/#/c/28075/ -func CloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/vendor/github.com/nats-io/nats/.travis.yml b/vendor/github.com/nats-io/nats/.travis.yml index 7f8b6d352..e843f0b00 100644 --- a/vendor/github.com/nats-io/nats/.travis.yml +++ b/vendor/github.com/nats-io/nats/.travis.yml @@ -14,6 +14,5 @@ script: - go vet ./... - go test -i -race ./... - go test -v -race ./... -- staticcheck -ignore="github.com/nats-io/go-nats/*_test.go:SA2002 github.com/nats-io/go-nats/*/*_test.go:SA2002" ./... -after_script: -- if [ "$TRAVIS_GO_VERSION" = "1.7.3" ]; then ./scripts/cov.sh; fi +- staticcheck ./... +- ./scripts/cov.sh TRAVIS diff --git a/vendor/github.com/nats-io/nats/README.md b/vendor/github.com/nats-io/nats/README.md index ad95e0a7e..5cb830c6b 100644 --- a/vendor/github.com/nats-io/nats/README.md +++ b/vendor/github.com/nats-io/nats/README.md @@ -2,13 +2,13 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
[![License MIT](https://img.shields.io/npm/l/express.svg)](http://opensource.org/licenses/MIT) -[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/go-nats)](https://goreportcard.com/report/github.com/nats-io/go-nats) [![Build Status](https://travis-ci.org/nats-io/go-nats.svg?branch=master)](http://travis-ci.org/nats-io/go-nats) [![GoDoc](https://godoc.org/github.com/nats-io/go-nats?status.svg)](http://godoc.org/github.com/nats-io/go-nats) [![Coverage Status](https://coveralls.io/repos/nats-io/go-nats/badge.svg?branch=master)](https://coveralls.io/r/nats-io/go-nats?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/nats)](https://goreportcard.com/report/github.com/nats-io/nats) [![Build Status](https://travis-ci.org/nats-io/nats.svg?branch=master)](http://travis-ci.org/nats-io/nats) [![GoDoc](https://godoc.org/github.com/nats-io/nats?status.svg)](http://godoc.org/github.com/nats-io/nats) [![Coverage Status](https://coveralls.io/repos/nats-io/nats/badge.svg?branch=master)](https://coveralls.io/r/nats-io/nats?branch=master) ## Installation ```bash # Go client -go get github.com/nats-io/go-nats +go get github.com/nats-io/nats # Server go get github.com/nats-io/gnatsd diff --git a/vendor/github.com/nats-io/nats/bench/bench.go b/vendor/github.com/nats-io/nats/bench/bench.go index 8353110b0..f985159aa 100644 --- a/vendor/github.com/nats-io/nats/bench/bench.go +++ b/vendor/github.com/nats-io/nats/bench/bench.go @@ -11,7 +11,7 @@ import ( "strconv" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" "github.com/nats-io/nuid" ) diff --git a/vendor/github.com/nats-io/nats/bench/benchlib_test.go b/vendor/github.com/nats-io/nats/bench/benchlib_test.go index 6d7dddca8..ad4fc4660 100644 --- a/vendor/github.com/nats-io/nats/bench/benchlib_test.go +++ b/vendor/github.com/nats-io/nats/bench/benchlib_test.go @@ -2,11 +2,10 @@ package bench import ( "fmt" + "github.com/nats-io/nats" "strings" "testing" "time" - - "github.com/nats-io/go-nats" ) const ( diff --git a/vendor/github.com/nats-io/nats/enc.go b/vendor/github.com/nats-io/nats/enc.go index f29b0343a..4653559d4 100644 --- a/vendor/github.com/nats-io/nats/enc.go +++ b/vendor/github.com/nats-io/nats/enc.go @@ -10,7 +10,7 @@ import ( "time" // Default Encoders - . "github.com/nats-io/go-nats/encoders/builtin" + . "github.com/nats-io/nats/encoders/builtin" ) // Encoder interface is for all register encoders diff --git a/vendor/github.com/nats-io/nats/enc_test.go b/vendor/github.com/nats-io/nats/enc_test.go index ada5b0246..7be39d10e 100644 --- a/vendor/github.com/nats-io/nats/enc_test.go +++ b/vendor/github.com/nats-io/nats/enc_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - . "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/protobuf" - "github.com/nats-io/go-nats/encoders/protobuf/testdata" + . 
"github.com/nats-io/nats" + "github.com/nats-io/nats/encoders/protobuf" + "github.com/nats-io/nats/encoders/protobuf/testdata" ) // Since we import above nats packages, we need to have a different diff --git a/vendor/github.com/nats-io/nats/encoders/builtin/enc_test.go b/vendor/github.com/nats-io/nats/encoders/builtin/enc_test.go index b57553a5a..ad757f001 100644 --- a/vendor/github.com/nats-io/nats/encoders/builtin/enc_test.go +++ b/vendor/github.com/nats-io/nats/encoders/builtin/enc_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/builtin" - "github.com/nats-io/go-nats/test" + "github.com/nats-io/nats" + "github.com/nats-io/nats/encoders/builtin" + "github.com/nats-io/nats/test" ) const TEST_PORT = 8168 diff --git a/vendor/github.com/nats-io/nats/encoders/builtin/gob_test.go b/vendor/github.com/nats-io/nats/encoders/builtin/gob_test.go index 791192be0..a7149fc7f 100644 --- a/vendor/github.com/nats-io/nats/encoders/builtin/gob_test.go +++ b/vendor/github.com/nats-io/nats/encoders/builtin/gob_test.go @@ -6,8 +6,8 @@ import ( "reflect" "testing" - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/test" + "github.com/nats-io/nats" + "github.com/nats-io/nats/test" ) func NewGobEncodedConn(tl test.TestLogger) *nats.EncodedConn { diff --git a/vendor/github.com/nats-io/nats/encoders/builtin/json_test.go b/vendor/github.com/nats-io/nats/encoders/builtin/json_test.go index c0ffb2f45..be9adfba9 100644 --- a/vendor/github.com/nats-io/nats/encoders/builtin/json_test.go +++ b/vendor/github.com/nats-io/nats/encoders/builtin/json_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/encoders/builtin" - "github.com/nats-io/go-nats/test" + "github.com/nats-io/nats" + "github.com/nats-io/nats/encoders/builtin" + "github.com/nats-io/nats/test" ) func NewJsonEncodedConn(tl test.TestLogger) *nats.EncodedConn { diff --git a/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go b/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go index 0ff1d7859..f8c559701 100644 --- a/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go +++ b/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go @@ -6,7 +6,7 @@ import ( "errors" "github.com/golang/protobuf/proto" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // Additional index for registered Encoders. 
diff --git a/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_test.go b/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_test.go index e0b360850..ee064e138 100644 --- a/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_test.go +++ b/vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/test" + "github.com/nats-io/nats" + "github.com/nats-io/nats/test" - "github.com/nats-io/go-nats/encoders/protobuf" - pb "github.com/nats-io/go-nats/encoders/protobuf/testdata" + "github.com/nats-io/nats/encoders/protobuf" + pb "github.com/nats-io/nats/encoders/protobuf/testdata" ) const TEST_PORT = 8068 diff --git a/vendor/github.com/nats-io/nats/example_test.go b/vendor/github.com/nats-io/nats/example_test.go index 2411f50ee..8aca536da 100644 --- a/vendor/github.com/nats-io/nats/example_test.go +++ b/vendor/github.com/nats-io/nats/example_test.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // Shows different ways to create a Conn diff --git a/vendor/github.com/nats-io/nats/examples/nats-bench.go b/vendor/github.com/nats-io/nats/examples/nats-bench.go index 1194e8821..c5e2db098 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-bench.go +++ b/vendor/github.com/nats-io/nats/examples/nats-bench.go @@ -11,8 +11,8 @@ import ( "sync" "time" - "github.com/nats-io/go-nats" - "github.com/nats-io/go-nats/bench" + "github.com/nats-io/nats" + "github.com/nats-io/nats/bench" ) // Some sane defaults diff --git a/vendor/github.com/nats-io/nats/examples/nats-pub.go b/vendor/github.com/nats-io/nats/examples/nats-pub.go index 4eaedb25e..8e75c2bca 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-pub.go +++ b/vendor/github.com/nats-io/nats/examples/nats-pub.go @@ -7,7 +7,7 @@ import ( "flag" "log" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // NOTE: Use tls scheme for TLS, e.g. nats-pub -s tls://demo.nats.io:4443 foo hello diff --git a/vendor/github.com/nats-io/nats/examples/nats-qsub.go b/vendor/github.com/nats-io/nats/examples/nats-qsub.go index 2262dba64..391c212e6 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-qsub.go +++ b/vendor/github.com/nats-io/nats/examples/nats-qsub.go @@ -9,7 +9,7 @@ import ( "os" "runtime" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // NOTE: Use tls scheme for TLS, e.g. nats-qsub -s tls://demo.nats.io:4443 foo diff --git a/vendor/github.com/nats-io/nats/examples/nats-req.go b/vendor/github.com/nats-io/nats/examples/nats-req.go index 711be5e7c..fdc2edadf 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-req.go +++ b/vendor/github.com/nats-io/nats/examples/nats-req.go @@ -8,7 +8,7 @@ import ( "log" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // NOTE: Use tls scheme for TLS, e.g. nats-req -s tls://demo.nats.io:4443 foo hello diff --git a/vendor/github.com/nats-io/nats/examples/nats-rply.go b/vendor/github.com/nats-io/nats/examples/nats-rply.go index d5407c42f..c395b94a4 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-rply.go +++ b/vendor/github.com/nats-io/nats/examples/nats-rply.go @@ -8,7 +8,7 @@ import ( "log" "runtime" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // NOTE: Use tls scheme for TLS, e.g. 
nats-rply -s tls://demo.nats.io:4443 foo hello diff --git a/vendor/github.com/nats-io/nats/examples/nats-sub.go b/vendor/github.com/nats-io/nats/examples/nats-sub.go index 048a0d964..96767089d 100644 --- a/vendor/github.com/nats-io/nats/examples/nats-sub.go +++ b/vendor/github.com/nats-io/nats/examples/nats-sub.go @@ -8,7 +8,7 @@ import ( "log" "runtime" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // NOTE: Use tls scheme for TLS, e.g. nats-sub -s tls://demo.nats.io:4443 foo diff --git a/vendor/github.com/nats-io/nats/nats.go b/vendor/github.com/nats-io/nats/nats.go index 52a3bb083..852404866 100644 --- a/vendor/github.com/nats-io/nats/nats.go +++ b/vendor/github.com/nats-io/nats/nats.go @@ -23,7 +23,7 @@ import ( "sync/atomic" "time" - "github.com/nats-io/go-nats/util" + "github.com/nats-io/nats/util" "github.com/nats-io/nuid" ) @@ -1445,7 +1445,7 @@ func (nc *Conn) waitForMsgs(s *Subscription) { } // Deliver the message. - if m != nil && (max == 0 || delivered <= max) { + if m != nil && (max <= 0 || delivered <= max) { mcb(m) } // If we have hit the max for delivered msgs, remove sub. diff --git a/vendor/github.com/nats-io/nats/nats_test.go b/vendor/github.com/nats-io/nats/nats_test.go index ef68782b3..d19a74e22 100644 --- a/vendor/github.com/nats-io/nats/nats_test.go +++ b/vendor/github.com/nats-io/nats/nats_test.go @@ -7,17 +7,17 @@ package nats import ( "bufio" "bytes" - "encoding/json" "errors" "fmt" "reflect" - "runtime" "strings" "testing" "time" + "encoding/json" "github.com/nats-io/gnatsd/server" gnatsd "github.com/nats-io/gnatsd/test" + "runtime" ) // Dumb wait program to sync on callbacks, etc... Will timeout @@ -779,9 +779,7 @@ func TestParserSplitMsg(t *testing.T) { } buf = []byte("\r\n") - if err := nc.parse(buf); err != nil { - t.Fatalf("Unexpected error during parsing: %v", err) - } + err = nc.parse(buf) if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) } diff --git a/vendor/github.com/nats-io/nats/scripts/cov.sh b/vendor/github.com/nats-io/nats/scripts/cov.sh index 437b8d492..7cd51f689 100755 --- a/vendor/github.com/nats-io/nats/scripts/cov.sh +++ b/vendor/github.com/nats-io/nats/scripts/cov.sh @@ -6,7 +6,7 @@ mkdir cov go test -v -covermode=atomic -coverprofile=./cov/nats.out go test -v -covermode=atomic -coverprofile=./cov/builtin.out ./encoders/builtin go test -v -covermode=atomic -coverprofile=./cov/protobuf.out ./encoders/protobuf -go test -v -covermode=atomic -coverprofile=./cov/test.out -coverpkg=github.com/nats-io/go-nats ./test +go test -v -covermode=atomic -coverprofile=./cov/test.out -coverpkg=github.com/nats-io/nats ./test gocovmerge ./cov/*.out > acc.out rm -rf ./cov diff --git a/vendor/github.com/nats-io/nats/test/auth_test.go b/vendor/github.com/nats-io/nats/test/auth_test.go index d343bfb0c..e0d55bb91 100644 --- a/vendor/github.com/nats-io/nats/test/auth_test.go +++ b/vendor/github.com/nats-io/nats/test/auth_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/nats-io/gnatsd/auth" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func TestAuth(t *testing.T) { diff --git a/vendor/github.com/nats-io/nats/test/basic_test.go b/vendor/github.com/nats-io/nats/test/basic_test.go index d18f4cb51..3df27cef3 100644 --- a/vendor/github.com/nats-io/nats/test/basic_test.go +++ b/vendor/github.com/nats-io/nats/test/basic_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - 
"github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func TestCloseLeakingGoRoutines(t *testing.T) { diff --git a/vendor/github.com/nats-io/nats/test/bench_test.go b/vendor/github.com/nats-io/nats/test/bench_test.go index 5deb12775..f03569681 100644 --- a/vendor/github.com/nats-io/nats/test/bench_test.go +++ b/vendor/github.com/nats-io/nats/test/bench_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func BenchmarkPublishSpeed(b *testing.B) { diff --git a/vendor/github.com/nats-io/nats/test/cluster_test.go b/vendor/github.com/nats-io/nats/test/cluster_test.go index a5d793254..2c2c40804 100644 --- a/vendor/github.com/nats-io/nats/test/cluster_test.go +++ b/vendor/github.com/nats-io/nats/test/cluster_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/nats-io/gnatsd/auth" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) var testServers = []string{ @@ -365,8 +365,10 @@ func TestProperFalloutAfterMaxAttempts(t *testing.T) { opts.NoRandomize = true opts.ReconnectWait = (25 * time.Millisecond) + dcbCalled := false dch := make(chan bool) opts.DisconnectedCB = func(_ *nats.Conn) { + dcbCalled = true dch <- true } @@ -432,8 +434,10 @@ func TestProperFalloutAfterMaxAttemptsWithAuthMismatch(t *testing.T) { } opts.ReconnectWait = (25 * time.Millisecond) + dcbCalled := false dch := make(chan bool) opts.DisconnectedCB = func(_ *nats.Conn) { + dcbCalled = true dch <- true } @@ -500,10 +504,12 @@ func TestTimeoutOnNoServers(t *testing.T) { } opts.NoRandomize = true + dcbCalled := false dch := make(chan bool) opts.DisconnectedCB = func(nc *nats.Conn) { // Suppress any additional calls nc.SetDisconnectHandler(nil) + dcbCalled = true dch <- true } diff --git a/vendor/github.com/nats-io/nats/test/conn_test.go b/vendor/github.com/nats-io/nats/test/conn_test.go index 68b824751..bb4f97622 100644 --- a/vendor/github.com/nats-io/nats/test/conn_test.go +++ b/vendor/github.com/nats-io/nats/test/conn_test.go @@ -16,7 +16,7 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func TestDefaultConnection(t *testing.T) { diff --git a/vendor/github.com/nats-io/nats/test/netchan_test.go b/vendor/github.com/nats-io/nats/test/netchan_test.go index 88b66cb60..aa50fc2c8 100644 --- a/vendor/github.com/nats-io/nats/test/netchan_test.go +++ b/vendor/github.com/nats-io/nats/test/netchan_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func TestBadChan(t *testing.T) { diff --git a/vendor/github.com/nats-io/nats/test/reconnect_test.go b/vendor/github.com/nats-io/nats/test/reconnect_test.go index 6e11b80f8..223c2b569 100644 --- a/vendor/github.com/nats-io/nats/test/reconnect_test.go +++ b/vendor/github.com/nats-io/nats/test/reconnect_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/nats-io/gnatsd/server" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) func startReconnectServer(t *testing.T) *server.Server { diff --git a/vendor/github.com/nats-io/nats/test/sub_test.go b/vendor/github.com/nats-io/nats/test/sub_test.go index 80fea9c95..ae777bf46 100644 --- a/vendor/github.com/nats-io/nats/test/sub_test.go +++ b/vendor/github.com/nats-io/nats/test/sub_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" ) // More advanced tests on subscriptions diff --git a/vendor/github.com/nats-io/nats/test/test.go b/vendor/github.com/nats-io/nats/test/test.go index ec4fd1db8..9f909b8a0 100644 --- 
a/vendor/github.com/nats-io/nats/test/test.go +++ b/vendor/github.com/nats-io/nats/test/test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/nats-io/gnatsd/server" - "github.com/nats-io/go-nats" + "github.com/nats-io/nats" gnatsd "github.com/nats-io/gnatsd/test" ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go index 29265c70e..918b9a305 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go @@ -61,9 +61,26 @@ func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) erro if err != nil { return err } - if err := s.ensureParent(dir, root); err != nil { + // 'ensureParent' starts with the parent because we don't want to + // explicitly inherit from the parent; it could conflict with + // 'cpuset.cpu_exclusive'. + if err := s.ensureParent(filepath.Dir(dir), root); err != nil { return err } + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // We didn't inherit cpuset configs from parent, but we have + // to ensure cpuset configs are set before moving task into the + // cgroup. + // The logic is, if user specified cpuset configs, use these + // specified configs, otherwise, inherit from parent. This makes + // cpuset configs work correctly with 'cpuset.cpu_exclusive', and + // keeps backward compatibility. + if err := s.ensureCpusAndMems(dir, cgroup); err != nil { + return err + } + // because we are not using d.join we need to place the pid into the procs file // unlike the other subsystems if err := cgroups.WriteCgroupProc(dir, pid); err != nil { @@ -136,3 +153,10 @@ func (s *CpusetGroup) copyIfNeeded(current, parent string) error { func (s *CpusetGroup) isEmpty(b []byte) bool { return len(bytes.Trim(b, "\n")) == 0 } + +func (s *CpusetGroup) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error { + if err := s.Set(path, cgroup); err != nil { + return err + } + return s.copyIfNeeded(path, filepath.Dir(path)) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go index d76846eaf..65bccdb85 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go @@ -177,7 +177,7 @@ func UnreserveLabel(label string) error { return nil } -// DupSecOpt takes an process label and returns security options that +// DupSecOpt takes a process label and returns security options that // can be used to set duplicate labels on future container processes func DupSecOpt(src string) []string { return selinux.DupSecOpt(src) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture_test.go b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture_test.go index 40d4071e6..18ca924ee 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture_test.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture_test.go @@ -19,7 +19,7 @@ func TestCaptureTestFunc(t *testing.T) { // the first frame is the caller frame := stack.Frames[0] if expected := "captureFunc"; frame.Function != expected { - t.Fatalf("expteced function %q but recevied %q", expected, frame.Function) + t.Fatalf("expected function %q but received %q", expected, frame.Function) } expected := "/runc/libcontainer/stacktrace" if
!strings.HasSuffix(frame.Package, expected) { diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go index 930b52d4f..1e5638ed9 100644 --- a/vendor/github.com/prometheus/common/route/route.go +++ b/vendor/github.com/prometheus/common/route/route.go @@ -33,18 +33,19 @@ func WithParam(ctx context.Context, p, v string) context.Context { return context.WithValue(ctx, param(p), v) } -type contextFn func(r *http.Request) (context.Context, error) +// ContextFunc returns a new context for a request. +type ContextFunc func(r *http.Request) (context.Context, error) // Router wraps httprouter.Router and adds support for prefixed sub-routers // and per-request context injections. type Router struct { rtr *httprouter.Router prefix string - ctxFn contextFn + ctxFn ContextFunc } // New returns a new Router. -func New(ctxFn contextFn) *Router { +func New(ctxFn ContextFunc) *Router { if ctxFn == nil { ctxFn = func(r *http.Request) (context.Context, error) { return context.Background(), nil diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go index 4055d69d5..e7b1cba33 100644 --- a/vendor/github.com/prometheus/common/route/route_test.go +++ b/vendor/github.com/prometheus/common/route/route_test.go @@ -29,7 +29,7 @@ func TestRedirect(t *testing.T) { } } -func TestContextFn(t *testing.T) { +func TestContextFunc(t *testing.T) { router := New(func(r *http.Request) (context.Context, error) { return context.WithValue(context.Background(), "testkey", "testvalue"), nil }) diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go index 12c9010dd..877bfba1c 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -187,6 +187,7 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, } // regular domain + name = strings.TrimSuffix(name, ".") // golang.org/issue/18114 cert, err := m.cert(name) if err == nil { return cert, nil diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go index 3a9daa10c..295f702dc 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go @@ -108,6 +108,14 @@ func decodePayload(v interface{}, r io.Reader) error { } func TestGetCertificate(t *testing.T) { + testGetCertificate(t, false) +} + +func TestGetCertificate_trailingDot(t *testing.T) { + testGetCertificate(t, true) +} + +func testGetCertificate(t *testing.T, trailingDot bool) { const domain = "example.org" man := &Manager{Prompt: AcceptTOS} defer man.stopRenew() @@ -167,6 +175,9 @@ func TestGetCertificate(t *testing.T) { if err != nil { t.Fatalf("new-cert: CSR: %v", err) } + if csr.Subject.CommonName != domain { + t.Errorf("CommonName in CSR = %q; want %q", csr.Subject.CommonName, domain) + } der, err := dummyCert(csr.PublicKey, domain) if err != nil { t.Fatalf("new-cert: dummyCert: %v", err) @@ -201,11 +212,14 @@ func TestGetCertificate(t *testing.T) { // simulate tls.Config.GetCertificate var tlscert *tls.Certificate done := make(chan struct{}) - go func() { - hello := &tls.ClientHelloInfo{ServerName: domain} + go func(serverName string) { + if trailingDot { + serverName += "." 
+ } + hello := &tls.ClientHelloInfo{ServerName: serverName} tlscert, err = man.GetCertificate(hello) close(done) - }() + }(domain) select { case <-time.After(time.Minute): t.Fatal("man.GetCertificate took too long to return") diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index dc993efb5..4243f4cb9 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -20,6 +20,9 @@ var appengineVM bool // Set at init time by appengine_hook.go. If nil, we're not on App Engine. var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineAppIDFunc func(c context.Context) string + // AppEngineTokenSource returns a token source that fetches tokens // issued to the current App Engine application's service account. // If you are implementing a 3-legged OAuth 2.0 flow on App Engine diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go index 4f42c8b34..6f6641141 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -10,4 +10,5 @@ import "google.golang.org/appengine" func init() { appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID } diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go index 633611cc3..10747801f 100644 --- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go +++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -11,4 +11,5 @@ import "google.golang.org/appengine" func init() { appengineVM = true appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID } diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index c572d1a70..b45e79616 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -18,16 +18,16 @@ import ( "golang.org/x/oauth2" ) -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -// -// This client should be used when developing services -// that run on Google App Engine or Google Compute Engine -// and use "Application Default Credentials." -// +// DefaultCredentials holds "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials -// +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { ts, err := DefaultTokenSource(ctx, scope...) if err != nil { @@ -36,8 +36,18 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { return oauth2.NewClient(ctx, ts), nil } -// DefaultTokenSource is a token source that uses +// DefaultTokenSource returns the token source for // "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) 
+ if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: @@ -51,45 +61,40 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { // 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches // credentials from the metadata server. // (In this final case any provided scopes are ignored.) -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { +func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { - ts, err := tokenSourceFromFile(ctx, filename, scope) + creds, err := readCredentialsFile(ctx, filename, scope) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } - return ts, nil + return creds, nil } // Second, try a well-known file. filename := wellKnownFile() - _, err := os.Stat(filename) - if err == nil { - ts, err2 := tokenSourceFromFile(ctx, filename, scope) - if err2 == nil { - return ts, nil - } - err = err2 - } else if os.IsNotExist(err) { - err = nil // ignore this error - } - if err != nil { + if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) } // Third, if we're on Google App Engine use those credentials. if appengineTokenFunc != nil && !appengineVM { - return AppEngineTokenSource(ctx, scope...), nil + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scope...), + }, nil } // Fourth, if we're on Google Compute Engine use the metadata server. if metadata.OnGCE() { - return ComputeTokenSource(""), nil + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil } // None are found; return helpful error. 
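Aside, not part of the patch: the hunks above turn DefaultTokenSource into a thin shortcut over the new FindDefaultCredentials, which additionally surfaces a ProjectID. A minimal usage sketch of that API as declared here; the scope URL is only an example, and the golang.org/x/net/context import matches this vintage of the oauth2 package:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentials(ctx,
		"https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	// ProjectID may be empty, e.g. for gcloud user credentials
	// that carry no project_id field.
	fmt.Println("project:", creds.ProjectID)

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType)
}

The search order documented above (environment variable, well-known file, App Engine, GCE metadata server) is unchanged; only the return type grows the project information.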
@@ -105,7 +110,7 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err @@ -114,5 +119,12 @@ func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) if err := json.Unmarshal(b, &f); err != nil { return nil, err } - return f.tokenSource(ctx, scopes) + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + }, nil } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 07047329f..66a8b0e18 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -112,6 +112,7 @@ type credentialsFile struct { PrivateKeyID string `json:"private_key_id"` PrivateKey string `json:"private_key"` TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` // User Credential fields // (These typically come from gcloud auth.) diff --git a/vendor/golang.org/x/sys/plan9/const_plan9.go b/vendor/golang.org/x/sys/plan9/const_plan9.go index b4e85a3a9..430dac0a0 100644 --- a/vendor/golang.org/x/sys/plan9/const_plan9.go +++ b/vendor/golang.org/x/sys/plan9/const_plan9.go @@ -57,14 +57,3 @@ const ( ERRMAX = 128 STATFIXLEN = 49 ) - -// Mount and bind flags -const ( - MREPL = 0x0000 - MBEFORE = 0x0001 - MAFTER = 0x0002 - MORDER = 0x0003 - MCREATE = 0x0004 - MCACHE = 0x0010 - MMASK = 0x0017 -) diff --git a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh index 3c3ab0581..d798930b1 100755 --- a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh +++ b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh @@ -1,6 +1,8 @@ #!/bin/sh # Copyright 2009 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file.# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. COMMAND="mksysnum_plan9.sh $@" diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore deleted file mode 100644 index e48271590..000000000 --- a/vendor/golang.org/x/sys/unix/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_obj/ diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s similarity index 56% rename from vendor/golang.org/x/sys/unix/asm_linux_mips64x.s rename to vendor/golang.org/x/sys/unix/asm_dragonfly_386.s index 724e580c4..7e55e0d31 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s @@ -1,28 +1,29 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux -// +build mips64 mips64le // +build !gccgo #include "textflag.h" // -// System calls for mips64, Linux +// System call support for 386, FreeBSD // // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
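Aside, not part of the patch: the TEXT stubs that follow forward the package's bodiless Syscall declarations to package syscall. A runnable sketch (Linux only; SYS_GETPID comes from the generated zsysnum files) of the call path these trampolines ultimately service:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// getpid takes no arguments, so the three argument slots are zero.
	r1, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("errno:", errno)
		return
	}
	fmt.Println("pid:", r1)
}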
-TEXT ·Syscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall(SB),NOSPLIT,$0-32 JMP syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-80 +TEXT ·Syscall6(SB),NOSPLIT,$0-44 JMP syscall·Syscall6(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall9(SB),NOSPLIT,$0-56 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-32 JMP syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-44 JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s deleted file mode 100644 index 11889859f..000000000 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x -// +build linux -// +build !gccgo - -#include "textflag.h" - -// -// System calls for s390x, Linux -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - BR syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - BR syscall·Syscall6(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - BR syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - BR syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go deleted file mode 100644 index 6e3229697..000000000 --- a/vendor/golang.org/x/sys/unix/bluetooth_linux.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Bluetooth sockets and messages - -package unix - -// Bluetooth Protocols -const ( - BTPROTO_L2CAP = 0 - BTPROTO_HCI = 1 - BTPROTO_SCO = 2 - BTPROTO_RFCOMM = 3 - BTPROTO_BNEP = 4 - BTPROTO_CMTP = 5 - BTPROTO_HIDP = 6 - BTPROTO_AVDTP = 7 -) - -const ( - HCI_CHANNEL_RAW = 0 - HCI_CHANNEL_USER = 1 - HCI_CHANNEL_MONITOR = 2 - HCI_CHANNEL_CONTROL = 3 -) - -// Socketoption Level -const ( - SOL_BLUETOOTH = 0x112 - SOL_HCI = 0x0 - SOL_L2CAP = 0x6 - SOL_RFCOMM = 0x12 - SOL_SCO = 0x11 -) diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go deleted file mode 100644 index 56332692c..000000000 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build gccgo,linux,sparc64 - -package unix - -import "syscall" - -//extern sysconf -func realSysconf(name int) int64 - -func sysconf(name int) (n int64, err syscall.Errno) { - r := realSysconf(name) - if r < 0 { - return 0, syscall.GetErrno() - } - return r, 0 -} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 2a1473f16..d41d6100e 100755 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -116,12 +116,12 @@ _* | *_ | _) darwin_386) mkerrors="$mkerrors -m32" mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mksysnum="./mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; darwin_amd64) mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" + mksysnum="./mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; darwin_arm) @@ -131,7 +131,7 @@ darwin_arm) ;; darwin_arm64) mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" + mksysnum="./mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; dragonfly_386) @@ -161,7 +161,7 @@ freebsd_arm) mkerrors="$mkerrors" mksyscall="./mksyscall.pl -l32 -arm" mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - # Let the type of C char be signed for making the bare syscall + # Let the type of C char be singed for making the bare syscall # API consistent across over platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; @@ -194,7 +194,7 @@ linux_arm64) exit 1 fi mksysnum="./mksysnum_linux.pl $unistd_h" - # Let the type of C char be signed for making the bare syscall + # Let the type of C char be singed for making the bare syscall # API consistent across over platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; @@ -212,24 +212,6 @@ linux_ppc64le) mksysnum="./mksysnum_linux.pl $unistd_h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; -linux_s390x) - GOOSARCH_in=syscall_linux_s390x.go - unistd_h=/usr/include/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - # Let the type of C char be signed to make the bare sys - # API more consistent between platforms. - # This is a deliberate departure from the way the syscall - # package generates its version of the types file. 
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_sparc64) - GOOSARCH_in=syscall_linux_sparc64.go - unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; netbsd_386) mkerrors="$mkerrors -m32" mksyscall="./mksyscall.pl -l32 -netbsd" @@ -287,6 +269,6 @@ esac if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go"; - echo "$mktypes types_$GOOS.go | go run mkpost.go >>ztypes_$GOOSARCH.go"; + echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go"; fi ) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 7e6276b9c..77c48048e 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -12,16 +12,11 @@ export LC_ALL=C export LC_CTYPE=C if test -z "$GOARCH" -o -z "$GOOS"; then - echo 1>&2 "GOARCH or GOOS not defined in environment" - exit 1 + echo 1>&2 "GOARCH or GOOS not defined in environment" + exit 1 fi -CC=${CC:-cc} - -if [[ "$GOOS" -eq "solaris" ]]; then - # Assumes GNU versions of utilities in PATH. - export PATH=/usr/gnu/bin:$PATH -fi +CC=${CC:-gcc} uname=$(uname) @@ -127,10 +122,8 @@ includes_Linux=' #include #include #include -#include -#include #include -#include +#include #ifndef MSG_FASTOPEN #define MSG_FASTOPEN 0x20000000 @@ -143,12 +136,6 @@ includes_Linux=' #ifndef PTRACE_SETREGS #define PTRACE_SETREGS 0xd #endif - -#ifdef SOL_BLUETOOTH -// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h -// but it is already in bluetooth_linux.go -#undef SOL_BLUETOOTH -#endif ' includes_NetBSD=' @@ -213,7 +200,6 @@ includes_OpenBSD=' ' includes_SunOS=' -#include #include #include #include @@ -284,31 +270,21 @@ ccflags="$@" $2 !~ /^EXPR_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || - $2 == "BOTHER" || - $2 ~ /^CI?BAUD(EX)?$/ || - $2 == "IBSHIFT" || $2 ~ /^V[A-Z0-9]+$/ || $2 ~ /^CS[A-Z0-9]/ || - $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || + $2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ || $2 ~ /^IGN/ || $2 ~ /^IX(ON|ANY|OFF)$/ || $2 ~ /^IN(LCR|PCK)$/ || $2 ~ /(^FLU?SH)|(FLU?SH$)/ || - $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || + $2 ~ /^C(LOCAL|READ)$/ || $2 == "BRKINT" || $2 == "HUPCL" || $2 == "PENDIN" || $2 == "TOSTOP" || - $2 == "XCASE" || - $2 == "ALTWERASE" || - $2 == "NOKERNINFO" || $2 ~ /^PAR/ || $2 ~ /^SIG[^_]/ || - $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || - $2 ~ /^O?XTABS$/ || - $2 ~ /^TC[IO](ON|OFF)$/ || + $2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || @@ -327,9 +303,6 @@ ccflags="$@" $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || - $2 ~ /^TCGET/ || - $2 ~ /^TCSET/ || - $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || $2 !~ "RTF_BITS" && $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || $2 ~ /^BIOC/ || @@ -340,7 +313,6 @@ ccflags="$@" $2 !~ /^(BPF_TIMEVAL)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^CLOCK_/ || - $2 ~ /^CAN_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file 
mode 100644 index ed50d902a..000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs by mkall.sh. -package main - -import ( - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - s := string(b) - - goarch := os.Getenv("GOARCH") - goos := os.Getenv("GOOS") - if goarch == "s390x" && goos == "linux" { - // Export the types of PtraceRegs fields. - re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)") - s = re.ReplaceAllString(s, "Ptrace$1") - - // Replace padding fields inserted by cgo with blank identifiers. - re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*") - s = re.ReplaceAllString(s, "_") - - // Replace other unwanted fields with blank identifiers. - re = regexp.MustCompile("X_[A-Za-z0-9_]*") - s = re.ReplaceAllString(s, "_") - - // Replace the control_regs union with a blank identifier for now. - re = regexp.MustCompile("(Control_regs)\\s+\\[0\\]uint64") - s = re.ReplaceAllString(s, "_ [0]uint64") - } - - // gofmt - b, err = format.Source([]byte(s)) - if err != nil { - log.Fatal(err) - } - - // Append this command to the header to show where the new file - // came from. - re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)") - b = re.ReplaceAll(b, []byte("$1 | go run mkpost.go")) - - fmt.Printf("%s", b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl index 06bade768..f17b6125b 100755 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl +++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl @@ -110,9 +110,9 @@ ($) $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; + $dynimports .= "//go:cgo_import_dynamic ${modname}_${sysname} ${sysname} \"$modname.so\"\n"; # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; + $linknames .= "//go:linkname ${sysvarname} ${modname}_${sysname}\n"; # Library proc address variable. 
push @vars, $sysvarname; diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index f1493a3e6..6668bec71 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -62,7 +62,7 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) { func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) { h := (*Cmsghdr)(unsafe.Pointer(&b[0])) - if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) { + if h.Len < SizeofCmsghdr || int(h.Len) > len(b) { return nil, nil, EINVAL } return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil @@ -77,10 +77,10 @@ func UnixRights(fds ...int) []byte { h.Level = SOL_SOCKET h.Type = SCM_RIGHTS h.SetLen(CmsgLen(datalen)) - data := cmsgData(h) + data := uintptr(cmsgData(h)) for _, fd := range fds { - *(*int32)(data) = int32(fd) - data = unsafe.Pointer(uintptr(data) + 4) + *(*int32)(unsafe.Pointer(data)) = int32(fd) + data += 4 } return b } diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index a0bcf842c..6442a9939 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -68,8 +68,6 @@ func (tv *Timeval) Nano() int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 } -func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } - // use is a no-op, but the compiler cannot see that it is. // Calling use(p) ensures that p is kept live until that point. //go:noescape diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index e9671764c..9679dec89 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -450,34 +450,16 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL -// sysctlmib translates name to mib number and appends any additional args. -func sysctlmib(name string, args ...int) ([]_C_int, error) { +func Sysctl(name string) (value string, err error) { // Translate name to mib number. mib, err := nametomib(name) - if err != nil { - return nil, err - } - - for _, a := range args { - mib = append(mib, _C_int(a)) - } - - return mib, nil -} - -func Sysctl(name string) (string, error) { - return SysctlArgs(name) -} - -func SysctlArgs(name string, args ...int) (string, error) { - mib, err := sysctlmib(name, args...) if err != nil { return "", err } // Find size. n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { + if err = sysctl(mib, nil, &n, nil, 0); err != nil { return "", err } if n == 0 { @@ -486,7 +468,7 @@ func SysctlArgs(name string, args ...int) (string, error) { // Read into buffer of that size. buf := make([]byte, n) - if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil { return "", err } @@ -497,19 +479,17 @@ func SysctlArgs(name string, args ...int) (string, error) { return string(buf[0:n]), nil } -func SysctlUint32(name string) (uint32, error) { - return SysctlUint32Args(name) -} - -func SysctlUint32Args(name string, args ...int) (uint32, error) { - mib, err := sysctlmib(name, args...) +func SysctlUint32(name string) (value uint32, err error) { + // Translate name to mib number. + mib, err := nametomib(name) if err != nil { return 0, err } + // Read into buffer of that size. 
n := uintptr(4) buf := make([]byte, 4) - if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil { return 0, err } if n != 4 { @@ -518,49 +498,6 @@ func SysctlUint32Args(name string, args ...int) (uint32, error) { return *(*uint32)(unsafe.Pointer(&buf[0])), nil } -func SysctlUint64(name string, args ...int) (uint64, error) { - mib, err := sysctlmib(name, args...) - if err != nil { - return 0, err - } - - n := uintptr(8) - buf := make([]byte, 8) - if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { - return 0, err - } - if n != 8 { - return 0, EIO - } - return *(*uint64)(unsafe.Pointer(&buf[0])), nil -} - -func SysctlRaw(name string, args ...int) ([]byte, error) { - mib, err := sysctlmib(name, args...) - if err != nil { - return nil, err - } - - // Find size. - n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - - // Read into buffer of that size. - buf := make([]byte, n) - if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { - return nil, err - } - - // The actual call may return less than the original reported required - // size so ensure we deal with that. - return buf[:n], nil -} - //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go index d8085a072..55d884309 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go @@ -7,56 +7,29 @@ package unix_test import ( - "os/exec" - "runtime" "testing" "golang.org/x/sys/unix" ) const MNT_WAIT = 1 -const MNT_NOWAIT = 2 func TestGetfsstat(t *testing.T) { - const flags = MNT_NOWAIT // see golang.org/issue/16937 - n, err := unix.Getfsstat(nil, flags) + n, err := unix.Getfsstat(nil, MNT_WAIT) if err != nil { t.Fatal(err) } data := make([]unix.Statfs_t, n) - n2, err := unix.Getfsstat(data, flags) + n, err = unix.Getfsstat(data, MNT_WAIT) if err != nil { t.Fatal(err) } - if n != n2 { - t.Errorf("Getfsstat(nil) = %d, but subsequent Getfsstat(slice) = %d", n, n2) - } - for i, stat := range data { - if stat == (unix.Statfs_t{}) { - t.Errorf("index %v is an empty Statfs_t struct", i) - } - } - if t.Failed() { - for i, stat := range data[:n2] { - t.Logf("data[%v] = %+v", i, stat) - } - mount, err := exec.Command("mount").CombinedOutput() - if err != nil { - t.Logf("mount: %v\n%s", err, mount) - } else { - t.Logf("mount: %s", mount) - } - } -} - -func TestSysctlRaw(t *testing.T) { - if runtime.GOOS == "openbsd" { - t.Skip("kern.proc.pid does not exist on OpenBSD") - } - _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid()) - if err != nil { - t.Fatal(err) + empty := unix.Statfs_t{} + for _, stat := range data { + if stat == empty { + t.Fatal("an empty Statfs_t struct was returned") + } } } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 3d534d2da..0d1771c3f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -144,7 +144,6 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( uintptr(options), 0, ) - use(unsafe.Pointer(_p0)) if e1 != 0 { return nil, e1 } @@ -197,7 +196,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } r0, _, e1 := Syscall(SYS_GETFSSTAT64, 
uintptr(_p0), bufsize, uintptr(flags)) - use(unsafe.Pointer(_p0)) n = int(r0) if e1 != 0 { err = e1 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index c172a3da5..3195c8bf5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index fc1e5a4a8..db5a02dc6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -11,8 +11,6 @@ import ( "unsafe" ) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) - func Getpagesize() int { return 4096 } func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } @@ -23,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index d286cf408..e47ffd739 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -19,6 +19,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index c33905cdc..2560a9599 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index ec408ee78..fbbe0dce2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -109,7 +109,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - use(unsafe.Pointer(_p0)) n = int(r0) if e1 != 0 { err = e1 diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go new file mode 100644 index 000000000..41c2e6978 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go @@ -0,0 +1,63 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build 386,dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +func Getpagesize() int { return 4096 } + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = int32(nsec / 1e9) + ts.Nsec = int32(nsec % 1e9) + return +} + +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Usec = int32(nsec % 1e9 / 1e3) + tv.Sec = int32(nsec / 1e9) + return +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint32(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index da7cb7982..2ed92590e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = nsec % 1e9 / 1e3 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 520ccbeaf..ec56ed608 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -129,7 +129,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - use(unsafe.Pointer(_p0)) n = int(r0) if e1 != 0 { err = e1 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a0cd804d..6255d40ff 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index e142540ef..8b395d596 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv 
Timeval) { nsec += 999 // round up to microsecond tv.Usec = nsec % 1e9 / 1e3 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5504cb125..4e72d46a8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -21,6 +21,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go deleted file mode 100644 index cd13080ad..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd - -package unix_test - -import ( - "os" - "testing" - - "golang.org/x/sys/unix" -) - -func TestSysctUint64(t *testing.T) { - _, err := unix.SysctlUint64("vm.max_kernel_address") - if err != nil { - if os.Getenv("GO_BUILDER_NAME") == "freebsd-386-gce101" { - t.Skipf("Ignoring known failing test (golang.org/issue/15186). Failed with: %v", err) - } - t.Fatal(err) - } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 01c569ad5..9df719571 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -36,10 +36,10 @@ func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } -//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) +//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { - return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0) + return linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0) } func Mkdir(path string, mode uint32) (err error) { @@ -60,19 +60,10 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) return openat(dirfd, path, flags|O_LARGEFILE, mode) } -//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) - -func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - if len(fds) == 0 { - return ppoll(nil, 0, timeout, sigmask) - } - return ppoll(&fds[0], len(fds), timeout, sigmask) -} - -//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) +//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error) func Readlink(path string, buf []byte) (n int, err error) { - return Readlinkat(AT_FDCWD, path, buf) + return readlinkat(AT_FDCWD, path, buf) } func Rename(oldpath string, newpath string) (err error) { @@ -80,41 +71,34 @@ func Rename(oldpath string, newpath string) (err error) { } func Rmdir(path string) error { - return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR) + return unlinkat(AT_FDCWD, path, AT_REMOVEDIR) } -//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) +//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error) func Symlink(oldpath string, newpath string) (err error) { - return Symlinkat(oldpath, AT_FDCWD, newpath) + return symlinkat(oldpath, AT_FDCWD, newpath) } 
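Aside, not part of the patch: the wrappers above all funnel path-based calls through their *at counterparts anchored at AT_FDCWD. A small runnable sketch of that convention via Openat (Linux; the paths are illustrative):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// AT_FDCWD resolves "." against the current working directory,
	// exactly as a plain Open would.
	fd, err := unix.Openat(unix.AT_FDCWD, ".", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	unix.Close(fd)

	// A real directory descriptor pins resolution elsewhere: this opens
	// /etc/hosts regardless of the process working directory.
	dir, err := unix.Open("/etc", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(dir)

	fd, err = unix.Openat(dir, "hosts", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	unix.Close(fd)
}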
func Unlink(path string) error { - return Unlinkat(AT_FDCWD, path, 0) + return unlinkat(AT_FDCWD, path, 0) } -//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys unlinkat(dirfd int, path string, flags int) (err error) + +func Unlinkat(dirfd int, path string) error { + return unlinkat(dirfd, path, 0) +} //sys utimes(path string, times *[2]Timeval) (err error) -func Utimes(path string, tv []Timeval) error { +func Utimes(path string, tv []Timeval) (err error) { if tv == nil { - err := utimensat(AT_FDCWD, path, nil, 0) - if err != ENOSYS { - return err - } return utimes(path, nil) } if len(tv) != 2 { return EINVAL } - var ts [2]Timespec - ts[0] = NsecToTimespec(TimevalToNsec(tv[0])) - ts[1] = NsecToTimespec(TimevalToNsec(tv[1])) - err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) - if err != ENOSYS { - return err - } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } @@ -139,7 +123,8 @@ func UtimesNano(path string, ts []Timespec) error { // in 2.6.22, Released, 8 July 2007) then fall back to utimes var tv [2]Timeval for i := 0; i < 2; i++ { - tv[i] = NsecToTimeval(TimespecToNsec(ts[i])) + tv[i].Sec = ts[i].Sec + tv[i].Usec = ts[i].Nsec / 1000 } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } @@ -398,60 +383,6 @@ func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil } -type SockaddrHCI struct { - Dev uint16 - Channel uint16 - raw RawSockaddrHCI -} - -func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) { - sa.raw.Family = AF_BLUETOOTH - sa.raw.Dev = sa.Dev - sa.raw.Channel = sa.Channel - return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil -} - -// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. -// The RxID and TxID fields are used for transport protocol addressing in -// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with -// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning. -// -// The SockaddrCAN struct must be bound to the socket file descriptor -// using Bind before the CAN socket can be used. 
-// -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) -// -// The full SocketCAN documentation can be found in the linux kernel -// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt -type SockaddrCAN struct { - Ifindex int - RxID uint32 - TxID uint32 - raw RawSockaddrCAN -} - -func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { - if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { - return nil, 0, EINVAL - } - sa.raw.Family = AF_CAN - sa.raw.Ifindex = int32(sa.Ifindex) - rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { - sa.raw.Addr[i] = rx[i] - } - tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { - sa.raw.Addr[i+4] = tx[i] - } - return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil -} - func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -917,6 +848,7 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Exit(code int) = SYS_EXIT_GROUP //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) @@ -940,7 +872,6 @@ func Getpgrp() (pid int) { //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) -//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) //sys Getxattr(path string, attr string, dest []byte) (sz int, err error) //sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) @@ -952,9 +883,9 @@ func Getpgrp() (pid int) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Pause() (err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 -//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64 //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) @@ -963,7 +894,6 @@ func Getpgrp() (pid int) { //sysnb Setpgid(pid int, pgid int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tv *Timeval) (err error) -//sys Setns(fd int, nstype int) (err error) // issue 1435. // On linux Setuid and Setgid only affects the current thread, not the process. 
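Aside, not part of the patch: this revision moves the EpollWait prototype into the common Linux file, alongside EpollCreate, EpollCreate1, and EpollCtl. A runnable sketch using those entry points with the signatures shown above; it watches stdin for readability with a one-second timeout:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(epfd)

	// Register fd 0 (stdin) for readability.
	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: 0}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, 0, &ev); err != nil {
		log.Fatal(err)
	}

	events := make([]unix.EpollEvent, 8)
	n, err := unix.EpollWait(epfd, events, 1000) // timeout in milliseconds
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d descriptor(s) ready\n", n)
}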
@@ -990,6 +920,7 @@ func Setgid(uid int) (err error) { //sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 //sys Unshare(flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys Utime(path string, buf *Utimbuf) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ @@ -1089,6 +1020,9 @@ func Munmap(b []byte) (err error) { // Newfstatat // Nfsservctl // Personality +// Poll +// Ppoll +// Prctl // Pselect6 // Ptrace // Putpmsg diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 2b881b979..7171219af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -24,6 +24,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Sec = int32(nsec / 1e9) @@ -91,8 +93,6 @@ func Pipe2(p []int, flags int) (err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Pause() (err error) func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { page := uintptr(offset / 4096) @@ -181,8 +181,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) -//sys Utime(path string, buf *Utimbuf) (err error) - // On x86 Linux, all the socket calls go through an extra indirection, // I think because the 5-register system call interface can't handle // the 6-argument calls like sendto and recvfrom. 
Instead the @@ -388,12 +386,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 18911c2d9..ae70c2afc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -9,7 +9,6 @@ package unix import "syscall" //sys Dup2(oldfd int, newfd int) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -26,7 +25,6 @@ import "syscall" //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) -//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -88,8 +86,6 @@ func Time(t *Time_t) (tt Time_t, err error) { return Time_t(tv.Sec), nil } -//sys Utime(path string, buf *Utimbuf) (err error) - func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { @@ -98,6 +94,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Sec = nsec / 1e9 @@ -146,12 +144,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 71d870228..abc41c3ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -108,28 +108,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // Vsyscalls on amd64. 
//sysnb Gettimeofday(tv *Timeval) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Pause() (err error) - -func Time(t *Time_t) (Time_t, error) { - var tv Timeval - err := Gettimeofday(&tv) - if err != nil { - return 0, err - } - if t != nil { - *t = Time_t(tv.Sec) - } - return Time_t(tv.Sec), nil -} - -func Utime(path string, buf *Utimbuf) error { - tv := []Timeval{ - {Sec: buf.Actime}, - {Sec: buf.Modtime}, - } - return Utimes(path, tv) -} +//sysnb Time(t *Time_t) (tt Time_t, err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 @@ -179,7 +158,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) @@ -252,12 +231,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 4a136396c..f3d72dfd3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -6,7 +6,8 @@ package unix -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +const _SYS_dup = SYS_DUP3 + //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) @@ -69,6 +70,7 @@ func Lstat(path string, stat *Stat_t) (err error) { func Getpagesize() int { return 65536 } //sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } @@ -78,6 +80,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Sec = nsec / 1e9 @@ -85,26 +89,6 @@ func NsecToTimeval(nsec int64) (tv Timeval) { return } -func Time(t *Time_t) (Time_t, error) { - var tv Timeval - err := Gettimeofday(&tv) - if err != nil { - return 0, err - } - if t != nil { - *t = Time_t(tv.Sec) - } - return Time_t(tv.Sec), nil -} - -func Utime(path string, buf *Utimbuf) error { - tv := []Timeval{ - {Sec: buf.Actime}, - {Sec: buf.Modtime}, - } - return Utimes(path, tv) -} - func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL @@ -149,18 +133,6 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} - -func Pause() (err error) { - _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - // TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove // these when the deprecated syscalls that the syscall package relies on // are removed. 
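Aside, not part of the patch: the TimevalToNsec/NsecToTimeval pairs reinstated across these files deliberately round up to the next microsecond, so sub-microsecond values survive a round trip as exactly one microsecond. A self-contained check of that arithmetic; the struct mirrors the 64-bit field layout (on 32-bit targets the fields are int32):

package main

import "fmt"

type Timeval struct{ Sec, Usec int64 }

func NsecToTimeval(nsec int64) (tv Timeval) {
	nsec += 999 // round up to microsecond
	tv.Sec = nsec / 1e9
	tv.Usec = nsec % 1e9 / 1e3
	return
}

func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + tv.Usec*1e3 }

func main() {
	fmt.Println(TimevalToNsec(NsecToTimeval(1)))    // 1000: 1ns rounds up to 1µs
	fmt.Println(TimevalToNsec(NsecToTimeval(1000))) // 1000: exact microseconds pass through
}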
@@ -176,15 +148,3 @@ const ( SYS_EPOLL_CREATE = 1042 SYS_EPOLL_WAIT = 1069 ) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - var ts *Timespec - if timeout >= 0 { - ts = new(Timespec) - *ts = NsecToTimespec(int64(timeout) * 1e6) - } - if len(fds) == 0 { - return ppoll(nil, 0, ts, nil) - } - return ppoll(&fds[0], len(fds), ts, nil) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go deleted file mode 100644 index 8119fde37..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux -// +build mips64 mips64le - -package unix - -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Fchown(fd int, uid int, gid int) (err error) -//sys Fstatfs(fd int, buf *Statfs_t) (err error) -//sys Ftruncate(fd int, length int64) (err error) -//sysnb Getegid() (egid int) -//sysnb Geteuid() (euid int) -//sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Getuid() (uid int) -//sys Lchown(path string, uid int, gid int) (err error) -//sys Listen(s int, n int) (err error) -//sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 -//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 -//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) -//sys Shutdown(fd int, how int) (err error) -//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys Statfs(path string, buf *Statfs_t) (err error) -//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) -//sys Truncate(path string, length int64) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) -//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) -//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) -//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) -//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) -//sysnb setgroups(n int, list *_Gid_t) (err error) -//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) -//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) -//sysnb socket(domain int, typ int, proto int) (fd int, err error) -//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) -//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) -//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) -//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) -//sys sendto(s int, buf []byte, flags 
int, to unsafe.Pointer, addrlen _Socklen) (err error) -//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) - -func Getpagesize() int { return 65536 } - -//sysnb Gettimeofday(tv *Timeval) (err error) - -func Time(t *Time_t) (tt Time_t, err error) { - var tv Timeval - err = Gettimeofday(&tv) - if err != nil { - return 0, err - } - if t != nil { - *t = Time_t(tv.Sec) - } - return Time_t(tv.Sec), nil -} - -//sys Utime(path string, buf *Utimbuf) (err error) - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return -} - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -func Ioperm(from int, num int, on int) (err error) { - return ENOSYS -} - -func Iopl(level int) (err error) { - return ENOSYS -} - -type stat_t struct { - Dev uint32 - Pad0 [3]int32 - Ino uint64 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint32 - Pad1 [3]uint32 - Size int64 - Atime uint32 - Atime_nsec uint32 - Mtime uint32 - Mtime_nsec uint32 - Ctime uint32 - Ctime_nsec uint32 - Blksize uint32 - Pad2 uint32 - Blocks int64 -} - -//sys fstat(fd int, st *stat_t) (err error) -//sys lstat(path string, st *stat_t) (err error) -//sys stat(path string, st *stat_t) (err error) - -func Fstat(fd int, s *Stat_t) (err error) { - st := &stat_t{} - err = fstat(fd, st) - fillStat_t(s, st) - return -} - -func Lstat(path string, s *Stat_t) (err error) { - st := &stat_t{} - err = lstat(path, st) - fillStat_t(s, st) - return -} - -func Stat(path string, s *Stat_t) (err error) { - st := &stat_t{} - err = stat(path, st) - fillStat_t(s, st) - return -} - -func fillStat_t(s *Stat_t, st *stat_t) { - s.Dev = st.Dev - s.Ino = st.Ino - s.Mode = st.Mode - s.Nlink = st.Nlink - s.Uid = st.Uid - s.Gid = st.Gid - s.Rdev = st.Rdev - s.Size = st.Size - s.Atim = Timespec{int64(st.Atime), int64(st.Atime_nsec)} - s.Mtim = Timespec{int64(st.Mtime), int64(st.Mtime_nsec)} - s.Ctim = Timespec{int64(st.Ctime), int64(st.Ctime_nsec)} - s.Blksize = st.Blksize - s.Blocks = st.Blocks -} - -func (r *PtraceRegs) PC() uint64 { return r.Regs[64] } - -func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = pc } - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint64(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint64(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint64(length) -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 60770f627..67eed6334 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,8 +7,6 @@ package unix -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error) @@ -18,13 +16,11 @@ package unix //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT //sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) -//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -66,8 +62,6 @@ func Getpagesize() int { return 65536 } //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) -//sys Utime(path string, buf *Utimbuf) (err error) - func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { @@ -76,6 +70,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Sec = nsec / 1e9 @@ -98,38 +94,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } - -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go deleted file mode 100644 index 81c5f4732..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
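Several hunks above add the same one-line TimevalToNsec helper beside the existing NsecToTimespec/NsecToTimeval converters. The arithmetic is easy to get wrong in one direction: NsecToTimeval adds 999 first so a fractional microsecond rounds up instead of truncating to zero. A self-contained sketch using a local stand-in for Timeval, since the real field widths vary by GOOS/GOARCH:

package main

import "fmt"

// timeval stands in for unix.Timeval on 64-bit Linux; an assumption,
// not the generated type.
type timeval struct{ Sec, Usec int64 }

// timevalToNsec has the same shape as the TimevalToNsec added above.
func timevalToNsec(tv timeval) int64 { return tv.Sec*1e9 + tv.Usec*1e3 }

// nsecToTimeval mirrors NsecToTimeval: the +999 makes any fraction of a
// microsecond round up rather than silently vanish.
func nsecToTimeval(nsec int64) (tv timeval) {
    nsec += 999 // round up to microsecond
    tv.Sec = nsec / 1e9
    tv.Usec = nsec % 1e9 / 1e3
    return
}

func main() {
    tv := nsecToTimeval(1500)      // 1.5µs rounds up to 2µs
    fmt.Println(tv.Sec, tv.Usec)   // 0 2
    fmt.Println(timevalToNsec(tv)) // 2000
}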
- -// +build s390x,linux - -package unix - -import ( - "unsafe" -) - -//sys Dup2(oldfd int, newfd int) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys Fchown(fd int, uid int, gid int) (err error) -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatfs(fd int, buf *Statfs_t) (err error) -//sys Ftruncate(fd int, length int64) (err error) -//sysnb Getegid() (egid int) -//sysnb Geteuid() (euid int) -//sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) -//sys Lchown(path string, uid int, gid int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) -//sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 -//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) -//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) -//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, buf *Statfs_t) (err error) -//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) -//sys Truncate(path string, length int64) (err error) -//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) -//sysnb setgroups(n int, list *_Gid_t) (err error) - -func Getpagesize() int { return 4096 } - -//sysnb Gettimeofday(tv *Timeval) (err error) - -func Time(t *Time_t) (tt Time_t, err error) { - var tv Timeval - err = Gettimeofday(&tv) - if err != nil { - return 0, err - } - if t != nil { - *t = Time_t(tv.Sec) - } - return Time_t(tv.Sec), nil -} - -//sys Utime(path string, buf *Utimbuf) (err error) - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, 0) // pipe2 is the same as pipe when flags are set to 0. 
- p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -func Ioperm(from int, num int, on int) (err error) { - return ENOSYS -} - -func Iopl(level int) (err error) { - return ENOSYS -} - -func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr } - -func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc } - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint64(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint64(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint64(length) -} - -// Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. -// mmap2 also requires arguments to be passed in a struct; it is currently not exposed in . -func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { - mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)} - r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0) - use(unsafe.Pointer(&mmap_args[0])) - xaddr = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// On s390x Linux, all the socket calls go through an extra indirection. -// The arguments to the underlying system call (SYS_SOCKETCALL) are the -// number below and a pointer to an array of uintptr. -const ( - // see linux/net.h - netSocket = 1 - netBind = 2 - netConnect = 3 - netListen = 4 - netAccept = 5 - netGetSockName = 6 - netGetPeerName = 7 - netSocketPair = 8 - netSend = 9 - netRecv = 10 - netSendTo = 11 - netRecvFrom = 12 - netShutdown = 13 - netSetSockOpt = 14 - netGetSockOpt = 15 - netSendMsg = 16 - netRecvMsg = 17 - netAccept4 = 18 - netRecvMMsg = 19 - netSendMMsg = 20 -) - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (int, error) { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} - fd, _, err := Syscall(SYS_SOCKETCALL, netAccept, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(fd), nil -} - -func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (int, error) { - args := [4]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)} - fd, _, err := Syscall(SYS_SOCKETCALL, netAccept4, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(fd), nil -} - -func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} - _, _, err := RawSyscall(SYS_SOCKETCALL, netGetSockName, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} - _, _, err := RawSyscall(SYS_SOCKETCALL, netGetPeerName, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func socketpair(domain int, typ int, flags int, fd *[2]int32) error { - args := [4]uintptr{uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd))} - _, _, err := RawSyscall(SYS_SOCKETCALL, netSocketPair, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func bind(s int, 
addr unsafe.Pointer, addrlen _Socklen) error { - args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)} - _, _, err := Syscall(SYS_SOCKETCALL, netBind, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) error { - args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)} - _, _, err := Syscall(SYS_SOCKETCALL, netConnect, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func socket(domain int, typ int, proto int) (int, error) { - args := [3]uintptr{uintptr(domain), uintptr(typ), uintptr(proto)} - fd, _, err := RawSyscall(SYS_SOCKETCALL, netSocket, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(fd), nil -} - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) error { - args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))} - _, _, err := Syscall(SYS_SOCKETCALL, netGetSockOpt, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error { - args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)} - _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (int, error) { - var base uintptr - if len(p) > 0 { - base = uintptr(unsafe.Pointer(&p[0])) - } - args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))} - n, _, err := Syscall(SYS_SOCKETCALL, netRecvFrom, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(n), nil -} - -func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) error { - var base uintptr - if len(p) > 0 { - base = uintptr(unsafe.Pointer(&p[0])) - } - args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)} - _, _, err := Syscall(SYS_SOCKETCALL, netSendTo, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func recvmsg(s int, msg *Msghdr, flags int) (int, error) { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)} - n, _, err := Syscall(SYS_SOCKETCALL, netRecvMsg, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(n), nil -} - -func sendmsg(s int, msg *Msghdr, flags int) (int, error) { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)} - n, _, err := Syscall(SYS_SOCKETCALL, netSendMsg, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(n), nil -} - -func Listen(s int, n int) error { - args := [2]uintptr{uintptr(s), uintptr(n)} - _, _, err := Syscall(SYS_SOCKETCALL, netListen, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -func Shutdown(s, how int) error { - args := [2]uintptr{uintptr(s), uintptr(how)} - _, _, err := Syscall(SYS_SOCKETCALL, netShutdown, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return err - } - return nil -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) 
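The deleted s390x file above is mostly one idea repeated: on that port the kernel multiplexes every socket operation through a single SYS_SOCKETCALL entry point, which takes an operation number from linux/net.h and a pointer to a uintptr array holding the real arguments. A condensed sketch of one such wrapper, assuming GOOS=linux GOARCH=s390x so that unix.SYS_SOCKETCALL exists:

package main

import (
    "fmt"
    "unsafe"

    "golang.org/x/sys/unix"
)

// Operation numbers from linux/net.h, as listed in the deleted file.
const (
    netSocket = 1
    netBind   = 2
)

// socket funnels through SYS_SOCKETCALL with an op number and a pointer
// to an array of the real arguments, as the deleted wrappers did.
func socket(domain, typ, proto int) (int, error) {
    args := [3]uintptr{uintptr(domain), uintptr(typ), uintptr(proto)}
    fd, _, errno := unix.RawSyscall(unix.SYS_SOCKETCALL, netSocket,
        uintptr(unsafe.Pointer(&args)), 0)
    if errno != 0 {
        return 0, errno
    }
    return int(fd), nil
}

func main() {
    fd, err := socket(unix.AF_INET, unix.SOCK_STREAM, 0)
    fmt.Println(fd, err)
}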
-} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go deleted file mode 100644 index 20b7454d7..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build sparc64,linux - -package unix - -import ( - "sync/atomic" - "syscall" -) - -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Dup2(oldfd int, newfd int) (err error) -//sys Fchown(fd int, uid int, gid int) (err error) -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatfs(fd int, buf *Statfs_t) (err error) -//sys Ftruncate(fd int, length int64) (err error) -//sysnb Getegid() (egid int) -//sysnb Geteuid() (euid int) -//sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Getuid() (uid int) -//sysnb InotifyInit() (fd int, err error) -//sys Lchown(path string, uid int, gid int) (err error) -//sys Listen(s int, n int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) -//sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 -//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) -//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) -//sys Shutdown(fd int, how int) (err error) -//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, buf *Statfs_t) (err error) -//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) -//sys Truncate(path string, length int64) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) -//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) -//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) -//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) -//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) -//sysnb setgroups(n int, list *_Gid_t) (err error) -//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) -//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) -//sysnb socket(domain int, typ int, proto int) (fd int, err error) -//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) -//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) -//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) -//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) -//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) -//sys recvmsg(s int, msg *Msghdr, flags 
int) (n int, err error) -//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) - -func sysconf(name int) (n int64, err syscall.Errno) - -// pageSize caches the value of Getpagesize, since it can't change -// once the system is booted. -var pageSize int64 // accessed atomically - -func Getpagesize() int { - n := atomic.LoadInt64(&pageSize) - if n == 0 { - n, _ = sysconf(_SC_PAGESIZE) - atomic.StoreInt64(&pageSize, n) - } - return int(n) -} - -func Ioperm(from int, num int, on int) (err error) { - return ENOSYS -} - -func Iopl(level int) (err error) { - return ENOSYS -} - -//sysnb Gettimeofday(tv *Timeval) (err error) - -func Time(t *Time_t) (tt Time_t, err error) { - var tv Timeval - err = Gettimeofday(&tv) - if err != nil { - return 0, err - } - if t != nil { - *t = Time_t(tv.Sec) - } - return Time_t(tv.Sec), nil -} - -//sys Utime(path string, buf *Utimbuf) (err error) - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = int32(nsec % 1e9 / 1e3) - return -} - -func (r *PtraceRegs) PC() uint64 { return r.Tpc } - -func (r *PtraceRegs) SetPC(pc uint64) { r.Tpc = pc } - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint64(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint64(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint64(length) -} - -//sysnb pipe(p *[2]_C_int) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go deleted file mode 100644 index 91184cae0..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
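The sparc64 file deleted above (like the Solaris code later in this patch) cannot hard-code Getpagesize, so it asks sysconf(_SC_PAGESIZE) once and caches the answer behind atomic loads and stores; the page size cannot change after boot, so a duplicated first lookup is harmless. A sketch of the pattern with the assembly-backed sysconf replaced by a hypothetical stub:

package main

import (
    "fmt"
    "sync/atomic"
)

// sysconfPageSize is a hypothetical stand-in for the assembly-backed
// sysconf(_SC_PAGESIZE) call used by the deleted code.
func sysconfPageSize() int64 { return 8192 }

// pageSize caches the value of Getpagesize, since it can't change
// once the system is booted.
var pageSize int64 // accessed atomically

func Getpagesize() int {
    n := atomic.LoadInt64(&pageSize)
    if n == 0 {
        n = sysconfPageSize()
        atomic.StoreInt64(&pageSize, n)
    }
    return int(n)
}

func main() {
    fmt.Println(Getpagesize()) // first call queries; later calls hit the cache
}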
- -// +build linux - -package unix_test - -import ( - "io/ioutil" - "os" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -func TestPoll(t *testing.T) { - f, cleanup := mktmpfifo(t) - defer cleanup() - - const timeout = 100 - - ok := make(chan bool, 1) - go func() { - select { - case <-time.After(10 * timeout * time.Millisecond): - t.Errorf("Poll: failed to timeout after %d milliseconds", 10*timeout) - case <-ok: - } - }() - - fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} - n, err := unix.Poll(fds, timeout) - ok <- true - if err != nil { - t.Errorf("Poll: unexpected error: %v", err) - return - } - if n != 0 { - t.Errorf("Poll: wrong number of events: got %v, expected %v", n, 0) - return - } -} - -func TestPpoll(t *testing.T) { - f, cleanup := mktmpfifo(t) - defer cleanup() - - const timeout = 100 * time.Millisecond - - ok := make(chan bool, 1) - go func() { - select { - case <-time.After(10 * timeout): - t.Errorf("Ppoll: failed to timeout after %d", 10*timeout) - case <-ok: - } - }() - - fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} - timeoutTs := unix.NsecToTimespec(int64(timeout)) - n, err := unix.Ppoll(fds, &timeoutTs, nil) - ok <- true - if err != nil { - t.Errorf("Ppoll: unexpected error: %v", err) - return - } - if n != 0 { - t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0) - return - } -} - -// mktmpfifo creates a temporary FIFO and provides a cleanup function. -func mktmpfifo(t *testing.T) (*os.File, func()) { - err := unix.Mkfifo("fifo", 0666) - if err != nil { - t.Fatalf("mktmpfifo: failed to create FIFO: %v", err) - } - - f, err := os.OpenFile("fifo", os.O_RDWR, 0666) - if err != nil { - os.Remove("fifo") - t.Fatalf("mktmpfifo: failed to open FIFO: %v", err) - } - - return f, func() { - f.Close() - os.Remove("fifo") - } -} - -func TestTime(t *testing.T) { - var ut unix.Time_t - ut2, err := unix.Time(&ut) - if err != nil { - t.Fatalf("Time: %v", err) - } - if ut != ut2 { - t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut) - } - - var now time.Time - - for i := 0; i < 10; i++ { - ut, err = unix.Time(nil) - if err != nil { - t.Fatalf("Time: %v", err) - } - - now = time.Now() - - if int64(ut) == now.Unix() { - return - } - } - - t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix()) -} - -func TestUtime(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - - buf := &unix.Utimbuf{ - Modtime: 12345, - } - - err := unix.Utime("file1", buf) - if err != nil { - t.Fatalf("Utime: %v", err) - } - - fi, err := os.Stat("file1") - if err != nil { - t.Fatal(err) - } - - if fi.ModTime().Unix() != 12345 { - t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix()) - } -} - -func TestGetrlimit(t *testing.T) { - var rlim unix.Rlimit - err := unix.Getrlimit(unix.RLIMIT_AS, &rlim) - if err != nil { - t.Fatalf("Getrlimit: %v", err) - } -} - -// utilities taken from os/os_test.go - -func touch(t *testing.T, name string) { - f, err := os.Create(name) - if err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } -} - -// chtmpdir changes the working directory to a new temporary directory and -// provides a cleanup function. Used when PWD is read-only. 
-func chtmpdir(t *testing.T) func() { - oldwd, err := os.Getwd() - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - d, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - if err := os.Chdir(d); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - return func() { - if err := os.Chdir(oldwd); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - os.RemoveAll(d) - } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index afaca0983..1b0e1af12 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -16,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index a6ff04ce5..1b6dcbe35 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -16,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 68a6969b2..87d1d6fed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -16,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 554a82342..246131d2a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -111,7 +111,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - use(unsafe.Pointer(_p0)) n = int(r0) if e1 != 0 { err = e1 diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index a66ddc59c..9529b20e8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -16,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = int32(nsec % 1e9 / 1e3) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 0776c1faf..fc6402946 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -16,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return 
int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = nsec % 1e9 / 1e3 diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index acb74b1d1..ab54718f6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,7 +13,6 @@ package unix import ( - "sync/atomic" "syscall" "unsafe" ) @@ -72,20 +71,18 @@ func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, return origlen - len(buf), count, names } -//sysnb pipe(p *[2]_C_int) (n int, err error) +func pipe() (r uintptr, w uintptr, err uintptr) func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - var pp [2]_C_int - n, err := pipe(&pp) - if n != 0 { - return err + r0, w0, e1 := pipe() + if e1 != 0 { + err = syscall.Errno(e1) } - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return nil + p[0], p[1] = int(r0), int(w0) + return } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { @@ -141,8 +138,6 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), sl, nil } -//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname - func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -152,23 +147,12 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } -const ImplementsGetwd = true - -//sys Getcwd(buf []byte) (n int, err error) +// The const provides a compile-time constant so clients +// can adjust to whether there is a working Getwd and avoid +// even linking this function into the binary. See ../os/getwd.go. +const ImplementsGetwd = false -func Getwd() (wd string, err error) { - var buf [PathMax]byte - // Getcwd will return an error if it failed for any reason. - _, err = Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} +func Getwd() (string, error) { return "", ENOTSUP } /* * Wrapped @@ -179,20 +163,21 @@ func Getwd() (wd string, err error) { func Getgroups() (gids []int, err error) { n, err := getgroups(0, nil) - // Check for error and sanity check group count. Newer versions of - // Solaris allow up to 1024 (NGROUPS_MAX). - if n < 0 || n > 1024 { - if err != nil { - return nil, err - } - return nil, EINVAL - } else if n == 0 { + if err != nil { + return nil, err + } + if n == 0 { return nil, nil } + // Sanity check group count. Max is 16 on BSD. 
+ if n < 0 || n > 1000 { + return nil, EINVAL + } + a := make([]_Gid_t, n) n, err = getgroups(n, &a[0]) - if n == -1 { + if err != nil { return nil, err } gids = make([]int, n) @@ -271,68 +256,39 @@ func (w WaitStatus) StopSignal() syscall.Signal { func (w WaitStatus) TrapCause() int { return -1 } -//sys wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) +func wait4(pid uintptr, wstatus *WaitStatus, options uintptr, rusage *Rusage) (wpid uintptr, err uintptr) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (int, error) { - var status _C_int - rpid, err := wait4(int32(pid), &status, options, rusage) - wpid := int(rpid) - if wpid == -1 { - return wpid, err - } - if wstatus != nil { - *wstatus = WaitStatus(status) +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + r0, e1 := wait4(uintptr(pid), wstatus, uintptr(options), rusage) + if e1 != 0 { + err = syscall.Errno(e1) } - return wpid, nil + return int(r0), err } -//sys gethostname(buf []byte) (n int, err error) +func gethostname() (name string, err uintptr) func Gethostname() (name string, err error) { - var buf [MaxHostNameLen]byte - n, err := gethostname(buf[:]) - if n != 0 { - return "", err - } - n = clen(buf[:]) - if n < 1 { - return "", EFAULT - } - return string(buf[:n]), nil -} - -//sys utimes(path string, times *[2]Timeval) (err error) - -func Utimes(path string, tv []Timeval) (err error) { - if tv == nil { - return utimes(path, nil) - } - if len(tv) != 2 { - return EINVAL + name, e1 := gethostname() + if e1 != 0 { + err = syscall.Errno(e1) } - return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) + return name, err } -//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) - func UtimesNano(path string, ts []Timespec) error { if ts == nil { - return utimensat(AT_FDCWD, path, nil, 0) + return Utimes(path, nil) } if len(ts) != 2 { return EINVAL } - return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) -} - -func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { - if ts == nil { - return utimensat(dirfd, path, nil, flags) - } - if len(ts) != 2 { - return EINVAL + var tv [2]Timeval + for i := 0; i < 2; i++ { + tv[i].Sec = ts[i].Sec + tv[i].Usec = ts[i].Nsec / 1000 } - return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) + return Utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } //sys fcntl(fd int, cmd int, arg int) (val int, err error) @@ -346,35 +302,6 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { return nil } -//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error) - -func Futimesat(dirfd int, path string, tv []Timeval) error { - pathp, err := BytePtrFromString(path) - if err != nil { - return err - } - if tv == nil { - return futimesat(dirfd, pathp, nil) - } - if len(tv) != 2 { - return EINVAL - } - return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) -} - -// Solaris doesn't have an futimes function because it allows NULL to be -// specified as the path for futimesat. However, Go doesn't like -// NULL-style string interfaces, so this simple wrapper is provided. 
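The rewritten Solaris UtimesNano above no longer reaches utimensat; it falls back to Utimes, so each Timespec collapses to microsecond precision. A small sketch of that down-conversion, using local stand-ins for the Solaris Timespec/Timeval (int64 fields assumed):

package main

import "fmt"

// Local stand-ins for unix.Timespec / unix.Timeval on Solaris; field
// widths differ on other ports.
type timespec struct{ Sec, Nsec int64 }
type timeval struct{ Sec, Usec int64 }

// timespecToTimevals has the same shape as the fallback in the rewritten
// UtimesNano: nanoseconds are divided down to microseconds.
func timespecToTimevals(ts [2]timespec) (tv [2]timeval) {
    for i := 0; i < 2; i++ {
        tv[i].Sec = ts[i].Sec
        tv[i].Usec = ts[i].Nsec / 1000 // sub-microsecond remainder is dropped
    }
    return
}

func main() {
    ts := [2]timespec{{Sec: 1, Nsec: 1999}, {Sec: 2, Nsec: 500}}
    fmt.Println(timespecToTimevals(ts)) // [{1 1} {2 0}]: 999ns and 500ns lost
}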
-func Futimes(fd int, tv []Timeval) error { - if tv == nil { - return futimesat(fd, nil, nil) - } - if len(tv) != 2 { - return EINVAL - } - return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) -} - func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: @@ -423,7 +350,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny nfd, err = accept(fd, &rsa, &len) - if nfd == -1 { + if err != nil { return } sa, err = anyToSockaddr(&rsa) @@ -434,8 +361,6 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg - func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr var rsa RawSockaddrAny @@ -457,7 +382,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } msg.Iov = &iov msg.Iovlen = 1 - if n, err = recvmsg(fd, &msg, flags); n == -1 { + if n, err = recvmsg(fd, &msg, flags); err != nil { return } oobn = int(msg.Accrightslen) @@ -512,67 +437,6 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return n, nil } -//sys acct(path *byte) (err error) - -func Acct(path string) (err error) { - if len(path) == 0 { - // Assume caller wants to disable accounting. - return acct(nil) - } - - pathp, err := BytePtrFromString(path) - if err != nil { - return err - } - return acct(pathp) -} - -/* - * Expose the ioctl function - */ - -//sys ioctl(fd int, req int, arg uintptr) (err error) - -func IoctlSetInt(fd int, req int, value int) (err error) { - return ioctl(fd, req, uintptr(value)) -} - -func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlSetTermios(fd int, req int, value *Termios) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlSetTermio(fd int, req int, value *Termio) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlGetInt(fd int, req int) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req int) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req int) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermio(fd int, req int) (*Termio, error) { - var value Termio - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - /* * Exposed directly */ @@ -583,29 +447,21 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) { //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) //sys Close(fd int) (err error) -//sys Creat(path string, mode uint32) (fd int, err error) //sys Dup(fd int) (nfd int, err error) -//sys Dup2(oldfd int, newfd int) (err error) //sys Exit(code int) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) -//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) -//sys Fdatasync(fd int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat 
*Stat_t) (err error) //sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) -//sysnb Getpgid(pid int) (pgid int, err error) -//sysnb Getpgrp() (pgid int, err error) //sys Geteuid() (euid int) //sys Getegid() (egid int) //sys Getppid() (ppid int) //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) -//sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) @@ -615,33 +471,20 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) { //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) -//sys Mkdirat(dirfd int, path string, mode uint32) (err error) -//sys Mkfifo(path string, mode uint32) (err error) -//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) -//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) -//sys Mlock(b []byte) (err error) -//sys Mlockall(flags int) (err error) -//sys Mprotect(b []byte, prot int) (err error) -//sys Munlock(b []byte) (err error) -//sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) -//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) -//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) -//sys Sethostname(p []byte) (err error) //sysnb Setpgid(pid int, pgid int) (err error) //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) @@ -653,17 +496,12 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) { //sys Stat(path string, stat *Stat_t) (err error) //sys Symlink(path string, link string) (err error) //sys Sync() (err error) -//sysnb Times(tms *Tms) (ticks uintptr, err error) //sys Truncate(path string, length int64) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys Umask(mask int) (oldmask int) -//sysnb Uname(buf *Utsname) (err error) -//sys Unmount(target string, flags int) (err error) = libc.umount +//sys Umask(newmask int) (oldmask int) //sys Unlink(path string) (err error) -//sys Unlinkat(dirfd int, path string, flags int) (err error) -//sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys Utime(path string, buf *Utimbuf) (err error) +//sys Utimes(path string, times *[2]Timeval) (err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err 
error) @@ -674,8 +512,10 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) { //sys write(fd int, p []byte) (n int, err error) //sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername +//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg func readlen(fd int, buf *byte, nbuf int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) @@ -708,18 +548,3 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } - -//sys sysconf(name int) (n int64, err error) - -// pageSize caches the value of Getpagesize, since it can't change -// once the system is booted. -var pageSize int64 // accessed atomically - -func Getpagesize() int { - n := atomic.LoadInt64(&pageSize) - if n == 0 { - n, _ = sysconf(_SC_PAGESIZE) - atomic.StoreInt64(&pageSize, n) - } - return int(n) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 5aff62c3b..9c173cd5f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { @@ -14,6 +16,8 @@ func NsecToTimespec(nsec int64) (ts Timespec) { return } +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + func NsecToTimeval(nsec int64) (tv Timeval) { nsec += 999 // round up to microsecond tv.Usec = nsec % 1e9 / 1e3 diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go index 49208a000..bcc79d19c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go @@ -316,38 +316,3 @@ func TestSeekFailure(t *testing.T) { t.Fatalf("Seek(-1, 0, 0) return error with empty message") } } - -func TestDup(t *testing.T) { - file, err := ioutil.TempFile("", "TestDup") - if err != nil { - t.Fatalf("Tempfile failed: %v", err) - } - defer os.Remove(file.Name()) - defer file.Close() - f := int(file.Fd()) - - newFd, err := unix.Dup(f) - if err != nil { - t.Fatalf("Dup: %v", err) - } - - err = unix.Dup2(newFd, newFd+1) - if err != nil { - t.Fatalf("Dup2: %v", err) - } - - b1 := []byte("Test123") - b2 := make([]byte, 7) - _, err = unix.Write(newFd+1, b1) - if err != nil { - t.Fatalf("Write to dup2 fd failed: %v", err) - } - _, err = unix.Seek(f, 0, 0) - _, err = unix.Read(f, b2) - if err != nil { - t.Fatalf("Read back failed: %v", err) - } - if string(b1) != string(b2) { - t.Errorf("Dup: stdout write not in file, expected %v, got %v", string(b1), string(b2)) - } -} diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go index 115326182..2bb15cb8b 
100644 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -241,10 +241,3 @@ type BpfHdr C.struct_bpf_hdr // Terminal handling type Termios C.struct_termios - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go index 0d1118e51..9b75633b3 100644 --- a/vendor/golang.org/x/sys/unix/types_linux.go +++ b/vendor/golang.org/x/sys/unix/types_linux.go @@ -24,7 +24,6 @@ package unix #include #include #include -#include #include #include #include @@ -51,21 +50,11 @@ package unix #include #include #include -#include +#include #include #include #include #include -#include -#include -#include - -#ifdef TCSETS2 -// On systems that have "struct termios2" use this as type Termios. -typedef struct termios2 termios_t; -#else -typedef struct termios termios_t; -#endif enum { sizeofPtr = sizeof(void*), @@ -102,36 +91,17 @@ typedef struct user_regs PtraceRegs; typedef struct user_pt_regs PtraceRegs; #elif defined(__powerpc64__) typedef struct pt_regs PtraceRegs; -#elif defined(__mips__) -typedef struct user PtraceRegs; -#elif defined(__s390x__) -typedef struct _user_regs_struct PtraceRegs; -#elif defined(__sparc__) -#include -typedef struct pt_regs PtraceRegs; #else typedef struct user_regs_struct PtraceRegs; #endif -#if defined(__s390x__) -typedef struct _user_psw_struct ptracePsw; -typedef struct _user_fpregs_struct ptraceFpregs; -typedef struct _user_per_struct ptracePer; -#else -typedef struct {} ptracePsw; -typedef struct {} ptraceFpregs; -typedef struct {} ptracePer; -#endif - // The real epoll_event is a union, and godefs doesn't handle it well. struct my_epoll_event { uint32_t events; -#if defined(__ARM_EABI__) || defined(__aarch64__) +#ifdef __ARM_EABI__ // padding is not specified in linux/eventpoll.h but added to conform to the // alignment requirements of EABI int32_t padFd; -#elif defined(__powerpc64__) || defined(__s390x__) || defined(__sparc__) - int32_t _padFd; #endif int32_t fd; int32_t pad; @@ -217,10 +187,6 @@ type RawSockaddrLinklayer C.struct_sockaddr_ll type RawSockaddrNetlink C.struct_sockaddr_nl -type RawSockaddrHCI C.struct_sockaddr_hci - -type RawSockaddrCAN C.struct_sockaddr_can - type RawSockaddr C.struct_sockaddr type RawSockaddrAny C.struct_sockaddr_any @@ -260,8 +226,6 @@ const ( SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl - SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci - SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can SizeofLinger = C.sizeof_struct_linger SizeofIPMreq = C.sizeof_struct_ip_mreq SizeofIPMreqn = C.sizeof_struct_ip_mreqn @@ -412,13 +376,6 @@ const SizeofInotifyEvent = C.sizeof_struct_inotify_event // Register structures type PtraceRegs C.PtraceRegs -// Structures contained in PtraceRegs on s390x (exported by mkpost.go) -type ptracePsw C.ptracePsw - -type ptraceFpregs C.ptraceFpregs - -type ptracePer C.ptracePer - // Misc type FdSet C.fd_set @@ -434,28 +391,9 @@ type EpollEvent C.struct_my_epoll_event const ( AT_FDCWD = C.AT_FDCWD AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) -type PollFd C.struct_pollfd - -const ( - POLLIN = C.POLLIN - POLLPRI = C.POLLPRI - POLLOUT = C.POLLOUT - POLLRDHUP = C.POLLRDHUP - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLNVAL = C.POLLNVAL -) 
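The my_epoll_event shim kept in the hunk above exists because the kernel's epoll_event is a packed union that cgo -godefs mishandles, so padding is inserted by hand per ABI; the reverted version special-cases only ARM EABI. For context, a usage sketch of the resulting EpollEvent type, assuming a Linux build where this snapshot still exports EpollCreate1/EpollCtl/EpollWait:

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    epfd, err := unix.EpollCreate1(0)
    if err != nil {
        fmt.Println("epoll_create1:", err)
        return
    }
    defer unix.Close(epfd)

    // Register stdin for readability using the godefs-generated type.
    ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: 0}
    if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, 0, &ev); err != nil {
        fmt.Println("epoll_ctl:", err)
        return
    }

    events := make([]unix.EpollEvent, 8)
    n, err := unix.EpollWait(epfd, events, 1000) // 1s timeout, in ms
    fmt.Println(n, err)
}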
- -type Sigset_t C.sigset_t - -// sysconf information - -const _SC_PAGESIZE = C._SC_PAGESIZE - // Terminal handling -type Termios C.termios_t +type Termios C.struct_termios diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index c5d5c8f16..753c7996b 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -15,18 +15,10 @@ package unix /* #define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. -#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec #include #include -#include -#include #include #include -#include #include #include #include @@ -38,9 +30,7 @@ package unix #include #include #include -#include #include -#include #include #include #include @@ -50,8 +40,6 @@ package unix #include #include #include -#include -#include enum { sizeofPtr = sizeof(void*), @@ -81,8 +69,6 @@ const ( sizeofInt = C.sizeof_int sizeofLong = C.sizeof_long sizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN ) // Basic types @@ -102,10 +88,6 @@ type Timeval C.struct_timeval type Timeval32 C.struct_timeval32 -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - // Processes type Rusage C.struct_rusage @@ -193,20 +175,6 @@ const ( type FdSet C.fd_set -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - // Routing and interface messages const ( @@ -249,14 +217,6 @@ type BpfTimeval C.struct_bpf_timeval type BpfHdr C.struct_bpf_hdr -// sysconf information - -const _SC_PAGESIZE = C._SC_PAGESIZE - // Terminal handling type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go new file mode 100644 index 000000000..2a329f06e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go @@ -0,0 +1,1530 @@ +// mkerrors.sh -m32 +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// +build 386,dragonfly + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -m32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x21 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x23 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x22 + AF_NATM = 0x1d + AF_NETGRAPH = 0x20 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + 
BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80084267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DEFAULTBUFSIZE = 0x1000 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MAX_CLONES = 0x80 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DOCSIS = 0x8f + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 
0x72 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DBF = 0xf + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0x8 + EVFILT_MARKER = 0xf + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_NODATA = 0x1000 + EV_ONESHOT = 0x10 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTEXIT_LWP = 0x10000 + EXTEXIT_PROC = 0x0 + EXTEXIT_SETINT = 0x1 + EXTEXIT_SIMPLE = 0x0 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x118e72 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NPOLLING = 0x100000 + IFF_OACTIVE = 0x400 + IFF_OACTIVE_COMPAT = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_POLLING = 0x10000 + IFF_POLLING_COMPAT = 0x10000 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 
0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xf3 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR 
= 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SKIP = 0x39 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UNKNOWN = 0x102 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + 
IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PKTOPTIONS = 0x34 + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_RESETLOG = 0x37 + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CONTROL_END = 0xb + MADV_CONTROL_START = 0xa + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_INVAL = 0xa + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SETMAP = 0xb + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_INHERIT = 0x80 + MAP_NOCORE = 0x20000 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_SHARED = 0x1 + MAP_SIZEALIGN = 0x40000 + MAP_STACK = 0x400 + MAP_TRYFIXED = 0x10000 + MAP_VPAGETABLE = 0x2000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FBLOCKING = 0x10000 + MSG_FMASK = 0xffff0000 + MSG_FNONBLOCKING = 0x20000 + MSG_NOSIGNAL = 0x400 + MSG_NOTIFICATION = 0x200 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_SYNC = 0x800 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_MAXID = 0x4 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + 
NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x20000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x8000000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FAPPEND = 0x100000 + O_FASYNCWRITE = 0x800000 + O_FBLOCKING = 0x40000 + O_FBUFFERED = 0x2000000 + O_FMASK = 0x7fc0000 + O_FNONBLOCKING = 0x80000 + O_FOFFSET = 0x200000 + O_FSYNC = 0x80 + O_FSYNCWRITE = 0x400000 + O_FUNBUFFERED = 0x1000000 + O_MAPONREAD = 0x4000000 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0xb + RTAX_MPLS1 = 0x8 + RTAX_MPLS2 = 0x9 + RTAX_MPLS3 = 0xa + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_MPLS1 = 0x100 + RTA_MPLS2 = 0x200 + RTA_MPLS3 = 0x400 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPLSOPS = 0x1000000 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x6 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_IWCAPSEGS = 0x400 + RTV_IWMAXSEGS = 0x200 + RTV_MSL = 0x100 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDATA = 0xc0206926 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFINDEX = 0xc0206920 + 
SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPOLLCPU = 0xc020697e + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFTSOLEN = 0xc0206980 + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFPOLLCPU = 0x8020697d + SIOCSIFTSOLEN = 0x8020697f + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NOSIGPIPE = 0x800 + SO_OOBINLINE = 0x100 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDSPACE = 0x100a + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_FASTKEEP = 0x80 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x20 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0x100 + TCP_MIN_WINSHIFT = 0x5 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_SIGNATURE_ENABLE = 0x10 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40087458 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCISPTMASTER = 0x20007455 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + 
TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VCHECKPT = 0x13 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WSTOPPED = 0x7f + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EASYNC = syscall.Errno(0x63) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x63) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEDIUM = syscall.Errno(0x5d) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) 
+ ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUNUSED94 = syscall.Errno(0x5e) + EUNUSED95 = syscall.Errno(0x5f) + EUNUSED96 = syscall.Errno(0x60) + EUNUSED97 = syscall.Errno(0x61) + EUNUSED98 = syscall.Errno(0x62) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCKPT = syscall.Signal(0x21) + SIGCKPTEXIT = syscall.Signal(0x22) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "device not configured", + 7: "argument list too long", + 8: "exec format error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource deadlock avoided", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "operation not supported by device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "result too large", + 35: "resource temporarily unavailable", + 36: "operation now in progress", + 37: "operation already in progress", + 38: "socket operation on non-socket", + 39: "destination address required", + 40: "message too long", + 41: "protocol wrong type for socket", + 42: "protocol not available", + 43: "protocol not supported", + 44: "socket type not supported", + 45: "operation not supported", + 46: "protocol family not supported", + 47: "address family not supported by protocol family", + 48: "address already in use", + 49: "can't assign requested address", + 50: "network is down", + 51: "network is unreachable", + 52: "network dropped connection on reset", + 53: "software caused connection abort", + 54: "connection reset by peer", + 55: "no buffer space available", + 56: "socket is already connected", + 57: "socket is not 
connected", + 58: "can't send after socket shutdown", + 59: "too many references: can't splice", + 60: "operation timed out", + 61: "connection refused", + 62: "too many levels of symbolic links", + 63: "file name too long", + 64: "host is down", + 65: "no route to host", + 66: "directory not empty", + 67: "too many processes", + 68: "too many users", + 69: "disc quota exceeded", + 70: "stale NFS file handle", + 71: "too many levels of remote in path", + 72: "RPC struct is bad", + 73: "RPC version wrong", + 74: "RPC prog. not avail", + 75: "program version wrong", + 76: "bad procedure for program", + 77: "no locks available", + 78: "function not implemented", + 79: "inappropriate file type or format", + 80: "authentication error", + 81: "need authenticator", + 82: "identifier removed", + 83: "no message of desired type", + 84: "value too large to be stored in data type", + 85: "operation canceled", + 86: "illegal byte sequence", + 87: "attribute not found", + 88: "programming error", + 89: "bad message", + 90: "multihop attempted", + 91: "link has been severed", + 92: "protocol error", + 93: "no medium found", + 94: "unknown error: 94", + 95: "unknown error: 95", + 96: "unknown error: 96", + 97: "unknown error: 97", + 98: "unknown error: 98", + 99: "unknown error: 99", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "suspended (signal)", + 18: "suspended", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 26: "virtual timer expired", + 27: "profiling timer expired", + 28: "window size changes", + 29: "information request", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "thread Scheduler", + 33: "checkPoint", + 34: "checkPointExit", +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 7b95751c3..3c2a5bfc2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -225,20 +225,6 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x4 - CLOCK_MONOTONIC_FAST = 0xc - CLOCK_MONOTONIC_PRECISE = 0xb - CLOCK_PROCESS_CPUTIME_ID = 0xf - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_FAST = 0xa - CLOCK_REALTIME_PRECISE = 0x9 - CLOCK_SECOND = 0xd - CLOCK_THREAD_CPUTIME_ID = 0xe - CLOCK_UPTIME = 0x5 - CLOCK_UPTIME_FAST = 0x8 - CLOCK_UPTIME_PRECISE = 0x7 - CLOCK_VIRTUAL = 0x1 CREAD = 0x800 CS5 = 0x0 CS6 = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index e48e7799a..3b3f7a9d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -225,20 +225,6 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x4 - CLOCK_MONOTONIC_FAST = 0xc - CLOCK_MONOTONIC_PRECISE = 0xb - CLOCK_PROCESS_CPUTIME_ID = 0xf - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_FAST = 0xa - CLOCK_REALTIME_PRECISE = 0x9 - CLOCK_SECOND = 0xd - CLOCK_THREAD_CPUTIME_ID = 0xe - 
CLOCK_UPTIME = 0x5 - CLOCK_UPTIME_FAST = 0x8 - CLOCK_UPTIME_PRECISE = 0x7 - CLOCK_VIRTUAL = 0x1 CREAD = 0x800 CS5 = 0x0 CS6 = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index b40d0299b..6fbef7522 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -145,7 +145,6 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd - BOTHER = 0x1000 BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -187,32 +186,7 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 CFLUSH = 0xf - CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 @@ -235,7 +209,6 @@ const ( CLONE_FILES = 0x400 CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -252,14 +225,7 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 CREAD = 0x80 - CRTSCTS = 0x80000000 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -387,9 +353,6 @@ const ( EXTPROC = 0x10000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 FLUSHO = 0x1000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -425,7 +388,6 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x1 HUPCL = 0x400 - IBSHIFT = 0x10 ICANON = 0x2 ICMPV6_FILTER = 0x1 ICRNL = 0x100 @@ -657,7 +619,6 @@ const ( IP_XFRM_POLICY = 0x11 ISIG = 0x1 ISTRIP = 0x20 - IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 @@ -789,13 +750,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x100 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -820,7 +778,6 @@ const ( OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 - OLCUC = 0x2 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -846,7 +803,6 @@ const ( O_RDWR = 0x2 O_RSYNC = 0x101000 O_SYNC = 0x101000 - O_TMPFILE = 0x410000 O_TRUNC = 0x200 O_WRONLY = 0x1 PACKET_ADD_MEMBERSHIP = 0x1 @@ -1319,23 +1275,10 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 @@ -1355,32 +1298,14 @@ const ( TCP_SYNCNT = 0x7 TCP_WINDOW_CLAMP = 0xa TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c TIOCGDEV = 0x80045432 TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 TIOCGICOUNT 
= 0x545d TIOCGLCKTRMIOS = 0x5456 TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e @@ -1486,8 +1411,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9f0600ccb..b40ccb8d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -145,7 +145,6 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd - BOTHER = 0x1000 BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -187,32 +186,7 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 CFLUSH = 0xf - CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 @@ -235,7 +209,6 @@ const ( CLONE_FILES = 0x400 CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -252,14 +225,7 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 CREAD = 0x80 - CRTSCTS = 0x80000000 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -387,9 +353,6 @@ const ( EXTPROC = 0x10000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 FLUSHO = 0x1000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -425,7 +388,6 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x1 HUPCL = 0x400 - IBSHIFT = 0x10 ICANON = 0x2 ICMPV6_FILTER = 0x1 ICRNL = 0x100 @@ -657,7 +619,6 @@ const ( IP_XFRM_POLICY = 0x11 ISIG = 0x1 ISTRIP = 0x20 - IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 @@ -789,13 +750,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x100 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -820,7 +778,6 @@ const ( OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 - OLCUC = 0x2 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -846,7 +803,6 @@ const ( O_RDWR = 0x2 O_RSYNC = 0x101000 O_SYNC = 0x101000 - O_TMPFILE = 0x410000 O_TRUNC = 0x200 O_WRONLY = 0x1 PACKET_ADD_MEMBERSHIP = 0x1 @@ -1320,23 +1276,10 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 @@ -1356,32 +1299,14 @@ const ( TCP_SYNCNT = 0x7 TCP_WINDOW_CLAMP = 0xa TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - 
TCSETXW = 0x5435 - TCXONC = 0x540a TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c TIOCGDEV = 0x80045432 TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 TIOCGICOUNT = 0x545d TIOCGLCKTRMIOS = 0x5456 TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e @@ -1487,8 +1412,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 647a796e3..4535b78b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -110,38 +110,6 @@ const ( ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff ARPHRD_X25 = 0x10f - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BOTHER = 0x1000 BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -182,34 +150,6 @@ const ( BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CIBAUD = 0x100f0000 - CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -231,7 +171,6 @@ const ( CLONE_FILES = 0x400 CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -248,25 +187,6 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -278,13 +198,6 @@ const ( DT_WHT = 0xe ELF_NGREG = 0x12 ELF_PRARGSZ = 0x50 - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 EPOLLERR = 0x8 EPOLLET = -0x80000000 EPOLLHUP = 0x10 @@ -367,15 +280,8 @@ const ( ETH_P_WAN_PPP = 0x7 ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 F_EXLCK = 0x4 @@ -409,12 +315,7 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 IFA_F_HOMEADDRESS = 0x10 @@ -448,12 +349,6 @@ const ( IFF_UP = 0x1 IFF_VNET_HDR = 0x4000 IFNAMSIZ = 0x10 - IGNBRK = 0x1 - 
IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 IN_ACCESS = 0x1 IN_ALL_EVENTS = 0xfff IN_ATTRIB = 0x4 @@ -617,13 +512,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -747,13 +635,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x100 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -773,15 +658,6 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPOST = 0x1 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -798,7 +674,6 @@ const ( O_NOCTTY = 0x100 O_NOFOLLOW = 0x8000 O_NONBLOCK = 0x800 - O_PATH = 0x200000 O_RDONLY = 0x0 O_RDWR = 0x2 O_RSYNC = 0x1000 @@ -820,10 +695,6 @@ const ( PACKET_RECV_OUTPUT = 0x3 PACKET_RX_RING = 0x5 PACKET_STATISTICS = 0x6 - PARENB = 0x100 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1243,23 +1114,9 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 @@ -1278,33 +1135,14 @@ const ( TCP_QUICKACK = 0xc TCP_SYNCNT = 0x7 TCP_WINDOW_CLAMP = 0xa - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c TIOCGDEV = 0x80045432 TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 TIOCGICOUNT = 0x545d TIOCGLCKTRMIOS = 0x5456 TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e @@ -1362,7 +1200,6 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 - TOSTOP = 0x100 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 TUNGETFEATURES = 0x800454cf @@ -1380,26 +1217,6 @@ const ( TUNSETSNDBUF = 0x400454d4 TUNSETTXFILTER = 0x400454d1 TUNSETVNETHDRSZ = 0x400454d8 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMIN = 0x6 - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 @@ -1410,8 +1227,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a6d1e1fa3..165073f13 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -149,7 +149,6 @@ const ( B75 = 0x2 B921600 = 0x1007 
B9600 = 0xd - BOTHER = 0x1000 BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -193,32 +192,7 @@ const ( BPF_X = 0x8 BPF_XOR = 0xa0 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 CFLUSH = 0xf - CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 @@ -241,7 +215,6 @@ const ( CLONE_FILES = 0x400 CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -258,14 +231,7 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 CREAD = 0x80 - CRTSCTS = 0x80000000 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -401,9 +367,6 @@ const ( EXTPROC = 0x10000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 FLUSHO = 0x1000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -439,7 +402,6 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x1 HUPCL = 0x400 - IBSHIFT = 0x10 ICANON = 0x2 ICMPV6_FILTER = 0x1 ICRNL = 0x100 @@ -683,7 +645,6 @@ const ( IP_XFRM_POLICY = 0x11 ISIG = 0x1 ISTRIP = 0x20 - IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 @@ -821,13 +782,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x100 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -852,7 +810,6 @@ const ( OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 - OLCUC = 0x2 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -1375,23 +1332,10 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_COOKIE_IN_ALWAYS = 0x1 TCP_COOKIE_MAX = 0x10 @@ -1430,21 +1374,6 @@ const ( TCP_USER_TIMEOUT = 0x12 TCP_WINDOW_CLAMP = 0xa TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c @@ -1564,8 +1493,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go deleted file mode 100644 index 36535b242..000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ /dev/null @@ -1,1917 +0,0 @@ -// mkerrors.sh -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// +build mips64,linux - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- _const.go - -package unix - -import "syscall" - -const ( - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - 
AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_X25 = 0x10f - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_OR = 0x40 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 
0x2 - CFLUSH = 0xf - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CREAD = 0x80 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 
0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x2000 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - HUPCL = 0x400 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 
0x87 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_UNICAST_HOPS = 0x10 - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_MERGEABLE 
= 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - NAME_MAX = 0xff - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPOST = 0x1 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - 
O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE 
= 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_NOFILE = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x10 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x16 - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - 
RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x13 - RTM_NR_MSGTYPES = 0x4c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x11 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_GATED = 0x8 - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPNS = 0x23 - SCM_WIFI_STATUS = 0x29 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCGARP = 0x8954 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGPGRP = 0x40047309 - SIOCGRARP = 0x8961 - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - 
SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ATM = 0x108 - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_PACKET = 0x107 - SOL_RAW = 0xff - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_X25 = 0x106 - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_BINDTODEVICE = 0x19 - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TYPE = 0x1008 - SO_WIFI_STATUS = 0x29 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TCFLSH = 0x5407 - TCIFLUSH = 0x0 - TCIOFLUSH = 0x2 - TCOFLUSH = 0x1 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_INFO = 0xb - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCXONC = 0x5406 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 
0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TOSTOP = 0x8000 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETDEBUG = 0x800454c9 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMIN = 0x4 - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x7d) - EADDRNOTAVAIL = syscall.Errno(0x7e) - EADV = syscall.Errno(0x44) - EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) - EALREADY = syscall.Errno(0x95) - EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) - EBADFD = syscall.Errno(0x51) - EBADMSG = syscall.Errno(0x4d) - EBADR = syscall.Errno(0x33) - EBADRQC = syscall.Errno(0x36) - EBADSLT = syscall.Errno(0x37) - EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) - ECHRNG = syscall.Errno(0x25) - ECOMM = syscall.Errno(0x46) - ECONNABORTED = syscall.Errno(0x82) - ECONNREFUSED = syscall.Errno(0x92) - ECONNRESET = syscall.Errno(0x83) - EDEADLK = syscall.Errno(0x2d) - EDEADLOCK 
= syscall.Errno(0x38) - EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) - EDOTDOT = syscall.Errno(0x49) - EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EHOSTDOWN = syscall.Errno(0x93) - EHOSTUNREACH = syscall.Errno(0x94) - EHWPOISON = syscall.Errno(0xa8) - EIDRM = syscall.Errno(0x24) - EILSEQ = syscall.Errno(0x58) - EINIT = syscall.Errno(0x8d) - EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) - EISNAM = syscall.Errno(0x8b) - EKEYEXPIRED = syscall.Errno(0xa2) - EKEYREJECTED = syscall.Errno(0xa4) - EKEYREVOKED = syscall.Errno(0xa3) - EL2HLT = syscall.Errno(0x2c) - EL2NSYNC = syscall.Errno(0x26) - EL3HLT = syscall.Errno(0x27) - EL3RST = syscall.Errno(0x28) - ELIBACC = syscall.Errno(0x53) - ELIBBAD = syscall.Errno(0x54) - ELIBEXEC = syscall.Errno(0x57) - ELIBMAX = syscall.Errno(0x56) - ELIBSCN = syscall.Errno(0x55) - ELNRNG = syscall.Errno(0x29) - ELOOP = syscall.Errno(0x5a) - EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x61) - EMULTIHOP = syscall.Errno(0x4a) - ENAMETOOLONG = syscall.Errno(0x4e) - ENAVAIL = syscall.Errno(0x8a) - ENETDOWN = syscall.Errno(0x7f) - ENETRESET = syscall.Errno(0x81) - ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) - ENOANO = syscall.Errno(0x35) - ENOBUFS = syscall.Errno(0x84) - ENOCSI = syscall.Errno(0x2b) - ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOKEY = syscall.Errno(0xa1) - ENOLCK = syscall.Errno(0x2e) - ENOLINK = syscall.Errno(0x43) - ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x23) - ENONET = syscall.Errno(0x40) - ENOPKG = syscall.Errno(0x41) - ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x3f) - ENOSTR = syscall.Errno(0x3c) - ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x5d) - ENOTNAM = syscall.Errno(0x89) - ENOTRECOVERABLE = syscall.Errno(0xa6) - ENOTSOCK = syscall.Errno(0x5f) - ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) - ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x7a) - EOVERFLOW = syscall.Errno(0x4f) - EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) - EPROTO = syscall.Errno(0x47) - EPROTONOSUPPORT = syscall.Errno(0x78) - EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) - EREMCHG = syscall.Errno(0x52) - EREMDEV = syscall.Errno(0x8e) - EREMOTE = syscall.Errno(0x42) - EREMOTEIO = syscall.Errno(0x8c) - ERESTART = syscall.Errno(0x5b) - ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) - ESHUTDOWN = syscall.Errno(0x8f) - ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESRMNT = syscall.Errno(0x45) - ESTALE = syscall.Errno(0x97) - ESTRPIPE = syscall.Errno(0x5c) - ETIME = syscall.Errno(0x3e) - ETIMEDOUT = syscall.Errno(0x91) - ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) - EUCLEAN = syscall.Errno(0x87) - EUNATCH = syscall.Errno(0x2a) - EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) - 
EXFULL = syscall.Errno(0x34)
-)
-
-// Signals
-const (
- SIGABRT = syscall.Signal(0x6)
- SIGALRM = syscall.Signal(0xe)
- SIGBUS = syscall.Signal(0xa)
- SIGCHLD = syscall.Signal(0x12)
- SIGCLD = syscall.Signal(0x12)
- SIGCONT = syscall.Signal(0x19)
- SIGEMT = syscall.Signal(0x7)
- SIGFPE = syscall.Signal(0x8)
- SIGHUP = syscall.Signal(0x1)
- SIGILL = syscall.Signal(0x4)
- SIGINT = syscall.Signal(0x2)
- SIGIO = syscall.Signal(0x16)
- SIGIOT = syscall.Signal(0x6)
- SIGKILL = syscall.Signal(0x9)
- SIGPIPE = syscall.Signal(0xd)
- SIGPOLL = syscall.Signal(0x16)
- SIGPROF = syscall.Signal(0x1d)
- SIGPWR = syscall.Signal(0x13)
- SIGQUIT = syscall.Signal(0x3)
- SIGSEGV = syscall.Signal(0xb)
- SIGSTOP = syscall.Signal(0x17)
- SIGSYS = syscall.Signal(0xc)
- SIGTERM = syscall.Signal(0xf)
- SIGTRAP = syscall.Signal(0x5)
- SIGTSTP = syscall.Signal(0x18)
- SIGTTIN = syscall.Signal(0x1a)
- SIGTTOU = syscall.Signal(0x1b)
- SIGURG = syscall.Signal(0x15)
- SIGUSR1 = syscall.Signal(0x10)
- SIGUSR2 = syscall.Signal(0x11)
- SIGVTALRM = syscall.Signal(0x1c)
- SIGWINCH = syscall.Signal(0x14)
- SIGXCPU = syscall.Signal(0x1e)
- SIGXFSZ = syscall.Signal(0x1f)
-)
-
-// Error table
-var errors = [...]string{
- 1: "operation not permitted",
- 2: "no such file or directory",
- 3: "no such process",
- 4: "interrupted system call",
- 5: "input/output error",
- 6: "no such device or address",
- 7: "argument list too long",
- 8: "exec format error",
- 9: "bad file descriptor",
- 10: "no child processes",
- 11: "resource temporarily unavailable",
- 12: "cannot allocate memory",
- 13: "permission denied",
- 14: "bad address",
- 15: "block device required",
- 16: "device or resource busy",
- 17: "file exists",
- 18: "invalid cross-device link",
- 19: "no such device",
- 20: "not a directory",
- 21: "is a directory",
- 22: "invalid argument",
- 23: "too many open files in system",
- 24: "too many open files",
- 25: "inappropriate ioctl for device",
- 26: "text file busy",
- 27: "file too large",
- 28: "no space left on device",
- 29: "illegal seek",
- 30: "read-only file system",
- 31: "too many links",
- 32: "broken pipe",
- 33: "numerical argument out of domain",
- 34: "numerical result out of range",
- 35: "no message of desired type",
- 36: "identifier removed",
- 37: "channel number out of range",
- 38: "level 2 not synchronized",
- 39: "level 3 halted",
- 40: "level 3 reset",
- 41: "link number out of range",
- 42: "protocol driver not attached",
- 43: "no CSI structure available",
- 44: "level 2 halted",
- 45: "resource deadlock avoided",
- 46: "no locks available",
- 50: "invalid exchange",
- 51: "invalid request descriptor",
- 52: "exchange full",
- 53: "no anode",
- 54: "invalid request code",
- 55: "invalid slot",
- 56: "file locking deadlock error",
- 59: "bad font file format",
- 60: "device not a stream",
- 61: "no data available",
- 62: "timer expired",
- 63: "out of streams resources",
- 64: "machine is not on the network",
- 65: "package not installed",
- 66: "object is remote",
- 67: "link has been severed",
- 68: "advertise error",
- 69: "srmount error",
- 70: "communication error on send",
- 71: "protocol error",
- 73: "RFS specific error",
- 74: "multihop attempted",
- 77: "bad message",
- 78: "file name too long",
- 79: "value too large for defined data type",
- 80: "name not unique on network",
- 81: "file descriptor in bad state",
- 82: "remote address changed",
- 83: "can not access a needed shared library",
- 84: "accessing a corrupted shared library",
- 85: ".lib section in a.out corrupted",
- 86: "attempting to link in too many shared libraries",
- 87: "cannot exec a shared library directly",
- 88: "invalid or incomplete multibyte or wide character",
- 89: "function not implemented",
- 90: "too many levels of symbolic links",
- 91: "interrupted system call should be restarted",
- 92: "streams pipe error",
- 93: "directory not empty",
- 94: "too many users",
- 95: "socket operation on non-socket",
- 96: "destination address required",
- 97: "message too long",
- 98: "protocol wrong type for socket",
- 99: "protocol not available",
- 120: "protocol not supported",
- 121: "socket type not supported",
- 122: "operation not supported",
- 123: "protocol family not supported",
- 124: "address family not supported by protocol",
- 125: "address already in use",
- 126: "cannot assign requested address",
- 127: "network is down",
- 128: "network is unreachable",
- 129: "network dropped connection on reset",
- 130: "software caused connection abort",
- 131: "connection reset by peer",
- 132: "no buffer space available",
- 133: "transport endpoint is already connected",
- 134: "transport endpoint is not connected",
- 135: "structure needs cleaning",
- 137: "not a XENIX named type file",
- 138: "no XENIX semaphores available",
- 139: "is a named type file",
- 140: "remote I/O error",
- 141: "unknown error 141",
- 142: "unknown error 142",
- 143: "cannot send after transport endpoint shutdown",
- 144: "too many references: cannot splice",
- 145: "connection timed out",
- 146: "connection refused",
- 147: "host is down",
- 148: "no route to host",
- 149: "operation already in progress",
- 150: "operation now in progress",
- 151: "stale file handle",
- 158: "operation canceled",
- 159: "no medium found",
- 160: "wrong medium type",
- 161: "required key not available",
- 162: "key has expired",
- 163: "key has been revoked",
- 164: "key was rejected by service",
- 165: "owner died",
- 166: "state not recoverable",
- 167: "operation not possible due to RF-kill",
- 168: "memory page has hardware error",
- 1133: "disk quota exceeded",
-}
-
-// Signal table
-var signals = [...]string{
- 1: "hangup",
- 2: "interrupt",
- 3: "quit",
- 4: "illegal instruction",
- 5: "trace/breakpoint trap",
- 6: "aborted",
- 7: "EMT trap",
- 8: "floating point exception",
- 9: "killed",
- 10: "bus error",
- 11: "segmentation fault",
- 12: "bad system call",
- 13: "broken pipe",
- 14: "alarm clock",
- 15: "terminated",
- 16: "user defined signal 1",
- 17: "user defined signal 2",
- 18: "child exited",
- 19: "power failure",
- 20: "window changed",
- 21: "urgent I/O condition",
- 22: "I/O possible",
- 23: "stopped (signal)",
- 24: "stopped",
- 25: "continued",
- 26: "stopped (tty input)",
- 27: "stopped (tty output)",
- 28: "virtual timer expired",
- 29: "profiling timer expired",
- 30: "CPU time limit exceeded",
- 31: "file size limit exceeded",
-}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
deleted file mode 100644
index 112f05de5..000000000
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ /dev/null
@@ -1,1917 +0,0 @@
-// mkerrors.sh
-// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
-
-// +build mips64le,linux
-
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs -- _const.go
-
-package unix
-
-import "syscall"
-
-const (
- AF_ALG = 0x26
- AF_APPLETALK = 0x5
- AF_ASH = 0x12
- AF_ATMPVC = 0x8
- AF_ATMSVC = 0x14
- AF_AX25 = 0x3
- AF_BLUETOOTH = 0x1f
- AF_BRIDGE = 0x7
- AF_CAIF = 0x25
- AF_CAN = 0x1d
- AF_DECnet = 0xc
AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_X25 = 0x10f - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_OR = 0x40 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - CFLUSH = 0xf - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 
0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CREAD = 0x80 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - 
ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x2000 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - HUPCL = 0x400 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - 
IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_UNICAST_HOPS = 0x10 - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 
0xd - MADV_WILLNEED = 0x3 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - NAME_MAX = 0xff - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPOST = 0x1 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - 
O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - 
PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_NOFILE = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x10 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x16 - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - 
RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x13 - RTM_NR_MSGTYPES = 0x4c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x11 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_GATED = 0x8 - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPNS = 0x23 - SCM_WIFI_STATUS = 0x29 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCGARP = 0x8954 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGPGRP = 0x40047309 - SIOCGRARP = 0x8961 - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - 
SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ATM = 0x108 - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_PACKET = 0x107 - SOL_RAW = 0xff - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_X25 = 0x106 - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_BINDTODEVICE = 0x19 - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TYPE = 0x1008 - SO_WIFI_STATUS = 0x29 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TCFLSH = 0x5407 - TCIFLUSH = 0x0 - TCIOFLUSH = 0x2 - TCOFLUSH = 0x1 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_INFO = 0xb - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCXONC = 0x5406 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - 
TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TOSTOP = 0x8000 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETDEBUG = 0x800454c9 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMIN = 0x4 - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x7d) - EADDRNOTAVAIL = syscall.Errno(0x7e) - EADV = syscall.Errno(0x44) - EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) - EALREADY = syscall.Errno(0x95) - EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) - EBADFD = syscall.Errno(0x51) - EBADMSG = syscall.Errno(0x4d) - EBADR = syscall.Errno(0x33) - EBADRQC = syscall.Errno(0x36) - EBADSLT = syscall.Errno(0x37) - EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) - ECHRNG = syscall.Errno(0x25) - ECOMM = syscall.Errno(0x46) - ECONNABORTED = syscall.Errno(0x82) - ECONNREFUSED = syscall.Errno(0x92) - ECONNRESET = syscall.Errno(0x83) - EDEADLK = syscall.Errno(0x2d) - EDEADLOCK = syscall.Errno(0x38) - EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) - EDOTDOT = syscall.Errno(0x49) - 
EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EHOSTDOWN = syscall.Errno(0x93) - EHOSTUNREACH = syscall.Errno(0x94) - EHWPOISON = syscall.Errno(0xa8) - EIDRM = syscall.Errno(0x24) - EILSEQ = syscall.Errno(0x58) - EINIT = syscall.Errno(0x8d) - EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) - EISNAM = syscall.Errno(0x8b) - EKEYEXPIRED = syscall.Errno(0xa2) - EKEYREJECTED = syscall.Errno(0xa4) - EKEYREVOKED = syscall.Errno(0xa3) - EL2HLT = syscall.Errno(0x2c) - EL2NSYNC = syscall.Errno(0x26) - EL3HLT = syscall.Errno(0x27) - EL3RST = syscall.Errno(0x28) - ELIBACC = syscall.Errno(0x53) - ELIBBAD = syscall.Errno(0x54) - ELIBEXEC = syscall.Errno(0x57) - ELIBMAX = syscall.Errno(0x56) - ELIBSCN = syscall.Errno(0x55) - ELNRNG = syscall.Errno(0x29) - ELOOP = syscall.Errno(0x5a) - EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x61) - EMULTIHOP = syscall.Errno(0x4a) - ENAMETOOLONG = syscall.Errno(0x4e) - ENAVAIL = syscall.Errno(0x8a) - ENETDOWN = syscall.Errno(0x7f) - ENETRESET = syscall.Errno(0x81) - ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) - ENOANO = syscall.Errno(0x35) - ENOBUFS = syscall.Errno(0x84) - ENOCSI = syscall.Errno(0x2b) - ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOKEY = syscall.Errno(0xa1) - ENOLCK = syscall.Errno(0x2e) - ENOLINK = syscall.Errno(0x43) - ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x23) - ENONET = syscall.Errno(0x40) - ENOPKG = syscall.Errno(0x41) - ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x3f) - ENOSTR = syscall.Errno(0x3c) - ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x5d) - ENOTNAM = syscall.Errno(0x89) - ENOTRECOVERABLE = syscall.Errno(0xa6) - ENOTSOCK = syscall.Errno(0x5f) - ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) - ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x7a) - EOVERFLOW = syscall.Errno(0x4f) - EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) - EPROTO = syscall.Errno(0x47) - EPROTONOSUPPORT = syscall.Errno(0x78) - EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) - EREMCHG = syscall.Errno(0x52) - EREMDEV = syscall.Errno(0x8e) - EREMOTE = syscall.Errno(0x42) - EREMOTEIO = syscall.Errno(0x8c) - ERESTART = syscall.Errno(0x5b) - ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) - ESHUTDOWN = syscall.Errno(0x8f) - ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESRMNT = syscall.Errno(0x45) - ESTALE = syscall.Errno(0x97) - ESTRPIPE = syscall.Errno(0x5c) - ETIME = syscall.Errno(0x3e) - ETIMEDOUT = syscall.Errno(0x91) - ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) - EUCLEAN = syscall.Errno(0x87) - EUNATCH = syscall.Errno(0x2a) - EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) - EXFULL = syscall.Errno(0x34) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - 
SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x12) - SIGCLD = syscall.Signal(0x12) - SIGCONT = syscall.Signal(0x19) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPOLL = syscall.Signal(0x16) - SIGPROF = syscall.Signal(0x1d) - SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x17) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x18) - SIGTTIN = syscall.Signal(0x1a) - SIGTTOU = syscall.Signal(0x1b) - SIGURG = syscall.Signal(0x15) - SIGUSR1 = syscall.Signal(0x10) - SIGUSR2 = syscall.Signal(0x11) - SIGVTALRM = syscall.Signal(0x1c) - SIGWINCH = syscall.Signal(0x14) - SIGXCPU = syscall.Signal(0x1e) - SIGXFSZ = syscall.Signal(0x1f) -) - -// Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or 
incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", -} - -// Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4e4193951..9d908d719 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -150,7 +150,6 @@ const ( B75 = 0x2 B921600 = 0x16 B9600 = 0xd - BOTHER = 0x1f BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -194,32 +193,7 @@ const ( BPF_X = 0x8 BPF_XOR = 0xa0 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - 
CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0xff - CBAUDEX = 0x0 CFLUSH = 0xf - CIBAUD = 0xff0000 CLOCAL = 0x8000 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 @@ -242,7 +216,6 @@ const ( CLONE_FILES = 0x400 CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -259,14 +232,7 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x80000000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -403,9 +369,6 @@ const ( EXTPROC = 0x10000000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -444,7 +407,6 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x1 HUPCL = 0x4000 - IBSHIFT = 0x10 ICANON = 0x100 ICMPV6_FILTER = 0x1 ICRNL = 0x100 @@ -673,7 +635,6 @@ const ( IP_XFRM_POLICY = 0x11 ISIG = 0x80 ISTRIP = 0x20 - IUCLC = 0x1000 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x400 @@ -811,15 +772,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x300 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -844,7 +800,6 @@ const ( OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 - OLCUC = 0x4 ONLCR = 0x2 ONLRET = 0x20 ONOCR = 0x10 @@ -1443,21 +1398,10 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0xc00 - TABDLY = 0xc00 TCFLSH = 0x2000741f - TCGETA = 0x40147417 - TCGETS = 0x402c7413 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_COOKIE_IN_ALWAYS = 0x1 TCP_COOKIE_MAX = 0x10 @@ -1496,15 +1440,6 @@ const ( TCP_USER_TIMEOUT = 0x12 TCP_WINDOW_CLAMP = 0xa TCSAFLUSH = 0x2 - TCSBRK = 0x2000741d - TCSBRKP = 0x5425 - TCSETA = 0x80147418 - TCSETAF = 0x8014741c - TCSETAW = 0x80147419 - TCSETS = 0x802c7414 - TCSETSF = 0x802c7416 - TCSETSW = 0x802c7415 - TCXONC = 0x2000741e TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c @@ -1636,8 +1571,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4000 - XTABS = 0xc00 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 407e6b539..ccf05a274 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -149,7 +149,6 @@ const ( B75 = 0x2 B921600 = 0x16 B9600 = 0xd - BOTHER = 0x1f BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 @@ -193,32 +192,7 @@ const ( BPF_X = 0x8 BPF_XOR = 0xa0 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0xff - CBAUDEX = 0x0 CFLUSH = 0xf - CIBAUD = 0xff0000 CLOCAL = 0x8000 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 @@ -241,7 +215,6 @@ const ( CLONE_FILES = 0x400 
CLONE_FS = 0x200 CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 @@ -258,14 +231,7 @@ const ( CLONE_UNTRACED = 0x800000 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x80000000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -399,9 +365,6 @@ const ( EXTPROC = 0x10000000 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -437,7 +400,6 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x1 HUPCL = 0x4000 - IBSHIFT = 0x10 ICANON = 0x100 ICMPV6_FILTER = 0x1 ICRNL = 0x100 @@ -681,7 +643,6 @@ const ( IP_XFRM_POLICY = 0x11 ISIG = 0x80 ISTRIP = 0x20 - IUCLC = 0x1000 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x400 @@ -819,15 +780,10 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 NLA_ALIGNTO = 0x4 NLA_F_NESTED = 0x8000 NLA_F_NET_BYTEORDER = 0x4000 NLA_HDRLEN = 0x4 - NLDLY = 0x300 NLMSG_ALIGNTO = 0x4 NLMSG_DONE = 0x3 NLMSG_ERROR = 0x2 @@ -852,7 +808,6 @@ const ( OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 - OLCUC = 0x4 ONLCR = 0x2 ONLRET = 0x20 ONOCR = 0x10 @@ -1442,21 +1397,10 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0xc00 - TABDLY = 0xc00 TCFLSH = 0x2000741f - TCGETA = 0x40147417 - TCGETS = 0x402c7413 TCIFLUSH = 0x0 - TCIOFF = 0x2 TCIOFLUSH = 0x2 - TCION = 0x3 TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 TCP_CONGESTION = 0xd TCP_COOKIE_IN_ALWAYS = 0x1 TCP_COOKIE_MAX = 0x10 @@ -1495,15 +1439,6 @@ const ( TCP_USER_TIMEOUT = 0x12 TCP_WINDOW_CLAMP = 0xa TCSAFLUSH = 0x2 - TCSBRK = 0x2000741d - TCSBRKP = 0x5425 - TCSETA = 0x80147418 - TCSETAF = 0x8014741c - TCSETAW = 0x80147419 - TCSETS = 0x802c7414 - TCSETSF = 0x802c7416 - TCSETSW = 0x802c7415 - TCXONC = 0x2000741e TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c @@ -1635,8 +1570,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XCASE = 0x4000 - XTABS = 0xc00 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go deleted file mode 100644 index 40c9b8793..000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ /dev/null @@ -1,2046 +0,0 @@ -// mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// +build s390x,linux - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- -m64 _const.go - -package unix - -import "syscall" - -const ( - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - 
ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_X25 = 0x10f - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_OR = 0x40 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - 
CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 
0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0x5 - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - 
IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_UNICAST_HOPS = 0x10 - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - 
MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - NAME_MAX = 0xff - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPOST = 0x1 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x4000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY 
= 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - 
PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_DISABLE_TE = 0x5010 - PTRACE_ENABLE_TE = 0x5009 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_LAST_BREAK = 0x5006 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_AREA = 0x5003 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_AREA = 0x5002 - PTRACE_PEEKUSR = 0x3 - PTRACE_PEEKUSR_AREA = 0x5000 - PTRACE_PEEK_SYSTEM_CALL = 0x5007 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_AREA = 0x5005 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_AREA = 0x5004 - PTRACE_POKEUSR = 0x6 - PTRACE_POKEUSR_AREA = 0x5001 - PTRACE_POKE_SYSTEM_CALL = 0x5008 - PTRACE_PROT = 0x15 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SEIZE = 0x4206 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLEBLOCK = 0xc - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TE_ABORT_RAND = 0x5011 - PTRACE_TRACEME = 0x0 - PT_ACR0 = 0x90 - PT_ACR1 = 0x94 - PT_ACR10 = 0xb8 - PT_ACR11 = 0xbc - PT_ACR12 = 0xc0 - PT_ACR13 = 0xc4 - PT_ACR14 = 0xc8 - PT_ACR15 = 0xcc - PT_ACR2 = 0x98 - PT_ACR3 = 0x9c - PT_ACR4 = 0xa0 - PT_ACR5 = 0xa4 - PT_ACR6 = 0xa8 - PT_ACR7 = 0xac - PT_ACR8 = 0xb0 - PT_ACR9 = 0xb4 - PT_CR_10 = 0x168 - PT_CR_11 = 0x170 - PT_CR_9 = 0x160 - PT_ENDREGS = 0x1af - PT_FPC = 0xd8 - PT_FPR0 = 0xe0 - PT_FPR1 = 0xe8 - PT_FPR10 = 0x130 - PT_FPR11 = 0x138 - PT_FPR12 = 0x140 - PT_FPR13 = 0x148 - PT_FPR14 = 0x150 - PT_FPR15 = 0x158 - PT_FPR2 = 0xf0 - PT_FPR3 = 0xf8 - PT_FPR4 = 0x100 - PT_FPR5 = 0x108 - PT_FPR6 = 0x110 - PT_FPR7 = 0x118 - PT_FPR8 = 0x120 - PT_FPR9 = 0x128 - PT_GPR0 = 0x10 - PT_GPR1 = 0x18 - PT_GPR10 = 0x60 - PT_GPR11 = 0x68 - PT_GPR12 = 0x70 - PT_GPR13 = 0x78 - PT_GPR14 = 0x80 - PT_GPR15 = 0x88 - PT_GPR2 = 0x20 - PT_GPR3 = 0x28 - PT_GPR4 = 0x30 - PT_GPR5 = 0x38 - PT_GPR6 = 0x40 - PT_GPR7 = 0x48 - PT_GPR8 = 0x50 - PT_GPR9 = 0x58 - PT_IEEE_IP = 0x1a8 - PT_LASTOFF = 0x1a8 - PT_ORIGGPR2 = 0xd0 - PT_PSWADDR = 0x8 - PT_PSWMASK = 0x0 
- RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_NOFILE = 0x7 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x10 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x16 - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x13 - RTM_NR_MSGTYPES = 0x4c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x11 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_GATED = 0x8 - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - 
SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPNS = 0x23 - SCM_WIFI_STATUS = 0x29 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCGARP = 0x8954 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGPGRP = 0x8904 - SIOCGRARP = 0x8961 - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ATM = 0x108 - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_PACKET = 0x107 - SOL_RAW = 0xff - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_X25 = 0x106 - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_BINDTODEVICE = 0x19 - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TYPE = 0x3 - SO_WIFI_STATUS = 0x29 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - 
S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_CC_INFO = 0x1a - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_INFO = 0xb - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TOSTOP = 0x100 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETDEBUG = 0x400454c9 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM 
= 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMIN = 0x6 - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x62) - EADDRNOTAVAIL = syscall.Errno(0x63) - EADV = syscall.Errno(0x44) - EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) - EALREADY = syscall.Errno(0x72) - EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) - EBADFD = syscall.Errno(0x4d) - EBADMSG = syscall.Errno(0x4a) - EBADR = syscall.Errno(0x35) - EBADRQC = syscall.Errno(0x38) - EBADSLT = syscall.Errno(0x39) - EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) - ECHRNG = syscall.Errno(0x2c) - ECOMM = syscall.Errno(0x46) - ECONNABORTED = syscall.Errno(0x67) - ECONNREFUSED = syscall.Errno(0x6f) - ECONNRESET = syscall.Errno(0x68) - EDEADLK = syscall.Errno(0x23) - EDEADLOCK = syscall.Errno(0x23) - EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) - EDOTDOT = syscall.Errno(0x49) - EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EHOSTDOWN = syscall.Errno(0x70) - EHOSTUNREACH = syscall.Errno(0x71) - EHWPOISON = syscall.Errno(0x85) - EIDRM = syscall.Errno(0x2b) - EILSEQ = syscall.Errno(0x54) - EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) - EISNAM = syscall.Errno(0x78) - EKEYEXPIRED = syscall.Errno(0x7f) - EKEYREJECTED = syscall.Errno(0x81) - EKEYREVOKED = syscall.Errno(0x80) - EL2HLT = syscall.Errno(0x33) - EL2NSYNC = syscall.Errno(0x2d) - EL3HLT = syscall.Errno(0x2e) - EL3RST = syscall.Errno(0x2f) - ELIBACC = syscall.Errno(0x4f) - ELIBBAD = syscall.Errno(0x50) - ELIBEXEC = syscall.Errno(0x53) - ELIBMAX = syscall.Errno(0x52) - ELIBSCN = syscall.Errno(0x51) - ELNRNG = syscall.Errno(0x30) - ELOOP = syscall.Errno(0x28) - EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x5a) - EMULTIHOP = syscall.Errno(0x48) - ENAMETOOLONG = syscall.Errno(0x24) - ENAVAIL = syscall.Errno(0x77) - ENETDOWN = syscall.Errno(0x64) - ENETRESET = syscall.Errno(0x66) - ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) - ENOANO = syscall.Errno(0x37) - ENOBUFS = syscall.Errno(0x69) - ENOCSI = syscall.Errno(0x32) - ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOKEY = syscall.Errno(0x7e) - ENOLCK = syscall.Errno(0x25) - ENOLINK = syscall.Errno(0x43) - ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x2a) - ENONET = syscall.Errno(0x40) - ENOPKG = syscall.Errno(0x41) - 
ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x3f) - ENOSTR = syscall.Errno(0x3c) - ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x27) - ENOTNAM = syscall.Errno(0x76) - ENOTRECOVERABLE = syscall.Errno(0x83) - ENOTSOCK = syscall.Errno(0x58) - ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) - ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x5f) - EOVERFLOW = syscall.Errno(0x4b) - EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) - EPROTO = syscall.Errno(0x47) - EPROTONOSUPPORT = syscall.Errno(0x5d) - EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) - EREMCHG = syscall.Errno(0x4e) - EREMOTE = syscall.Errno(0x42) - EREMOTEIO = syscall.Errno(0x79) - ERESTART = syscall.Errno(0x55) - ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) - ESHUTDOWN = syscall.Errno(0x6c) - ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESRMNT = syscall.Errno(0x45) - ESTALE = syscall.Errno(0x74) - ESTRPIPE = syscall.Errno(0x56) - ETIME = syscall.Errno(0x3e) - ETIMEDOUT = syscall.Errno(0x6e) - ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) - EUCLEAN = syscall.Errno(0x75) - EUNATCH = syscall.Errno(0x31) - EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) - EXFULL = syscall.Errno(0x36) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0x7) - SIGCHLD = syscall.Signal(0x11) - SIGCLD = syscall.Signal(0x11) - SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPOLL = syscall.Signal(0x1d) - SIGPROF = syscall.Signal(0x1b) - SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTKFLT = syscall.Signal(0x10) - SIGSTOP = syscall.Signal(0x13) - SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x14) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) - SIGURG = syscall.Signal(0x17) - SIGUSR1 = syscall.Signal(0xa) - SIGUSR2 = syscall.Signal(0xc) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 
26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware 
error", -} - -// Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go deleted file mode 100644 index 62680ed8a..000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ /dev/null @@ -1,2096 +0,0 @@ -// mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// +build sparc64,linux - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- -m64 _const.go - -package unix - -import "syscall" - -const ( - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2a - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - 
ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_X25 = 0x10f - ASI_LEON_DFLUSH = 0x11 - ASI_LEON_IFLUSH = 0x10 - ASI_LEON_MMUFLUSH = 0x18 - B0 = 0x0 - B1000000 = 0x100c - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x100d - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100e - B153600 = 0x1006 - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100f - B230400 = 0x1003 - B2400 = 0xb - B300 = 0x7 - B307200 = 0x1007 - B38400 = 0xf - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x100a - B57600 = 0x1001 - B576000 = 0x100b - B600 = 0x8 - B614400 = 0x1008 - B75 = 0x2 - B76800 = 0x1005 - B921600 = 0x1009 - B9600 = 0xd - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_OR = 0x40 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 
0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - EMT_TAGOVF = 0x1 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x400000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x7 - F_GETLK64 = 0x7 - F_GETOWN = 0x5 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x1 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x8 - F_SETLK64 = 0x8 - F_SETLKW = 0x9 - F_SETLKW64 = 0x9 - F_SETOWN = 0x6 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x3 - F_WRLCK = 0x2 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - 
IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x400000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x4000 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 
0x35 - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_UNICAST_HOPS = 0x10 - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_GROWSDOWN = 0x200 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x100 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_SHARED = 0x1 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - 
MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - NAME_MAX = 0xff - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPOST = 0x1 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x400000 - O_CREAT = 0x200 - O_DIRECT = 0x100000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x2000 - O_EXCL = 0x800 - O_FSYNC = 0x802000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x4004 - O_NOATIME = 0x200000 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x4000 - O_PATH = 0x1000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x802000 - O_SYNC = 0x802000 - O_TMPFILE = 0x2010000 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - 
PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - 
PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPAREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPREGS64 = 0x19 - PTRACE_GETREGS = 0xc - PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_READDATA = 0x10 - PTRACE_READTEXT = 0x12 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPAREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPREGS64 = 0x1a - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLESTEP = 0x9 - PTRACE_SPARC_DETACH = 0xb - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - PTRACE_WRITEDATA = 0x11 - PTRACE_WRITETEXT = 0x13 - PT_FP = 0x48 - PT_G0 = 0x10 - PT_G1 = 0x14 - PT_G2 = 0x18 - PT_G3 = 0x1c - PT_G4 = 0x20 - PT_G5 = 0x24 - PT_G6 = 0x28 - PT_G7 = 0x2c - PT_I0 = 0x30 - PT_I1 = 0x34 - PT_I2 = 0x38 - PT_I3 = 0x3c - PT_I4 = 0x40 - PT_I5 = 0x44 - PT_I6 = 0x48 - PT_I7 = 0x4c - PT_NPC = 0x8 - PT_PC = 0x4 - PT_PSR = 0x0 - PT_REGS_MAGIC = 0x57ac6c00 - PT_TNPC = 0x90 - PT_TPC = 0x88 - PT_TSTATE = 0x80 - PT_V9_FP = 0x70 - PT_V9_G0 = 0x0 - PT_V9_G1 = 0x8 - PT_V9_G2 = 0x10 - PT_V9_G3 = 0x18 - PT_V9_G4 = 0x20 - PT_V9_G5 = 0x28 - PT_V9_G6 = 0x30 - PT_V9_G7 = 0x38 - PT_V9_I0 = 0x40 - PT_V9_I1 = 0x48 - PT_V9_I2 = 0x50 - PT_V9_I3 = 0x58 - PT_V9_I4 = 0x60 - PT_V9_I5 = 0x68 - PT_V9_I6 = 0x70 - PT_V9_I7 = 0x78 - PT_V9_MAGIC = 0x9c - PT_V9_TNPC = 0x90 - PT_V9_TPC = 0x88 - PT_V9_TSTATE = 0x80 - PT_V9_Y = 0x98 - PT_WIM = 0x10 - PT_Y = 0xc - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_NOFILE = 0x6 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x10 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x18 - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - 
RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x11 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_GATED = 0x8 - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x23 - SCM_TIMESTAMPNS = 0x21 - SCM_WIFI_STATUS = 0x25 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGRARP = 0x8961 - 
SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SOCK_CLOEXEC = 0x400000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_NONBLOCK = 0x4000 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_X25 = 0x106 - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x8000 - SO_ATTACH_BPF = 0x34 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x35 - SO_ATTACH_REUSEPORT_EBPF = 0x36 - SO_BINDTODEVICE = 0xd - SO_BPF_EXTENSIONS = 0x32 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0x400 - SO_BUSY_POLL = 0x30 - SO_CNX_ADVICE = 0x37 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x33 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x28 - SO_MARK = 0x22 - SO_MAX_PACING_RATE = 0x31 - SO_NOFCS = 0x27 - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x2 - SO_PASSSEC = 0x1f - SO_PEEK_OFF = 0x26 - SO_PEERCRED = 0x40 - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x100b - SO_RCVLOWAT = 0x800 - SO_RCVTIMEO = 0x2000 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x24 - SO_SECURITY_AUTHENTICATION = 0x5001 - SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 - SO_SELECT_ERR_QUEUE = 0x29 - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x100a - SO_SNDLOWAT = 0x1000 - SO_SNDTIMEO = 0x4000 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x23 - SO_TIMESTAMPNS = 0x21 - SO_TYPE = 0x1008 - SO_WIFI_STATUS = 0x25 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TCFLSH = 0x20005407 - TCGETA = 0x40125401 - TCGETS = 0x40245408 - TCGETS2 = 0x402c540c - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_CC_INFO = 0x1a - 
TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_INFO = 0xb - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCSAFLUSH = 0x2 - TCSBRK = 0x20005405 - TCSBRKP = 0x5425 - TCSETA = 0x80125402 - TCSETAF = 0x80125404 - TCSETAW = 0x80125403 - TCSETS = 0x80245409 - TCSETS2 = 0x802c540d - TCSETSF = 0x8024540b - TCSETSF2 = 0x802c540f - TCSETSW = 0x8024540a - TCSETSW2 = 0x802c540e - TCXONC = 0x20005406 - TIOCCBRK = 0x2000747a - TIOCCONS = 0x20007424 - TIOCEXCL = 0x2000740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x40047400 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x40047483 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40047486 - TIOCGRS485 = 0x40205441 - TIOCGSERIAL = 0x541e - TIOCGSID = 0x40047485 - TIOCGSOFTCAR = 0x40047464 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGET = 0x4004746a - TIOCMIWAIT = 0x545c - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_LOOP = 0x8000 - TIOCM_OUT1 = 0x2000 - TIOCM_OUT2 = 0x4000 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007484 - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x80047401 - TIOCSIG = 0x80047488 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x80047482 - TIOCSPTLCK = 0x80047487 - TIOCSRS485 = 0xc0205442 - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x80047465 - TIOCSTART = 0x2000746e - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x20005437 - TOSTOP = 0x100 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETDEBUG = 0x800454c9 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETTXFILTER = 0x800454d1 - 
TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - VDISCARD = 0xd - VDSUSP = 0xb - VEOF = 0x4 - VEOL = 0x5 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMIN = 0x4 - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WRAP = 0x20000 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XCASE = 0x4 - XTABS = 0x1800 - __TIOCFLUSH = 0x80047410 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EADV = syscall.Errno(0x53) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0xb) - EALREADY = syscall.Errno(0x25) - EBADE = syscall.Errno(0x66) - EBADF = syscall.Errno(0x9) - EBADFD = syscall.Errno(0x5d) - EBADMSG = syscall.Errno(0x4c) - EBADR = syscall.Errno(0x67) - EBADRQC = syscall.Errno(0x6a) - EBADSLT = syscall.Errno(0x6b) - EBFONT = syscall.Errno(0x6d) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x7f) - ECHILD = syscall.Errno(0xa) - ECHRNG = syscall.Errno(0x5e) - ECOMM = syscall.Errno(0x55) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0x4e) - EDEADLOCK = syscall.Errno(0x6c) - EDESTADDRREQ = syscall.Errno(0x27) - EDOM = syscall.Errno(0x21) - EDOTDOT = syscall.Errno(0x58) - EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EHWPOISON = syscall.Errno(0x87) - EIDRM = syscall.Errno(0x4d) - EILSEQ = syscall.Errno(0x7a) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - EISNAM = syscall.Errno(0x78) - EKEYEXPIRED = syscall.Errno(0x81) - EKEYREJECTED = syscall.Errno(0x83) - EKEYREVOKED = syscall.Errno(0x82) - EL2HLT = syscall.Errno(0x65) - EL2NSYNC = syscall.Errno(0x5f) - EL3HLT = syscall.Errno(0x60) - EL3RST = syscall.Errno(0x61) - ELIBACC = syscall.Errno(0x72) - ELIBBAD = syscall.Errno(0x70) - ELIBEXEC = syscall.Errno(0x6e) - ELIBMAX = syscall.Errno(0x7b) - ELIBSCN = syscall.Errno(0x7c) - ELNRNG = syscall.Errno(0x62) - ELOOP = syscall.Errno(0x3e) - EMEDIUMTYPE = syscall.Errno(0x7e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - EMULTIHOP = syscall.Errno(0x57) - ENAMETOOLONG = syscall.Errno(0x3f) - ENAVAIL = syscall.Errno(0x77) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOANO = syscall.Errno(0x69) - ENOBUFS = syscall.Errno(0x37) - ENOCSI = syscall.Errno(0x64) - ENODATA = syscall.Errno(0x6f) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOKEY = syscall.Errno(0x80) - ENOLCK = syscall.Errno(0x4f) - ENOLINK = syscall.Errno(0x52) - ENOMEDIUM = syscall.Errno(0x7d) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x4b) - ENONET = syscall.Errno(0x50) - ENOPKG = syscall.Errno(0x71) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x4a) - ENOSTR = syscall.Errno(0x48) - ENOSYS 
= syscall.Errno(0x5a) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTNAM = syscall.Errno(0x76) - ENOTRECOVERABLE = syscall.Errno(0x85) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) - ENOTUNIQ = syscall.Errno(0x73) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x2d) - EOVERFLOW = syscall.Errno(0x5c) - EOWNERDEAD = syscall.Errno(0x84) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROTO = syscall.Errno(0x56) - EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - ERANGE = syscall.Errno(0x22) - EREMCHG = syscall.Errno(0x59) - EREMOTE = syscall.Errno(0x47) - EREMOTEIO = syscall.Errno(0x79) - ERESTART = syscall.Errno(0x74) - ERFKILL = syscall.Errno(0x86) - EROFS = syscall.Errno(0x1e) - ERREMOTE = syscall.Errno(0x51) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESRMNT = syscall.Errno(0x54) - ESTALE = syscall.Errno(0x46) - ESTRPIPE = syscall.Errno(0x5b) - ETIME = syscall.Errno(0x49) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUCLEAN = syscall.Errno(0x75) - EUNATCH = syscall.Errno(0x63) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) - EXFULL = syscall.Errno(0x68) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGLOST = syscall.Signal(0x1d) - SIGPIPE = syscall.Signal(0xd) - SIGPOLL = syscall.Signal(0x17) - SIGPROF = syscall.Signal(0x1b) - SIGPWR = syscall.Signal(0x1d) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on 
device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol", - 48: "address already in use", - 49: "cannot assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "transport endpoint is already connected", - 57: "transport endpoint is not connected", - 58: "cannot send after transport endpoint shutdown", - 59: "too many references: cannot splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disk quota exceeded", - 70: "stale file handle", - 71: "object is remote", - 72: "device not a stream", - 73: "timer expired", - 74: "out of streams resources", - 75: "no message of desired type", - 76: "bad message", - 77: "identifier removed", - 78: "resource deadlock avoided", - 79: "no locks available", - 80: "machine is not on the network", - 81: "unknown error 81", - 82: "link has been severed", - 83: "advertise error", - 84: "srmount error", - 85: "communication error on send", - 86: "protocol error", - 87: "multihop attempted", - 88: "RFS specific error", - 89: "remote address changed", - 90: "function not implemented", - 91: "streams pipe error", - 92: "value too large for defined data type", - 93: "file descriptor in bad state", - 94: "channel number out of range", - 95: "level 2 not synchronized", - 96: "level 3 halted", - 97: "level 3 reset", - 98: "link number out of range", - 99: "protocol driver not attached", - 100: "no CSI structure available", - 101: "level 2 halted", - 102: "invalid exchange", - 103: "invalid request descriptor", - 104: "exchange full", - 105: "no anode", - 106: "invalid request code", - 107: "invalid slot", - 108: "file locking deadlock error", - 109: "bad font file format", - 110: "cannot exec a shared library directly", - 111: "no data available", - 112: "accessing a corrupted shared library", - 113: "package not installed", - 114: "can not access a needed shared library", - 115: "name not unique on network", - 116: "interrupted system call should be restarted", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "invalid or incomplete multibyte or wide character", - 123: "attempting to link in too many shared libraries", - 124: ".lib section in a.out corrupted", - 125: "no medium found", - 126: "wrong medium type", - 127: "operation canceled", - 128: "required key not available", - 129: "key has expired", - 130: "key has been revoked", - 131: "key was rejected by service", - 132: "owner died", - 133: "state not recoverable", - 134: "operation not possible due to RF-kill", - 135: 
"memory page has hardware error", -} - -// Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "resource lost", - 30: "user defined signal 1", - 31: "user defined signal 2", -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index a08922b98..afdf7c565 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -161,14 +161,6 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x800 - CLOCK_HIGHRES = 0x4 - CLOCK_LEVEL = 0xa - CLOCK_MONOTONIC = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x5 - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x3 - CLOCK_THREAD_CPUTIME_ID = 0x2 - CLOCK_VIRTUAL = 0x1 CREAD = 0x80 CS5 = 0x0 CS6 = 0x10 @@ -176,7 +168,6 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTART = 0x11 - CSTATUS = 0x14 CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a @@ -766,7 +757,9 @@ const ( SIOCDARP = -0x7fdb96e0 SIOCDELMULTI = -0x7fdf96ce SIOCDELRT = -0x7fcf8df5 + SIOCDIPSECONFIG = -0x7ffb9669 SIOCDXARP = -0x7fff9658 + SIOCFIPSECONFIG = -0x7ffb966b SIOCGARP = -0x3fdb96e1 SIOCGDSTINFO = -0x3fff965c SIOCGENADDR = -0x3fdf96ab @@ -828,6 +821,7 @@ const ( SIOCLIFGETND = -0x3f879672 SIOCLIFREMOVEIF = -0x7f879692 SIOCLIFSETND = -0x7f879671 + SIOCLIPSECONFIG = -0x7ffb9668 SIOCLOWER = -0x7fdf96d7 SIOCSARP = -0x7fdb96e2 SIOCSCTPGOPT = -0x3fef9653 @@ -850,6 +844,7 @@ const ( SIOCSIFNETMASK = -0x7fdf96e6 SIOCSIP6ADDRPOLICY = -0x7fff965d SIOCSIPMSFILTER = -0x7ffb964b + SIOCSIPSECONFIG = -0x7ffb966a SIOCSLGETREQ = -0x3fdf96b9 SIOCSLIFADDR = -0x7f879690 SIOCSLIFBRDADDR = -0x7f879684 @@ -956,8 +951,6 @@ const ( SO_VRRP = 0x1017 SO_WROFF = 0x2 TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d TCIFLUSH = 0x0 TCIOFLUSH = 0x2 TCOFLUSH = 0x1 @@ -984,14 +977,6 @@ const ( TCP_RTO_MAX = 0x1b TCP_RTO_MIN = 0x1a TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETSF = 0x5410 - TCSETSW = 0x540f - TCXONC = 0x5406 TIOC = 0x5400 TIOCCBRK = 0x747a TIOCCDTR = 0x7478 @@ -1067,7 +1052,6 @@ const ( VQUIT = 0x1 VREPRINT = 0xc VSTART = 0x8 - VSTATUS = 0x10 VSTOP = 0x9 VSUSP = 0xa VSWTCH = 0x7 @@ -1231,7 +1215,6 @@ const ( SIGFREEZE = syscall.Signal(0x22) SIGHUP = syscall.Signal(0x1) SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x29) SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) SIGIOT = syscall.Signal(0x6) @@ -1432,5 +1415,4 @@ var signals = [...]string{ 38: "resource Control Exceeded", 39: "reserved for JVM 1", 40: "reserved for JVM 2", - 41: "information Request", } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 031034a34..a15aaf120 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old 
*byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ee96f78ba..e28b044e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } @@ -1416,22 +1415,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index e52cd0d54..640e85426 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 9863ef99e..933f67bbf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go similarity index 57% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go rename to vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go index 4c7ed08cc..32e46af60 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go @@ -1,7 +1,7 @@ -// mksyscall.pl syscall_linux.go syscall_linux_s390x.go +// mksyscall.pl -l32 -dragonfly syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_386.go // MACHINE GENERATED 
BY THE COMMAND ABOVE; DO NOT EDIT -// +build s390x,linux +// +build 386,dragonfly package unix @@ -14,20 +14,9 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -36,15 +25,8 @@ func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - use(unsafe.Pointer(_p0)) - fd = int(r0) +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -53,9 +35,9 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -64,21 +46,9 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - use(unsafe.Pointer(_p0)) - n = int(r0) +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -87,20 +57,8 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, 
uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } @@ -109,14 +67,8 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - use(unsafe.Pointer(_p0)) +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } @@ -125,14 +77,9 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - use(unsafe.Pointer(_p0)) +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -141,14 +88,8 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - use(unsafe.Pointer(_p0)) +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -157,8 +98,8 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } @@ -167,15 +108,8 @@ func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } @@ -184,9 +118,8 
@@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } @@ -195,8 +128,8 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } @@ -205,14 +138,8 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -221,26 +148,15 @@ func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - use(unsafe.Pointer(_p2)) + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -249,14 +165,14 @@ func mount(source string, target string, fstype string, flags uintptr, data *byt // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) 
- use(unsafe.Pointer(_p0)) + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } @@ -265,9 +181,9 @@ func Acct(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -276,14 +192,9 @@ func Adjtimex(buf *Timex) (state int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -292,14 +203,9 @@ func Chdir(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -308,8 +214,14 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -318,8 +230,14 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -328,9 +246,8 @@ func Close(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = 
errnoErr(e1) } @@ -339,8 +256,9 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -349,9 +267,10 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) +func pipe() (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + r = int(r0) + w = int(r1) if e1 != 0 { err = errnoErr(e1) } @@ -360,9 +279,15 @@ func EpollCreate(size int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) +func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), uintptr(offset>>32)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -371,8 +296,15 @@ func EpollCreate1(flag int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) +func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EXTPWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(offset), uintptr(offset>>32)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -381,20 +313,13 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { +func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -404,8 +329,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = 
errnoErr(e1) } @@ -414,8 +339,14 @@ func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -424,8 +355,14 @@ func Fchdir(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -434,13 +371,13 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { +func Chmod(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -450,13 +387,13 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { +func Chown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -466,9 +403,14 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -477,8 +419,8 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -487,8 +429,9 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) if e1 != 
0 { err = errnoErr(e1) } @@ -497,8 +440,8 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } @@ -507,26 +450,15 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -535,25 +467,8 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -562,8 +477,8 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -572,9 +487,8 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } @@ -583,35 +497,19 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) - tid = int(r0) +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if 
len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - sz = int(r0) +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -620,15 +518,8 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - use(unsafe.Pointer(_p0)) - watchdesc = int(r0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -637,9 +528,8 @@ func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -648,9 +538,8 @@ func InotifyInit1(flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -659,8 +548,8 @@ func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -669,14 +558,14 @@ func Kill(pid int, sig syscall.Signal) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Klogctl(typ int, buf []byte) (n int, err error) { +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -686,85 +575,41 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, 
_, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - use(unsafe.Pointer(_p0)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -773,57 +618,33 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } 
+func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -832,20 +653,8 @@ func Removexattr(path string, attr string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -854,14 +663,8 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -870,14 +673,9 @@ func Setdomainname(p []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -886,8 +684,8 @@ func Sethostname(p []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -896,29 +694,24 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, 
uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } @@ -927,8 +720,9 @@ func Setns(fd int, nstype int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -937,54 +731,14 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setxattr(path string, attr string, data []byte, flags int) (err error) { +func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { - r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) - n = int64(r0) if e1 != 0 { err = errnoErr(e1) } @@ -993,63 +747,20 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} 
- -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { +func Link(path string, link string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(target) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } @@ -1058,8 +769,8 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1068,25 +779,14 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1095,9 +795,14 @@ func exitThread(code int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1106,9 +811,14 @@ func readlen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1117,8 +827,14 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func munmap(addr uintptr, length uintptr) (err error) { - _, _, 
e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1127,14 +843,14 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, advice int) (err error) { +func Mlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1143,14 +859,8 @@ func Madvise(b []byte, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1159,14 +869,14 @@ func Mprotect(b []byte, prot int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlock(b []byte) (err error) { +func Mprotect(b []byte, prot int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } @@ -1191,8 +901,8 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1201,8 +911,8 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1211,8 +921,15 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + use(unsafe.Pointer(_p0)) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1221,15 +938,15 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func 
Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + use(unsafe.Pointer(_p0)) + val = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1238,8 +955,15 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fadvise(fd int, offset int64, length int64, advice int) (err error) { - _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1248,8 +972,21 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + use(unsafe.Pointer(_p0)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1258,8 +995,20 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } @@ -1268,8 +1017,14 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, buf *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1278,8 +1033,14 @@ func Fstatfs(fd int, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1288,32 +1049,19 @@ func Ftruncate(fd int, 
length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - euid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + newoffset = int64(int64(r1)<<32 | int64(r0)) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1322,17 +1070,18 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1341,14 +1090,8 @@ func InotifyInit() (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - use(unsafe.Pointer(_p0)) +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1357,13 +1100,13 @@ func Lchown(path string, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func Setlogin(name string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(name) if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -1373,8 +1116,8 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1383,15 +1126,8 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } @@ -1400,15 +1136,8 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1417,9 +1146,8 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1428,9 +1156,8 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } @@ -1439,9 +1166,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { - r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) - written = int(r0) +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } @@ -1450,8 +1176,8 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1460,8 +1186,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) if e1 != 0 { 
err = errnoErr(e1) } @@ -1470,8 +1197,8 @@ func Setfsuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1480,8 +1207,8 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1490,8 +1217,14 @@ func Setresgid(rgid int, egid int, sgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1500,8 +1233,14 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1510,8 +1249,20 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } @@ -1520,9 +1271,8 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(r0) +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1531,13 +1281,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, 
_, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -1547,13 +1297,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, buf *Statfs_t) (err error) { +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -1563,8 +1321,14 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } @@ -1573,13 +1337,13 @@ func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Truncate(path string, length int64) (err error) { +func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) @@ -1589,19 +1353,15 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1610,8 +1370,9 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + ret = uintptr(r0) if e1 != 0 { err = 
errnoErr(e1) } @@ -1620,14 +1381,8 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1636,8 +1391,9 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1646,8 +1402,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 78de48dcf..3fa6ff796 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index fade994dc..1a0e528cd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index c28281e83..6e4cf1455 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go 
b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index a18ba5c88..1872d3230 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - use(_p0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index fa92387b1..81ae498a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) 
{ - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1577,33 +1578,6 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getrlimit(resource int, rlim *rlimit32) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -1642,30 +1616,3 @@ func Time(t *Time_t) (tt Time_t, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index b34d5c26f..2adb9284a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1221,23 +1222,6 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 
unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1403,16 +1387,6 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1819,22 +1793,6 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1852,14 +1810,3 @@ func pipe2(p *[2]_C_int, flags int) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 2e5cb3984..ca00ed3df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1644,25 +1645,9 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1743,7 +1728,7 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1759,14 +1744,3 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 0d584cc0d..8eafcebcb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return 
-} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if 
e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1211,23 +1212,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1736,6 +1720,17 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go deleted file mode 100644 index bf6f3603b..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ /dev/null @@ -1,1814 +0,0 @@ -// mksyscall.pl syscall_linux.go syscall_linux_mips64x.go -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// +build mips64,linux - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT
-
-func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
-	use(unsafe.Pointer(_p0))
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
[... remaining deleted lines omitted: the rest of the mksyscall-generated wrappers for linux/mips64, from ppoll through the file's final poll wrapper, each following the same generated pattern as openat above ...]
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
deleted file mode 100644
index 8c86bd70b..000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ /dev/null
@@ -1,1814 +0,0 @@
-// mksyscall.pl syscall_linux.go syscall_linux_mips64x.go
-// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
-
-// +build mips64le,linux
-
-package unix
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(oldpath)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(newpath)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
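All of the wrappers deleted above and below share one mksyscall-generated template: marshal any string arguments with BytePtrFromString, issue the raw Syscall/Syscall6 (or RawSyscall for fork-safe calls), keep the marshaled pointers alive with use(), and map a non-zero errno through errnoErr. A minimal standalone sketch of that pattern, written against the public golang.org/x/sys/unix API rather than the package-internal helpers the vendored files use, might look like this:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// openat approximates the shape of the generated wrappers being
// deleted here; the real generated code calls package-internal
// helpers (use, errnoErr) where this sketch uses public analogues.
func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
	// NUL-terminate the Go string for the kernel.
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return
	}
	r0, _, e1 := unix.Syscall6(unix.SYS_OPENAT, uintptr(dirfd),
		uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(mode), 0, 0)
	fd = int(r0)
	if e1 != 0 {
		// e1 is a syscall.Errno; non-zero means the call failed.
		err = e1
	}
	return
}

func main() {
	fd, err := openat(unix.AT_FDCWD, "/etc/hostname", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("openat failed:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("opened fd", fd)
}

The generated files differ from this sketch in two deliberate ways: errnoErr returns pre-allocated Errno values for the common EAGAIN/EINVAL/ENOENT cases so the hot path does not allocate, and use() (a no-op the compiler cannot optimize away, predating runtime.KeepAlive) pins _p0 until the kernel has read it. Slice arguments get a related trick, visible in the read/write/Getcwd wrappers: a zero-length slice is passed as the address of a shared _zero byte so the kernel always receives a valid pointer.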
[... deleted lines omitted: the same mksyscall-generated wrappers as in zsyscall_linux_mips64.go above, regenerated for mips64le, from openat through Mlock ...]
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, buf *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - euid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, n int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 
unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { - r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) - written = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Shutdown(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, buf *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err 
error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { - r0, _, 
e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) - xaddr = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstat(fd int, st *stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func lstat(path string, st *stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func stat(path string, st *stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f5d488b4a..008a52638 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func 
Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1211,33 +1212,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1320,17 +1294,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -1393,16 +1356,6 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1827,50 +1780,3 @@ func Time(t *Time_t) (tt Time_t, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 
:= RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 5183711ec..d91f763af 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,7 +14,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { +func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -53,18 +53,7 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { +func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -87,7 +76,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { +func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) if err != nil { @@ -109,7 +98,7 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { +func unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -381,6 +370,23 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -572,17 +578,6 @@ func Getrusage(who int, rusage *Rusage) 
(err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -751,6 +746,16 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -773,18 +778,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) +func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -917,16 +912,6 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { @@ -1068,6 +1053,22 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1211,33 +1212,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd 
int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1320,17 +1294,6 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func InotifyInit() (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -1393,16 +1356,6 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1827,50 +1780,3 @@ func Time(t *Time_t) (tt Time_t, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Utime(path string, buf *Utimbuf) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go deleted file mode 100644 index beb83e4fd..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ /dev/null @@ -1,1845 +0,0 @@ -// mksyscall.pl syscall_linux.go syscall_linux_sparc64.go -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// +build sparc64,linux - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - use(unsafe.Pointer(_p0)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - use(unsafe.Pointer(_p0)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - use(unsafe.Pointer(_p2)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil 
{ - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - use(unsafe.Pointer(_p0)) - use(unsafe.Pointer(_p1)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - use(unsafe.Pointer(_p0)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - use(unsafe.Pointer(_p0)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - use(unsafe.Pointer(_p0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := 
-	_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func PivotRoot(newroot string, putold string) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(newroot)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(putold)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
-	_, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
-	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Removexattr(path string, attr string) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(attr)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(oldpath)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(newpath)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setdomainname(p []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sethostname(p []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
-	r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
-	pid = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tv *Timeval) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setns(fd int, nstype int) (err error) {
-	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
-	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setxattr(path string, attr string, data []byte, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(attr)
-	if err != nil {
-		return
-	}
-	var _p2 unsafe.Pointer
-	if len(data) > 0 {
-		_p2 = unsafe.Pointer(&data[0])
-	} else {
-		_p2 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() {
-	Syscall(SYS_SYNC, 0, 0, 0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sysinfo(info *Sysinfo_t) (err error) {
-	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
-	r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
-	n = int64(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
-	_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Times(tms *Tms) (ticks uintptr, err error) {
-	r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
-	ticks = uintptr(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(mask int) (oldmask int) {
-	r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
-	oldmask = int(r0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Uname(buf *Utsname) (err error) {
-	_, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(target string, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(target)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unshare(flags int) (err error) {
-	_, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ustat(dev int, ubuf *Ustat_t) (err error) {
-	_, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func exitThread(code int) (err error) {
-	_, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
-	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Madvise(b []byte, advice int) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
-	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
-	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(events) > 0 {
-		_p0 = unsafe.Pointer(&events[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
-	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, buf *Statfs_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
-	_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
-	r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
-	egid = int(r0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (euid int) {
-	r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
-	euid = int(r0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
-	r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
-	gid = int(r0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(resource int, rlim *Rlimit) (err error) {
-	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
-	r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
-	uid = int(r0)
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func InotifyInit() (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lchown(path string, uid int, gid int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, n int) (err error) {
-	_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pause() (err error) {
-	_, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (off int64, err error) {
-	r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
-	off = int64(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
-	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
-	written = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setfsgid(gid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setfsuid(uid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setregid(rgid int, egid int) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(resource int, rlim *Rlimit) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(fd int, how int) (err error) {
-	_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
-	r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
-	n = int64(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, buf *Statfs_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
-	_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
-	r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
-	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
-	_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
-	_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(n int, list *_Gid_t) (nn int, err error) {
-	r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
-	nn = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setgroups(n int, list *_Gid_t) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
-	_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
-	_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
-	_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
-	_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
-	_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(p) > 0 {
-		_p0 = unsafe.Pointer(&p[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
-	var _p0 unsafe.Pointer
-	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
-	r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
-	xaddr = uintptr(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettimeofday(tv *Timeval) (err error) {
-	_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe(p *[2]_C_int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe2(p *[2]_C_int, flags int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index b16e1d0ee..00ca1f9c1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 		_p0 = unsafe.Pointer(&_zero)
 	}
 	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	use(_p0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index b63667da9..03f31b973 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 		_p0 = unsafe.Pointer(&_zero)
 	}
 	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	use(_p0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index b0d19038d..84dc61cfa 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 		_p0 = unsafe.Pointer(&_zero)
 	}
 	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	use(_p0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index f91a5b856..02b3528a6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 		_p0 = unsafe.Pointer(&_zero)
 	}
 	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	use(_p0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 2e8d59d72..7dc2b7eaf 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -222,7 +222,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 		_p0 = unsafe.Pointer(&_zero)
 	}
 	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	use(_p0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index c0ecfc044..95cb1f65f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -10,22 +10,11 @@ import (
 	"unsafe"
 )
 
-//go:cgo_import_dynamic libc_pipe pipe "libc.so"
-//go:cgo_import_dynamic libc_getsockname getsockname "libsocket.so"
-//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
 //go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
 //go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
-//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
-//go:cgo_import_dynamic libc_gethostname gethostname "libc.so"
-//go:cgo_import_dynamic libc_utimes utimes "libc.so"
"libc.so" -//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" //go:cgo_import_dynamic libc_fcntl fcntl "libc.so" -//go:cgo_import_dynamic libc_futimesat futimesat "libc.so" -//go:cgo_import_dynamic libc_accept accept "libsocket.so" -//go:cgo_import_dynamic libc_recvmsg recvmsg "libsocket.so" -//go:cgo_import_dynamic libc_sendmsg sendmsg "libsocket.so" -//go:cgo_import_dynamic libc_acct acct "libc.so" -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" +//go:cgo_import_dynamic libsocket_accept accept "libsocket.so" +//go:cgo_import_dynamic libsocket_sendmsg sendmsg "libsocket.so" //go:cgo_import_dynamic libc_access access "libc.so" //go:cgo_import_dynamic libc_adjtime adjtime "libc.so" //go:cgo_import_dynamic libc_chdir chdir "libc.so" @@ -33,65 +22,44 @@ import ( //go:cgo_import_dynamic libc_chown chown "libc.so" //go:cgo_import_dynamic libc_chroot chroot "libc.so" //go:cgo_import_dynamic libc_close close "libc.so" -//go:cgo_import_dynamic libc_creat creat "libc.so" //go:cgo_import_dynamic libc_dup dup "libc.so" -//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" //go:cgo_import_dynamic libc_exit exit "libc.so" //go:cgo_import_dynamic libc_fchdir fchdir "libc.so" //go:cgo_import_dynamic libc_fchmod fchmod "libc.so" -//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" //go:cgo_import_dynamic libc_fchown fchown "libc.so" -//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" -//go:cgo_import_dynamic libc_fdatasync fdatasync "libc.so" //go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" //go:cgo_import_dynamic libc_fstat fstat "libc.so" //go:cgo_import_dynamic libc_getdents getdents "libc.so" //go:cgo_import_dynamic libc_getgid getgid "libc.so" //go:cgo_import_dynamic libc_getpid getpid "libc.so" -//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" -//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" //go:cgo_import_dynamic libc_geteuid geteuid "libc.so" //go:cgo_import_dynamic libc_getegid getegid "libc.so" //go:cgo_import_dynamic libc_getppid getppid "libc.so" //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" -//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc_listen listen "libsocket.so" +//go:cgo_import_dynamic libsocket_listen listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" -//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" -//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" -//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" //go:cgo_import_dynamic libc_mknod mknod "libc.so" -//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" -//go:cgo_import_dynamic libc_mlock mlock "libc.so" -//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" -//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" -//go:cgo_import_dynamic libc_munlock munlock "libc.so" -//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" //go:cgo_import_dynamic libc_open open "libc.so" -//go:cgo_import_dynamic libc_openat openat "libc.so" //go:cgo_import_dynamic libc_pathconf pathconf "libc.so" 
-//go:cgo_import_dynamic libc_pause pause "libc.so"
 //go:cgo_import_dynamic libc_pread pread "libc.so"
 //go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
 //go:cgo_import_dynamic libc_read read "libc.so"
 //go:cgo_import_dynamic libc_readlink readlink "libc.so"
 //go:cgo_import_dynamic libc_rename rename "libc.so"
-//go:cgo_import_dynamic libc_renameat renameat "libc.so"
 //go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
 //go:cgo_import_dynamic libc_lseek lseek "libc.so"
 //go:cgo_import_dynamic libc_setegid setegid "libc.so"
 //go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
 //go:cgo_import_dynamic libc_setgid setgid "libc.so"
-//go:cgo_import_dynamic libc_sethostname sethostname "libc.so"
 //go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
 //go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
 //go:cgo_import_dynamic libc_setregid setregid "libc.so"
@@ -99,51 +67,36 @@ import (
 //go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
 //go:cgo_import_dynamic libc_setsid setsid "libc.so"
 //go:cgo_import_dynamic libc_setuid setuid "libc.so"
-//go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so"
+//go:cgo_import_dynamic libsocket_shutdown shutdown "libsocket.so"
 //go:cgo_import_dynamic libc_stat stat "libc.so"
 //go:cgo_import_dynamic libc_symlink symlink "libc.so"
 //go:cgo_import_dynamic libc_sync sync "libc.so"
-//go:cgo_import_dynamic libc_times times "libc.so"
 //go:cgo_import_dynamic libc_truncate truncate "libc.so"
 //go:cgo_import_dynamic libc_fsync fsync "libc.so"
 //go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
 //go:cgo_import_dynamic libc_umask umask "libc.so"
-//go:cgo_import_dynamic libc_uname uname "libc.so"
-//go:cgo_import_dynamic libc_umount umount "libc.so"
 //go:cgo_import_dynamic libc_unlink unlink "libc.so"
-//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
-//go:cgo_import_dynamic libc_ustat ustat "libc.so"
-//go:cgo_import_dynamic libc_utime utime "libc.so"
-//go:cgo_import_dynamic libc_bind bind "libsocket.so"
-//go:cgo_import_dynamic libc_connect connect "libsocket.so"
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+//go:cgo_import_dynamic libsocket_bind bind "libsocket.so"
+//go:cgo_import_dynamic libsocket_connect connect "libsocket.so"
 //go:cgo_import_dynamic libc_mmap mmap "libc.so"
 //go:cgo_import_dynamic libc_munmap munmap "libc.so"
-//go:cgo_import_dynamic libc_sendto sendto "libsocket.so"
-//go:cgo_import_dynamic libc_socket socket "libsocket.so"
-//go:cgo_import_dynamic libc_socketpair socketpair "libsocket.so"
+//go:cgo_import_dynamic libsocket_sendto sendto "libsocket.so"
+//go:cgo_import_dynamic libsocket_socket socket "libsocket.so"
+//go:cgo_import_dynamic libsocket_socketpair socketpair "libsocket.so"
 //go:cgo_import_dynamic libc_write write "libc.so"
-//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so"
-//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
-//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
-//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
-//go:cgo_import_dynamic libc_sysconf sysconf "libc.so"
-
-//go:linkname procpipe libc_pipe
-//go:linkname procgetsockname libc_getsockname
-//go:linkname procGetcwd libc_getcwd
+//go:cgo_import_dynamic libsocket_getsockopt getsockopt "libsocket.so"
+//go:cgo_import_dynamic libsocket_getpeername getpeername "libsocket.so"
+//go:cgo_import_dynamic libsocket_getsockname getsockname "libsocket.so"
+//go:cgo_import_dynamic libsocket_setsockopt setsockopt "libsocket.so"
+//go:cgo_import_dynamic libsocket_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libsocket_recvmsg recvmsg "libsocket.so"
+
 //go:linkname procgetgroups libc_getgroups
 //go:linkname procsetgroups libc_setgroups
-//go:linkname procwait4 libc_wait4
-//go:linkname procgethostname libc_gethostname
-//go:linkname procutimes libc_utimes
-//go:linkname procutimensat libc_utimensat
 //go:linkname procfcntl libc_fcntl
-//go:linkname procfutimesat libc_futimesat
-//go:linkname procaccept libc_accept
-//go:linkname procrecvmsg libc_recvmsg
-//go:linkname procsendmsg libc_sendmsg
-//go:linkname procacct libc_acct
-//go:linkname procioctl libc_ioctl
+//go:linkname procaccept libsocket_accept
+//go:linkname procsendmsg libsocket_sendmsg
 //go:linkname procAccess libc_access
 //go:linkname procAdjtime libc_adjtime
 //go:linkname procChdir libc_chdir
@@ -151,65 +104,44 @@
 //go:linkname procChown libc_chown
 //go:linkname procChroot libc_chroot
 //go:linkname procClose libc_close
-//go:linkname procCreat libc_creat
 //go:linkname procDup libc_dup
-//go:linkname procDup2 libc_dup2
 //go:linkname procExit libc_exit
 //go:linkname procFchdir libc_fchdir
 //go:linkname procFchmod libc_fchmod
-//go:linkname procFchmodat libc_fchmodat
 //go:linkname procFchown libc_fchown
-//go:linkname procFchownat libc_fchownat
-//go:linkname procFdatasync libc_fdatasync
 //go:linkname procFpathconf libc_fpathconf
 //go:linkname procFstat libc_fstat
 //go:linkname procGetdents libc_getdents
 //go:linkname procGetgid libc_getgid
 //go:linkname procGetpid libc_getpid
-//go:linkname procGetpgid libc_getpgid
-//go:linkname procGetpgrp libc_getpgrp
 //go:linkname procGeteuid libc_geteuid
 //go:linkname procGetegid libc_getegid
 //go:linkname procGetppid libc_getppid
 //go:linkname procGetpriority libc_getpriority
 //go:linkname procGetrlimit libc_getrlimit
-//go:linkname procGetrusage libc_getrusage
 //go:linkname procGettimeofday libc_gettimeofday
 //go:linkname procGetuid libc_getuid
 //go:linkname procKill libc_kill
 //go:linkname procLchown libc_lchown
 //go:linkname procLink libc_link
-//go:linkname proclisten libc_listen
+//go:linkname proclisten libsocket_listen
 //go:linkname procLstat libc_lstat
 //go:linkname procMadvise libc_madvise
 //go:linkname procMkdir libc_mkdir
-//go:linkname procMkdirat libc_mkdirat
-//go:linkname procMkfifo libc_mkfifo
-//go:linkname procMkfifoat libc_mkfifoat
 //go:linkname procMknod libc_mknod
-//go:linkname procMknodat libc_mknodat
-//go:linkname procMlock libc_mlock
-//go:linkname procMlockall libc_mlockall
-//go:linkname procMprotect libc_mprotect
-//go:linkname procMunlock libc_munlock
-//go:linkname procMunlockall libc_munlockall
 //go:linkname procNanosleep libc_nanosleep
 //go:linkname procOpen libc_open
-//go:linkname procOpenat libc_openat
 //go:linkname procPathconf libc_pathconf
-//go:linkname procPause libc_pause
 //go:linkname procPread libc_pread
 //go:linkname procPwrite libc_pwrite
 //go:linkname procread libc_read
 //go:linkname procReadlink libc_readlink
 //go:linkname procRename libc_rename
-//go:linkname procRenameat libc_renameat
 //go:linkname procRmdir libc_rmdir
 //go:linkname proclseek libc_lseek
 //go:linkname procSetegid libc_setegid
 //go:linkname procSeteuid libc_seteuid
 //go:linkname procSetgid libc_setgid
-//go:linkname procSethostname libc_sethostname
 //go:linkname procSetpgid libc_setpgid
 //go:linkname procSetpriority libc_setpriority
 //go:linkname procSetregid libc_setregid
@@ -217,52 +149,37 @@
 //go:linkname procSetuid libc_setuid
-//go:linkname procshutdown libc_shutdown
+//go:linkname procshutdown libsocket_shutdown
 //go:linkname procStat libc_stat
 //go:linkname procSymlink libc_symlink
 //go:linkname procSync libc_sync
-//go:linkname procTimes libc_times
 //go:linkname procTruncate libc_truncate
 //go:linkname procFsync libc_fsync
 //go:linkname procFtruncate libc_ftruncate
 //go:linkname procUmask libc_umask
-//go:linkname procUname libc_uname
-//go:linkname procumount libc_umount
 //go:linkname procUnlink libc_unlink
-//go:linkname procUnlinkat libc_unlinkat
-//go:linkname procUstat libc_ustat
-//go:linkname procUtime libc_utime
-//go:linkname procbind libc_bind
-//go:linkname procconnect libc_connect
+//go:linkname procUtimes libc_utimes
+//go:linkname procbind libsocket_bind
+//go:linkname procconnect libsocket_connect
 //go:linkname procmmap libc_mmap
 //go:linkname procmunmap libc_munmap
-//go:linkname procsendto libc_sendto
-//go:linkname procsocket libc_socket
-//go:linkname procsocketpair libc_socketpair
+//go:linkname procsendto libsocket_sendto
+//go:linkname procsocket libsocket_socket
+//go:linkname procsocketpair libsocket_socketpair
 //go:linkname procwrite libc_write
-//go:linkname procgetsockopt libc_getsockopt
-//go:linkname procgetpeername libc_getpeername
-//go:linkname procsetsockopt libc_setsockopt
-//go:linkname procrecvfrom libc_recvfrom
-//go:linkname procsysconf libc_sysconf
+//go:linkname procgetsockopt libsocket_getsockopt
+//go:linkname procgetpeername libsocket_getpeername
+//go:linkname procgetsockname libsocket_getsockname
+//go:linkname procsetsockopt libsocket_setsockopt
+//go:linkname procrecvfrom libsocket_recvfrom
+//go:linkname procrecvmsg libsocket_recvmsg
 
 var (
-	procpipe,
-	procgetsockname,
-	procGetcwd,
 	procgetgroups,
 	procsetgroups,
-	procwait4,
-	procgethostname,
-	procutimes,
-	procutimensat,
 	procfcntl,
-	procfutimesat,
 	procaccept,
-	procrecvmsg,
 	procsendmsg,
-	procacct,
-	procioctl,
 	procAccess,
 	procAdjtime,
 	procChdir,
@@ -270,29 +187,21 @@ var (
 	procChown,
 	procChroot,
 	procClose,
-	procCreat,
 	procDup,
-	procDup2,
 	procExit,
 	procFchdir,
 	procFchmod,
-	procFchmodat,
 	procFchown,
-	procFchownat,
-	procFdatasync,
 	procFpathconf,
 	procFstat,
 	procGetdents,
 	procGetgid,
 	procGetpid,
-	procGetpgid,
-	procGetpgrp,
 	procGeteuid,
 	procGetegid,
 	procGetppid,
 	procGetpriority,
 	procGetrlimit,
-	procGetrusage,
 	procGettimeofday,
 	procGetuid,
 	procKill,
@@ -302,33 +211,20 @@ var (
 	procLstat,
 	procMadvise,
 	procMkdir,
-	procMkdirat,
-	procMkfifo,
-	procMkfifoat,
 	procMknod,
-	procMknodat,
-	procMlock,
-	procMlockall,
-	procMprotect,
-	procMunlock,
-	procMunlockall,
 	procNanosleep,
 	procOpen,
-	procOpenat,
 	procPathconf,
-	procPause,
 	procPread,
 	procPwrite,
 	procread,
 	procReadlink,
 	procRename,
-	procRenameat,
 	procRmdir,
 	proclseek,
 	procSetegid,
 	procSeteuid,
 	procSetgid,
-	procSethostname,
 	procSetpgid,
 	procSetpriority,
 	procSetregid,
@@ -340,17 +236,12 @@ var (
 	procStat,
 	procSymlink,
 	procSync,
-	procTimes,
 	procTruncate,
 	procFsync,
 	procFtruncate,
 	procUmask,
-	procUname,
-	procumount,
 	procUnlink,
-	procUnlinkat,
-	procUstat,
-	procUtime,
+	procUtimes,
 	procbind,
 	procconnect,
 	procmmap,
@@ -361,41 +252,12 @@ var (
 	procwrite,
 	procgetsockopt,
 	procgetpeername,
+	procgetsockname,
 	procsetsockopt,
 	procrecvfrom,
-	procsysconf syscallFunc
+	procrecvmsg syscallFunc
 )
 
-func pipe(p *[2]_C_int) (n int, err error) {
-	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Getcwd(buf []byte) (n int, err error) {
-	var _p0 *byte
-	if len(buf) > 0 {
-		_p0 = &buf[0]
-	}
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
 	n = int(r0)
@@ -413,56 +275,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) {
 	return
 }
 
-func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
-	wpid = int32(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func gethostname(buf []byte) (n int, err error) {
-	var _p0 *byte
-	if len(buf) > 0 {
-		_p0 = &buf[0]
-	}
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func utimes(path string, times *[2]Timeval) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func fcntl(fd int, cmd int, arg int) (val int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
 	val = int(r0)
@@ -472,14 +284,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) {
 	return
 }
 
-func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
 	fd = int(r0)
@@ -489,15 +293,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 	return
 }
 
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
 	n = int(r0)
@@ -507,22 +302,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
 	return
 }
 
-func acct(path *byte) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func ioctl(fd int, req int, arg uintptr) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -609,21 +388,6 @@ func Close(fd int) (err error) {
 	return
 }
 
-func Creat(path string, mode uint32) (fd int, err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	fd = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Dup(fd int) (nfd int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	nfd = int(r0)
@@ -633,14 +397,6 @@ func Dup(fd int) (nfd int, err error) {
 	return
 }
 
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Exit(code int) {
 	sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0)
 	return
@@ -662,20 +418,6 @@ func Fchmod(fd int, mode uint32) (err error) {
 	return
 }
 
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0)
 	if e1 != 0 {
@@ -684,28 +426,6 @@ func Fchown(fd int, uid int, gid int) (err error) {
 	return
 }
 
-func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Fdatasync(fd int) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Fpathconf(fd int, name int) (val int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0)
 	val = int(r0)
@@ -748,24 +468,6 @@ func Getpid() (pid int) {
 	return
 }
 
-func Getpgid(pid int) (pgid int, err error) {
-	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
-	pgid = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Getpgrp() (pgid int, err error) {
-	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0)
-	pgid = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Geteuid() (euid int) {
 	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0)
 	euid = int(r0)
@@ -801,14 +503,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) {
 	return
 }
 
-func Getrusage(who int, rusage *Rusage) (err error) {
-	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Gettimeofday(tv *Timeval) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
@@ -913,48 +607,6 @@ func Mkdir(path string, mode uint32) (err error) {
 	return
 }
 
-func Mkdirat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Mkfifo(path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Mknod(path string, mode uint32, dev int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -969,72 +621,6 @@ func Mknod(path string, mode uint32, dev int) (err error) {
 	return
 }
 
-func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Mlock(b []byte) (err error) {
-	var _p0 *byte
-	if len(b) > 0 {
-		_p0 = &b[0]
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Mlockall(flags int) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Mprotect(b []byte, prot int) (err error) {
-	var _p0 *byte
-	if len(b) > 0 {
-		_p0 = &b[0]
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Munlock(b []byte) (err error) {
-	var _p0 *byte
-	if len(b) > 0 {
-		_p0 = &b[0]
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Munlockall() (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0)
 	if e1 != 0 {
@@ -1058,21 +644,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
 	return
 }
 
-func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
-	use(unsafe.Pointer(_p0))
-	fd = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Pathconf(path string, name int) (val int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1088,14 +659,6 @@ func Pathconf(path string, name int) (val int, err error) {
 	return
 }
 
-func Pause() (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
@@ -1174,26 +737,6 @@ func Rename(from string, to string) (err error) {
 	return
 }
 
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(oldpath)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(newpath)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
-	use(unsafe.Pointer(_p0))
-	use(unsafe.Pointer(_p1))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Rmdir(path string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1241,18 +784,6 @@ func Setgid(gid int) (err error) {
 	return
 }
 
-func Sethostname(p []byte) (err error) {
-	var _p0 *byte
-	if len(p) > 0 {
-		_p0 = &p[0]
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Setpgid(pid int, pgid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0)
 	if e1 != 0 {
@@ -1360,15 +891,6 @@ func Sync() (err error) {
 	return
 }
 
-func Times(tms *Tms) (ticks uintptr, err error) {
-	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0)
-	ticks = uintptr(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Truncate(path string, length int64) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1399,34 +921,12 @@ func Ftruncate(fd int, length int64) (err error) {
 	return
 }
 
-func Umask(mask int) (oldmask int) {
-	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0)
+func Umask(newmask int) (oldmask int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(newmask), 0, 0, 0, 0, 0)
 	oldmask = int(r0)
 	return
 }
 
-func Uname(buf *Utsname) (err error) {
-	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Unmount(target string, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(target)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func Unlink(path string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1441,35 +941,13 @@ func Unlink(path string) (err error) {
 	return
 }
 
-func Unlinkat(dirfd int, path string, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Ustat(dev int, ubuf *Ustat_t) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func Utime(path string, buf *Utimbuf) (err error) {
+func Utimes(path string, times *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
 		return
 	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0)
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
 	use(unsafe.Pointer(_p0))
 	if e1 != 0 {
 		err = e1
@@ -1568,6 +1046,14 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
 	return
 }
 
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
 	if e1 != 0 {
@@ -1589,9 +1075,9 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
 	return
 }
 
-func sysconf(name int) (n int64, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0)
-	n = int64(r0)
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+	n = int(r0)
 	if e1 != 0 {
 		err = e1
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
index 26677ebbf..39537d210 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
@@ -1,4 +1,4 @@
-// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.4.sdk/usr/include/sys/syscall.h
+// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/usr/include/sys/syscall.h
 // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
 
 // +build arm64,darwin
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go
new file mode 100644
index 000000000..785240a75
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go
@@ -0,0 +1,304 @@
+// mksysnum_dragonfly.pl
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build 386,dragonfly
+
+package unix
+
+const (
+	// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+	SYS_EXIT = 1 // { void exit(int rval); }
+	SYS_FORK = 2 // { int fork(void); }
+	SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
+	SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
+	SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+	SYS_CLOSE = 6 // { int close(int fd); }
+	SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, \
+	SYS_LINK = 9 // { int link(char *path, char *link); }
+	SYS_UNLINK = 10 // { int unlink(char *path); }
+	SYS_CHDIR = 12 // { int chdir(char *path); }
+	SYS_FCHDIR = 13 // { int fchdir(int fd); }
+	SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
+	SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+	SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+	SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
+	SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, \
+	SYS_GETPID = 20 // { pid_t getpid(void); }
+	SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, \
+	SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+	SYS_SETUID = 23 // { int setuid(uid_t uid); }
+	SYS_GETUID = 24 // { uid_t getuid(void); }
+	SYS_GETEUID = 25 // { uid_t geteuid(void); }
+	SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, \
+	SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
+	SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); }
+	SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, \
+	SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); }
+	SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); }
+	SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); }
+	SYS_ACCESS = 33 // { int access(char *path, int flags); }
+	SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); }
+	SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); }
+	SYS_SYNC = 36 // { int sync(void); }
+	SYS_KILL = 37 // { int kill(int pid, int signum); }
+	SYS_GETPPID = 39 // { pid_t getppid(void); }
+	SYS_DUP = 41 // { int dup(u_int fd); }
+	SYS_PIPE = 42 // { int pipe(void); }
+	SYS_GETEGID = 43 // { gid_t getegid(void); }
+	SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
+	SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, \
+	SYS_GETGID = 47 // { gid_t getgid(void); }
+	SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
+	SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+	SYS_ACCT = 51 // { int acct(char *path); }
+	SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
+	SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
+	SYS_REBOOT = 55 // { int reboot(int opt); }
+	SYS_REVOKE = 56 // { int revoke(char *path); }
+	SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+	SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); }
+	SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
+	SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
+	SYS_CHROOT = 61 // { int chroot(char *path); }
+	SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
+	SYS_VFORK = 66 // { pid_t vfork(void); }
+	SYS_SBRK = 69 // { int sbrk(int incr); }
+	SYS_SSTK = 70 // { int sstk(int incr); }
+	SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+	SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
+	SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
+	SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
+	SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
+	SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
+	SYS_GETPGRP = 81 // { int getpgrp(void); }
+	SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+	SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, \
+	SYS_SWAPON = 85 // { int swapon(char *name); }
+	SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
+	SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+	SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+	SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+	SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+	SYS_FSYNC = 95 // { int fsync(int fd); }
+	SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
+	SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
+	SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
+	SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+	SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
+	SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+	SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+	SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
+	SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
+	SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
+	SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
+	SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
+	SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+	SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+	SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+	SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+	SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+	SYS_RENAME = 128 // { int rename(char *from, char *to); }
+	SYS_FLOCK = 131 // { int flock(int fd, int how); }
+	SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+	SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+	SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+	SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, \
+	SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+	SYS_RMDIR = 137 // { int rmdir(char *path); }
+	SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
+	SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+	SYS_SETSID = 147 // { int setsid(void); }
+	SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+	SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); }
+	SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); }
+	SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
+	SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); }
+	SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); }
+	SYS_UNAME = 164 // { int uname(struct utsname *name); }
+	SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+	SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
+	SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, \
+	SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, \
+	SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+	SYS_SETGID = 181 // { int setgid(gid_t gid); }
+	SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+	SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+	SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+	SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+	SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
+	SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
+	SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, \
+	// SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int
+	SYS_LSEEK = 199 // { off_t lseek(int fd, int pad, off_t offset, \
+	SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); }
+	SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); }
+	SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, \
+	SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+	SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+	SYS_UNDELETE = 205 // { int undelete(char *path); }
+	SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+	SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+	SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
+	SYS___SEMCTL = 220 // { int __semctl(int semid, int semnum, int cmd, \
+	SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+	SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \
+	SYS_MSGCTL = 224 // { int msgctl(int msqid, int cmd, \
+	SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
+	SYS_MSGSND = 226 // { int msgsnd(int msqid, void *msgp, size_t msgsz, \
+	SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, \
+	SYS_SHMAT = 228 // { caddr_t shmat(int shmid, const void *shmaddr, \
+	SYS_SHMCTL = 229 // { int shmctl(int shmid, int cmd, \
+	SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
+	SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+	SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
+	SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, \
+	SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
+	SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+	SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
+	SYS_RFORK = 251 // { int rfork(int flags); }
+	SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, \
+	SYS_ISSETUGID = 253 // { int issetugid(void); }
+	SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+	SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+	SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
+	SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, \
+	SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,\
+	SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+	SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
+	SYS_MODNEXT = 300 // { int modnext(int modid); }
+	SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
+	SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+	SYS_MODFIND = 303 // { int modfind(const char *name); }
+	SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+	SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+	SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+	SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+	SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+	SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+	SYS_GETSID = 310 // { int getsid(pid_t pid); }
+	SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+	SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+	SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+	SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+	SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
+	SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
+	SYS_AIO_READ = 318 // { int aio_read(struct aiocb *aiocbp); }
+	SYS_AIO_WRITE = 319 // { int aio_write(struct aiocb *aiocbp); }
+	SYS_LIO_LISTIO = 320 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
+	SYS_YIELD = 321 // { int yield(void); }
+	SYS_MLOCKALL = 324 // { int mlockall(int how); }
+	SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+	SYS___GETCWD = 326 // { int __getcwd(u_char *buf, u_int buflen); }
+	SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
+	SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
+	SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
+	SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+	SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+	SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+	SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+	SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
+	SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+	SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
+	SYS_JAIL = 338 // { int jail(struct jail *jail); }
+	SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, \
+	SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+	SYS_SIGACTION = 342 // { int sigaction(int sig, const struct sigaction *act, \
+	SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+	SYS_SIGRETURN = 344 // { int sigreturn(ucontext_t *sigcntxp); }
+	SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set,\
+	SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set,\
+	SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
+	SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
+	SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, \
+	SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, \
+	SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
+	SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
+	SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
+	SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, \
+	SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
+	SYS_EXTATTR_SET_FILE = 356 // { int extattr_set_file(const char *path, \
+	SYS_EXTATTR_GET_FILE = 357 // { int extattr_get_file(const char *path, \
+	SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
+	SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
+	SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+	SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+	SYS_KQUEUE = 362 // { int kqueue(void); }
+	SYS_KEVENT = 363 // { int kevent(int fd, \
+	SYS_SCTP_PEELOFF = 364 // { int sctp_peeloff(int sd, caddr_t name ); }
+	SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); }
+	SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
+	SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, \
+	SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); }
+	SYS_VARSYM_GET = 451 // { int varsym_get(int mask, const char *wild, char *buf, int bufsize); }
+	SYS_VARSYM_LIST = 452 // { int varsym_list(int level, char *buf, int maxsize, int *marker); }
+	SYS_EXEC_SYS_REGISTER = 465 // { int exec_sys_register(void *entry); }
+	SYS_EXEC_SYS_UNREGISTER = 466 // { int exec_sys_unregister(int id); }
+	SYS_SYS_CHECKPOINT = 467 // { int sys_checkpoint(int type, int fd, pid_t pid, int retval); }
+	SYS_MOUNTCTL = 468 // { int mountctl(const char *path, int op, int fd, const void *ctl, int ctllen, void *buf, int buflen); }
+	SYS_UMTX_SLEEP = 469 // { int umtx_sleep(volatile const int *ptr, int value, int timeout); }
+	SYS_UMTX_WAKEUP = 470 // { int umtx_wakeup(volatile const int *ptr, int count); }
+	SYS_JAIL_ATTACH = 471 // { int jail_attach(int jid); }
+	SYS_SET_TLS_AREA = 472 // { int set_tls_area(int which, struct tls_info *info, size_t infosize); }
+	SYS_GET_TLS_AREA = 473 // { int get_tls_area(int which, struct tls_info *info, size_t infosize); }
+	SYS_CLOSEFROM = 474 // { int closefrom(int fd); }
+	SYS_STAT = 475 // { int stat(const char *path, struct stat *ub); }
+	SYS_FSTAT = 476 // { int fstat(int fd, struct stat *sb); }
+	SYS_LSTAT = 477 // { int lstat(const char *path, struct stat *ub); }
+	SYS_FHSTAT = 478 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+	SYS_GETDIRENTRIES = 479 // { int getdirentries(int fd, char *buf, u_int count, \
+	SYS_GETDENTS = 480 // { int getdents(int fd, char *buf, size_t count); }
+	SYS_USCHED_SET = 481 // { int usched_set(pid_t pid, int cmd, void *data, \
+	SYS_EXTACCEPT = 482 // { int extaccept(int s, int flags, caddr_t name, int *anamelen); }
+	SYS_EXTCONNECT = 483 // { int extconnect(int s, int flags, caddr_t name, int namelen); }
+	SYS_MCONTROL = 485 // { int mcontrol(void *addr, size_t len, int behav, off_t value); }
+	SYS_VMSPACE_CREATE = 486 // { int vmspace_create(void *id, int type, void *data); }
+	SYS_VMSPACE_DESTROY = 487 // { int vmspace_destroy(void *id); }
+	SYS_VMSPACE_CTL = 488 // { int vmspace_ctl(void *id, int cmd, \
+	SYS_VMSPACE_MMAP = 489 // { int vmspace_mmap(void *id, void *addr, size_t len, \
+	SYS_VMSPACE_MUNMAP = 490 // { int vmspace_munmap(void *id, void *addr, \
+	SYS_VMSPACE_MCONTROL = 491 // { int vmspace_mcontrol(void *id, void *addr, \
+	SYS_VMSPACE_PREAD = 492 // { ssize_t vmspace_pread(void *id, void *buf, \
+	SYS_VMSPACE_PWRITE = 493 // { ssize_t vmspace_pwrite(void *id, const void *buf, \
+	SYS_EXTEXIT = 494 // { void extexit(int how, int status, void *addr); }
+	SYS_LWP_CREATE = 495 // { int lwp_create(struct lwp_params *params); }
+	SYS_LWP_GETTID = 496 // { lwpid_t lwp_gettid(void); }
+	SYS_LWP_KILL = 497 // { int lwp_kill(pid_t pid, lwpid_t tid, int signum); }
+	SYS_LWP_RTPRIO = 498 // { int lwp_rtprio(int function, pid_t pid, lwpid_t tid, struct rtprio *rtp); }
+	SYS_PSELECT = 499 // { int pselect(int nd, fd_set *in, fd_set *ou, \
+	SYS_STATVFS = 500 // { int statvfs(const char *path, struct statvfs *buf); }
+	SYS_FSTATVFS = 501 // { int fstatvfs(int fd, struct statvfs *buf); }
+	SYS_FHSTATVFS = 502 // { int fhstatvfs(const struct fhandle *u_fhp, struct statvfs *buf); }
+	SYS_GETVFSSTAT = 503 // { int getvfsstat(struct statfs *buf, \
+	SYS_OPENAT = 504 // { int openat(int fd, char *path, int flags, int mode); }
+	SYS_FSTATAT = 505 // { int fstatat(int fd, char *path, \
+	SYS_FCHMODAT = 506 // { int fchmodat(int fd, char *path, int mode, \
+	SYS_FCHOWNAT = 507 // { int fchownat(int fd, char *path, int uid, int gid, \
+	SYS_UNLINKAT = 508 // { int unlinkat(int fd, char *path, int flags); }
+	SYS_FACCESSAT = 509 // { int faccessat(int fd, char *path, int amode, \
+	SYS_MQ_OPEN = 510 // { mqd_t mq_open(const char * name, int oflag, \
+	SYS_MQ_CLOSE = 511 // { int mq_close(mqd_t mqdes); }
+	SYS_MQ_UNLINK = 512 // { int mq_unlink(const char *name); }
+	SYS_MQ_GETATTR = 513 // { int mq_getattr(mqd_t mqdes, \
+	SYS_MQ_SETATTR = 514 // { int mq_setattr(mqd_t mqdes, \
+	SYS_MQ_NOTIFY = 515 // { int mq_notify(mqd_t mqdes, \
+	SYS_MQ_SEND = 516 // { int mq_send(mqd_t mqdes, const char *msg_ptr, \
+	SYS_MQ_RECEIVE = 517 // { ssize_t mq_receive(mqd_t mqdes, char *msg_ptr, \
+	SYS_MQ_TIMEDSEND = 518 // { int mq_timedsend(mqd_t mqdes, \
+	SYS_MQ_TIMEDRECEIVE = 519 // { ssize_t mq_timedreceive(mqd_t mqdes, \
+	SYS_IOPRIO_SET = 520 // { int ioprio_set(int which, int who, int prio); }
+	SYS_IOPRIO_GET = 521 // { int ioprio_get(int which, int who); }
+	SYS_CHROOT_KERNEL = 522 // { int chroot_kernel(char *path); }
+	SYS_RENAMEAT = 523 // { int renameat(int oldfd, char *old, int newfd, \
+	SYS_MKDIRAT = 524 // { int mkdirat(int fd, char *path, mode_t mode); }
+	SYS_MKFIFOAT = 525 // { int mkfifoat(int fd, char *path, mode_t mode); }
+	SYS_MKNODAT = 526 // { int mknodat(int fd, char *path, mode_t mode, \
+	SYS_READLINKAT = 527 // { int readlinkat(int fd, char *path, char *buf, \
+	SYS_SYMLINKAT = 528 // { int symlinkat(char *path1, int fd, char *path2); }
+	SYS_SWAPOFF = 529 // { int swapoff(char *name); }
+	SYS_VQUOTACTL = 530 // { int vquotactl(const char *path, \
+	SYS_LINKAT = 531 // { int linkat(int fd1, char *path1, int fd2, \
+	SYS_EACCESS = 532 // { int eaccess(char *path, int flags); }
+	SYS_LPATHCONF = 533 // { int lpathconf(char *path, int name); }
+	SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); }
+	SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); }
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
deleted file mode 100644
index 5ffe1c719..000000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// mksysnum_linux.pl /usr/include/asm/unistd.h
-// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
-
-// +build mips64,linux
-
-package unix
-
-const (
-	SYS_READ = 5000
-	SYS_WRITE = 5001
-	SYS_OPEN = 5002
-	SYS_CLOSE = 5003
-	SYS_STAT = 5004
-	SYS_FSTAT = 5005
-	SYS_LSTAT = 5006
-	SYS_POLL = 5007
-	SYS_LSEEK = 5008
-	SYS_MMAP = 5009
-	SYS_MPROTECT = 5010
-	SYS_MUNMAP = 5011
-	SYS_BRK = 5012
-	SYS_RT_SIGACTION = 5013
-	SYS_RT_SIGPROCMASK = 5014
-	SYS_IOCTL = 5015
-	SYS_PREAD64 = 5016
-	SYS_PWRITE64 = 5017
-	SYS_READV = 5018
-	SYS_WRITEV = 5019
-	SYS_ACCESS = 5020
-	SYS_PIPE = 5021
-	SYS__NEWSELECT = 5022
-	SYS_SCHED_YIELD = 5023
-	SYS_MREMAP = 5024
-	SYS_MSYNC = 5025
-	SYS_MINCORE = 5026
-	SYS_MADVISE = 5027
-	SYS_SHMGET = 5028
-	SYS_SHMAT = 5029
-	SYS_SHMCTL = 5030
-	SYS_DUP = 5031
-	SYS_DUP2 = 5032
-	SYS_PAUSE = 5033
-	SYS_NANOSLEEP = 5034
-	SYS_GETITIMER = 5035
-	SYS_SETITIMER = 5036
-	SYS_ALARM = 5037
-	SYS_GETPID = 5038
-	SYS_SENDFILE = 5039
-	SYS_SOCKET = 5040
-	SYS_CONNECT = 5041
-	SYS_ACCEPT = 5042
-	SYS_SENDTO = 5043
-	SYS_RECVFROM = 5044
-	SYS_SENDMSG = 5045
-	SYS_RECVMSG = 5046
-	SYS_SHUTDOWN = 5047
-	SYS_BIND = 5048
-	SYS_LISTEN = 5049
-	SYS_GETSOCKNAME = 5050
-	SYS_GETPEERNAME = 5051
-	SYS_SOCKETPAIR = 5052
-	SYS_SETSOCKOPT = 5053
-	SYS_GETSOCKOPT = 5054
-	SYS_CLONE = 5055
-	SYS_FORK = 5056
-	SYS_EXECVE = 5057
-	SYS_EXIT = 5058
-	SYS_WAIT4 = 5059
-	SYS_KILL = 5060
-	SYS_UNAME = 5061
-	SYS_SEMGET = 5062
-	SYS_SEMOP = 5063
-	SYS_SEMCTL = 5064
-	SYS_SHMDT = 5065
-	SYS_MSGGET = 5066
-	SYS_MSGSND = 5067
-	SYS_MSGRCV = 5068
-	SYS_MSGCTL = 5069
-	SYS_FCNTL = 5070
-	SYS_FLOCK = 5071
-	SYS_FSYNC = 5072
-	SYS_FDATASYNC = 5073
-	SYS_TRUNCATE = 5074
-	SYS_FTRUNCATE = 5075
-	SYS_GETDENTS = 5076
-	SYS_GETCWD = 5077
-	SYS_CHDIR = 5078
-	SYS_FCHDIR = 5079
-	SYS_RENAME = 5080
-	SYS_MKDIR = 5081
-	SYS_RMDIR = 5082
-	SYS_CREAT = 5083
-	SYS_LINK = 5084
-	SYS_UNLINK = 5085
-	SYS_SYMLINK = 5086
-	SYS_READLINK = 5087
-	SYS_CHMOD = 5088
-	SYS_FCHMOD = 5089
-	SYS_CHOWN = 5090
-	SYS_FCHOWN = 5091
-	SYS_LCHOWN = 5092
-	SYS_UMASK = 5093
-	SYS_GETTIMEOFDAY = 5094
-	SYS_GETRLIMIT = 5095
-	SYS_GETRUSAGE = 5096
-	SYS_SYSINFO = 5097
-	SYS_TIMES = 5098
-	SYS_PTRACE = 5099
-	SYS_GETUID = 5100
-	SYS_SYSLOG = 5101
-	SYS_GETGID = 5102
-	SYS_SETUID = 5103
-	SYS_SETGID = 5104
-	SYS_GETEUID = 5105
-	SYS_GETEGID = 5106
-	SYS_SETPGID = 5107
-	SYS_GETPPID = 5108
-	SYS_GETPGRP = 5109
-	SYS_SETSID = 5110
-	SYS_SETREUID = 5111
-	SYS_SETREGID = 5112
-	SYS_GETGROUPS = 5113
-	SYS_SETGROUPS = 5114
-	SYS_SETRESUID = 5115
-	SYS_GETRESUID = 5116
-	SYS_SETRESGID = 5117
-	SYS_GETRESGID = 5118
-	SYS_GETPGID = 5119
-	SYS_SETFSUID = 5120
-	SYS_SETFSGID = 5121
-	SYS_GETSID = 5122
-	SYS_CAPGET = 5123
-	SYS_CAPSET = 5124
-	SYS_RT_SIGPENDING = 5125
-	SYS_RT_SIGTIMEDWAIT = 5126
-	SYS_RT_SIGQUEUEINFO = 5127
-	SYS_RT_SIGSUSPEND = 5128
-	SYS_SIGALTSTACK = 5129
-	SYS_UTIME = 5130
-	SYS_MKNOD = 5131
-	SYS_PERSONALITY = 5132
-	SYS_USTAT = 5133
-	SYS_STATFS = 5134
-	SYS_FSTATFS = 5135
-	SYS_SYSFS = 5136
-	SYS_GETPRIORITY = 5137
-	SYS_SETPRIORITY = 5138
-	SYS_SCHED_SETPARAM = 5139
-	SYS_SCHED_GETPARAM = 5140
-	SYS_SCHED_SETSCHEDULER = 5141
-	SYS_SCHED_GETSCHEDULER = 5142
-	SYS_SCHED_GET_PRIORITY_MAX = 5143
-	SYS_SCHED_GET_PRIORITY_MIN = 5144
-	SYS_SCHED_RR_GET_INTERVAL = 5145
-	SYS_MLOCK = 5146
-	SYS_MUNLOCK = 5147
-	SYS_MLOCKALL = 5148
-	SYS_MUNLOCKALL = 5149
-	SYS_VHANGUP = 5150
-	SYS_PIVOT_ROOT = 5151
-	SYS__SYSCTL = 5152
-	SYS_PRCTL = 5153
-	SYS_ADJTIMEX = 5154
-	SYS_SETRLIMIT = 5155
-	SYS_CHROOT = 5156
-	SYS_SYNC = 5157
-	SYS_ACCT = 5158
-	SYS_SETTIMEOFDAY = 5159
-	SYS_MOUNT = 5160
-	SYS_UMOUNT2 = 5161
-	SYS_SWAPON = 5162
-	SYS_SWAPOFF = 5163
-	SYS_REBOOT = 5164
-	SYS_SETHOSTNAME = 5165
-	SYS_SETDOMAINNAME = 5166
-	SYS_CREATE_MODULE = 5167
-	SYS_INIT_MODULE = 5168
-	SYS_DELETE_MODULE = 5169
-	SYS_GET_KERNEL_SYMS = 5170
-	SYS_QUERY_MODULE = 5171
-	SYS_QUOTACTL = 5172
-	SYS_NFSSERVCTL = 5173
-	SYS_GETPMSG = 5174
-	SYS_PUTPMSG = 5175
-	SYS_AFS_SYSCALL = 5176
-	SYS_RESERVED177 = 5177
-	SYS_GETTID = 5178
-	SYS_READAHEAD = 5179
-	SYS_SETXATTR = 5180
-	SYS_LSETXATTR = 5181
-	SYS_FSETXATTR = 5182
-	SYS_GETXATTR = 5183
-	SYS_LGETXATTR = 5184
-	SYS_FGETXATTR = 5185
-	SYS_LISTXATTR = 5186
-	SYS_LLISTXATTR = 5187
-	SYS_FLISTXATTR = 5188
-	SYS_REMOVEXATTR = 5189
-	SYS_LREMOVEXATTR = 5190
-	SYS_FREMOVEXATTR = 5191
-	SYS_TKILL = 5192
-	SYS_RESERVED193 = 5193
-	SYS_FUTEX = 5194
-	SYS_SCHED_SETAFFINITY = 5195
-	SYS_SCHED_GETAFFINITY = 5196
-	SYS_CACHEFLUSH = 5197
-	SYS_CACHECTL = 5198
-	SYS_SYSMIPS = 5199
-	SYS_IO_SETUP = 5200
-	SYS_IO_DESTROY = 5201
-	SYS_IO_GETEVENTS = 5202
-	SYS_IO_SUBMIT = 5203
-	SYS_IO_CANCEL = 5204
-	SYS_EXIT_GROUP = 5205
-	SYS_LOOKUP_DCOOKIE = 5206
-	SYS_EPOLL_CREATE = 5207
-	SYS_EPOLL_CTL = 5208
-	SYS_EPOLL_WAIT = 5209
-	SYS_REMAP_FILE_PAGES = 5210
-	SYS_RT_SIGRETURN = 5211
-	SYS_SET_TID_ADDRESS = 5212
-	SYS_RESTART_SYSCALL = 5213
-	SYS_SEMTIMEDOP = 5214
-	SYS_FADVISE64 = 5215
-	SYS_TIMER_CREATE = 5216
-	SYS_TIMER_SETTIME = 5217
-	SYS_TIMER_GETTIME = 5218
-	SYS_TIMER_GETOVERRUN = 5219
-	SYS_TIMER_DELETE = 5220
-	SYS_CLOCK_SETTIME = 5221
-	SYS_CLOCK_GETTIME = 5222
-	SYS_CLOCK_GETRES = 5223
-	SYS_CLOCK_NANOSLEEP = 5224
-	SYS_TGKILL = 5225
-	SYS_UTIMES = 5226
-	SYS_MBIND = 5227
-	SYS_GET_MEMPOLICY = 5228
-	SYS_SET_MEMPOLICY = 5229
-	SYS_MQ_OPEN = 5230
-	SYS_MQ_UNLINK = 5231
-	SYS_MQ_TIMEDSEND = 5232
-	SYS_MQ_TIMEDRECEIVE = 5233
-	SYS_MQ_NOTIFY = 5234
-	SYS_MQ_GETSETATTR = 5235
-	SYS_VSERVER = 5236
-	SYS_WAITID = 5237
-	SYS_ADD_KEY = 5239
-	SYS_REQUEST_KEY = 5240
-	SYS_KEYCTL = 5241
-	SYS_SET_THREAD_AREA = 5242
-	SYS_INOTIFY_INIT = 5243
-	SYS_INOTIFY_ADD_WATCH = 5244
-	SYS_INOTIFY_RM_WATCH = 5245
-	SYS_MIGRATE_PAGES = 5246
-	SYS_OPENAT = 5247
-	SYS_MKDIRAT = 5248
-	SYS_MKNODAT = 5249
-	SYS_FCHOWNAT = 5250
-	SYS_FUTIMESAT = 5251
-	SYS_NEWFSTATAT = 5252
-	SYS_UNLINKAT = 5253
-	SYS_RENAMEAT = 5254
-	SYS_LINKAT = 5255
-	SYS_SYMLINKAT = 5256
-	SYS_READLINKAT = 5257
-	SYS_FCHMODAT = 5258
-	SYS_FACCESSAT = 5259
-	SYS_PSELECT6 = 5260
-	SYS_PPOLL = 5261
-	SYS_UNSHARE = 5262
-	SYS_SPLICE = 5263
-	SYS_SYNC_FILE_RANGE = 5264
-	SYS_TEE = 5265
-	SYS_VMSPLICE = 5266
-	SYS_MOVE_PAGES = 5267
-	SYS_SET_ROBUST_LIST = 5268
-	SYS_GET_ROBUST_LIST = 5269
-	SYS_KEXEC_LOAD = 5270
-	SYS_GETCPU = 5271
-	SYS_EPOLL_PWAIT = 5272
-	SYS_IOPRIO_SET = 5273
-	SYS_IOPRIO_GET = 5274
-	SYS_UTIMENSAT = 5275
-	SYS_SIGNALFD = 5276
-	SYS_TIMERFD = 5277
-	SYS_EVENTFD = 5278
-	SYS_FALLOCATE = 5279
-	SYS_TIMERFD_CREATE = 5280
-	SYS_TIMERFD_GETTIME = 5281
-	SYS_TIMERFD_SETTIME = 5282
-	SYS_SIGNALFD4 = 5283
-	SYS_EVENTFD2 = 5284
-	SYS_EPOLL_CREATE1 = 5285
-	SYS_DUP3 = 5286
-	SYS_PIPE2 = 5287
-	SYS_INOTIFY_INIT1 = 5288
-	SYS_PREADV = 5289
-	SYS_PWRITEV = 5290
-	SYS_RT_TGSIGQUEUEINFO = 5291
-	SYS_PERF_EVENT_OPEN = 5292
-	SYS_ACCEPT4 = 5293
-	SYS_RECVMMSG = 5294
-	SYS_FANOTIFY_INIT = 5295
-	SYS_FANOTIFY_MARK = 5296
-	SYS_PRLIMIT64 = 5297
-	SYS_NAME_TO_HANDLE_AT = 5298
-	SYS_OPEN_BY_HANDLE_AT = 5299
-	SYS_CLOCK_ADJTIME = 5300
-	SYS_SYNCFS = 5301
-	SYS_SENDMMSG = 5302
-	SYS_SETNS = 5303
-	SYS_PROCESS_VM_READV = 5304
-	SYS_PROCESS_VM_WRITEV = 5305
-	SYS_KCMP = 5306
-	SYS_FINIT_MODULE = 5307
-	SYS_GETDENTS64 = 5308
-	SYS_SCHED_SETATTR = 5309
-	SYS_SCHED_GETATTR = 5310
-	SYS_RENAMEAT2 = 5311
-	SYS_SECCOMP = 5312
-	SYS_GETRANDOM = 5313
-	SYS_MEMFD_CREATE = 5314
-	SYS_BPF = 5315
-	SYS_EXECVEAT = 5316
-	SYS_USERFAULTFD = 5317
-	SYS_MEMBARRIER = 5318
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
deleted file mode 100644
index d192b940c..000000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// mksysnum_linux.pl /usr/include/asm/unistd.h
-// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
-
-// +build mips64le,linux
-
-package unix
-
-const (
-	SYS_READ = 5000
-	SYS_WRITE = 5001
-	SYS_OPEN = 5002
-	SYS_CLOSE = 5003
-	SYS_STAT = 5004
-	SYS_FSTAT = 5005
-	SYS_LSTAT = 5006
-	SYS_POLL = 5007
-	SYS_LSEEK = 5008
-	SYS_MMAP = 5009
-	SYS_MPROTECT = 5010
-	SYS_MUNMAP = 5011
-	SYS_BRK = 5012
-	SYS_RT_SIGACTION = 5013
-	SYS_RT_SIGPROCMASK = 5014
-	SYS_IOCTL = 5015
-	SYS_PREAD64 = 5016
-	SYS_PWRITE64 = 5017
-	SYS_READV = 5018
-	SYS_WRITEV = 5019
-	SYS_ACCESS = 5020
-	SYS_PIPE = 5021
-	SYS__NEWSELECT = 5022
-	SYS_SCHED_YIELD = 5023
-	SYS_MREMAP = 5024
-	SYS_MSYNC = 5025
-	SYS_MINCORE = 5026
-	SYS_MADVISE = 5027
-	SYS_SHMGET = 5028
-	SYS_SHMAT = 5029
-	SYS_SHMCTL = 5030
-	SYS_DUP = 5031
-	SYS_DUP2 = 5032
-	SYS_PAUSE = 5033
-	SYS_NANOSLEEP = 5034
-	SYS_GETITIMER = 5035
-	SYS_SETITIMER = 5036
-	SYS_ALARM = 5037
-	SYS_GETPID = 5038
-	SYS_SENDFILE = 5039
-	SYS_SOCKET = 5040
-	SYS_CONNECT = 5041
-	SYS_ACCEPT = 5042
-	SYS_SENDTO = 5043
-	SYS_RECVFROM = 5044
-	SYS_SENDMSG = 5045
-	SYS_RECVMSG = 5046
-	SYS_SHUTDOWN = 5047
-	SYS_BIND = 5048
-	SYS_LISTEN = 5049
-	SYS_GETSOCKNAME = 5050
-	SYS_GETPEERNAME = 5051
-	SYS_SOCKETPAIR = 5052
-	SYS_SETSOCKOPT = 5053
-	SYS_GETSOCKOPT = 5054
-	SYS_CLONE = 5055
-	SYS_FORK = 5056
-	SYS_EXECVE = 5057
-	SYS_EXIT = 5058
-	SYS_WAIT4 = 5059
-	SYS_KILL = 5060
-	SYS_UNAME = 5061
-	SYS_SEMGET = 5062
-	SYS_SEMOP = 5063
-	SYS_SEMCTL = 5064
-	SYS_SHMDT = 5065
-	SYS_MSGGET = 5066
-	SYS_MSGSND = 5067
-	SYS_MSGRCV = 5068
-	SYS_MSGCTL = 5069
-	SYS_FCNTL = 5070
-	SYS_FLOCK = 5071
-	SYS_FSYNC = 5072
-	SYS_FDATASYNC = 5073
-	SYS_TRUNCATE = 5074
-	SYS_FTRUNCATE = 5075
-	SYS_GETDENTS = 5076
-	SYS_GETCWD = 5077
-	SYS_CHDIR = 5078
-	SYS_FCHDIR = 5079
-	SYS_RENAME = 5080
-	SYS_MKDIR = 5081
-	SYS_RMDIR = 5082
-	SYS_CREAT = 5083
-	SYS_LINK = 5084
-	SYS_UNLINK = 5085
-	SYS_SYMLINK = 5086
-	SYS_READLINK = 5087
-	SYS_CHMOD = 5088
-	SYS_FCHMOD = 5089
-	SYS_CHOWN = 5090
-	SYS_FCHOWN = 5091
-	SYS_LCHOWN = 5092
-	SYS_UMASK = 5093
-	SYS_GETTIMEOFDAY = 5094
-	SYS_GETRLIMIT = 5095
-	SYS_GETRUSAGE = 5096
-	SYS_SYSINFO = 5097
-	SYS_TIMES = 5098
-	SYS_PTRACE = 5099
-	SYS_GETUID = 5100
-	SYS_SYSLOG = 5101
-	SYS_GETGID = 5102
-	SYS_SETUID = 5103
-	SYS_SETGID = 5104
-	SYS_GETEUID = 5105
-	SYS_GETEGID = 5106
-	SYS_SETPGID = 5107
-	SYS_GETPPID = 5108
-	SYS_GETPGRP = 5109
-	SYS_SETSID = 5110
-	SYS_SETREUID = 5111
-	SYS_SETREGID = 5112
-	SYS_GETGROUPS = 5113
-	SYS_SETGROUPS = 5114
-	SYS_SETRESUID = 5115
-	SYS_GETRESUID = 5116
-	SYS_SETRESGID = 5117
-	SYS_GETRESGID = 5118
-	SYS_GETPGID = 5119
-	SYS_SETFSUID = 5120
-	SYS_SETFSGID = 5121
-	SYS_GETSID = 5122
-	SYS_CAPGET = 5123
-	SYS_CAPSET = 5124
-	SYS_RT_SIGPENDING = 5125
-	SYS_RT_SIGTIMEDWAIT = 5126
-	SYS_RT_SIGQUEUEINFO = 5127
-	SYS_RT_SIGSUSPEND = 5128
-	SYS_SIGALTSTACK = 5129
-	SYS_UTIME = 5130
-	SYS_MKNOD = 5131
-	SYS_PERSONALITY = 5132
-	SYS_USTAT = 5133
-	SYS_STATFS = 5134
-	SYS_FSTATFS = 5135
-	SYS_SYSFS = 5136
-	SYS_GETPRIORITY = 5137
-	SYS_SETPRIORITY = 5138
-	SYS_SCHED_SETPARAM = 5139
-	SYS_SCHED_GETPARAM = 5140
-	SYS_SCHED_SETSCHEDULER = 5141
-	SYS_SCHED_GETSCHEDULER = 5142
-	SYS_SCHED_GET_PRIORITY_MAX = 5143
-	SYS_SCHED_GET_PRIORITY_MIN = 5144
-	SYS_SCHED_RR_GET_INTERVAL = 5145
-	SYS_MLOCK = 5146
-	SYS_MUNLOCK = 5147
-	SYS_MLOCKALL = 5148
-	SYS_MUNLOCKALL = 5149
-	SYS_VHANGUP = 5150
-	SYS_PIVOT_ROOT = 5151
-	SYS__SYSCTL = 5152
-	SYS_PRCTL = 5153
-	SYS_ADJTIMEX = 5154
-	SYS_SETRLIMIT = 5155
-	SYS_CHROOT = 5156
-	SYS_SYNC = 5157
-	SYS_ACCT = 5158
-	SYS_SETTIMEOFDAY = 5159
-	SYS_MOUNT = 5160
-	SYS_UMOUNT2 = 5161
-	SYS_SWAPON = 5162
-	SYS_SWAPOFF = 5163
-	SYS_REBOOT = 5164
-	SYS_SETHOSTNAME = 5165
-	SYS_SETDOMAINNAME = 5166
-	SYS_CREATE_MODULE = 5167
-	SYS_INIT_MODULE = 5168
-	SYS_DELETE_MODULE = 5169
-	SYS_GET_KERNEL_SYMS = 5170
-	SYS_QUERY_MODULE = 5171
-	SYS_QUOTACTL = 5172
-	SYS_NFSSERVCTL = 5173
-	SYS_GETPMSG = 5174
-	SYS_PUTPMSG = 5175
-	SYS_AFS_SYSCALL = 5176
-	SYS_RESERVED177 = 5177
-	SYS_GETTID = 5178
-	SYS_READAHEAD = 5179
-	SYS_SETXATTR = 5180
-	SYS_LSETXATTR = 5181
-	SYS_FSETXATTR = 5182
-	SYS_GETXATTR = 5183
-	SYS_LGETXATTR = 5184
-	SYS_FGETXATTR = 5185
-	SYS_LISTXATTR = 5186
-	SYS_LLISTXATTR = 5187
-	SYS_FLISTXATTR = 5188
-	SYS_REMOVEXATTR = 5189
-	SYS_LREMOVEXATTR = 5190
-	SYS_FREMOVEXATTR = 5191
-	SYS_TKILL = 5192
-	SYS_RESERVED193 = 5193
-	SYS_FUTEX = 5194
-	SYS_SCHED_SETAFFINITY = 5195
-	SYS_SCHED_GETAFFINITY = 5196
-	SYS_CACHEFLUSH = 5197
-	SYS_CACHECTL = 5198
-	SYS_SYSMIPS = 5199
-	SYS_IO_SETUP = 5200
-	SYS_IO_DESTROY = 5201
-	SYS_IO_GETEVENTS = 5202
-	SYS_IO_SUBMIT = 5203
-	SYS_IO_CANCEL = 5204
-	SYS_EXIT_GROUP = 5205
-	SYS_LOOKUP_DCOOKIE = 5206
-	SYS_EPOLL_CREATE = 5207
-	SYS_EPOLL_CTL = 5208
-	SYS_EPOLL_WAIT = 5209
-	SYS_REMAP_FILE_PAGES = 5210
-	SYS_RT_SIGRETURN = 5211
-	SYS_SET_TID_ADDRESS = 5212
-	SYS_RESTART_SYSCALL = 5213
-	SYS_SEMTIMEDOP = 5214
-	SYS_FADVISE64 = 5215
-	SYS_TIMER_CREATE = 5216
-	SYS_TIMER_SETTIME = 5217
-	SYS_TIMER_GETTIME = 5218
-	SYS_TIMER_GETOVERRUN = 5219
-	SYS_TIMER_DELETE = 5220
-	SYS_CLOCK_SETTIME = 5221
-	SYS_CLOCK_GETTIME = 5222
-	SYS_CLOCK_GETRES = 5223
-	SYS_CLOCK_NANOSLEEP = 5224
-	SYS_TGKILL = 5225
-	SYS_UTIMES = 5226
-	SYS_MBIND = 5227
-	SYS_GET_MEMPOLICY = 5228
-	SYS_SET_MEMPOLICY = 5229
-	SYS_MQ_OPEN = 5230
-	SYS_MQ_UNLINK = 5231
-	SYS_MQ_TIMEDSEND = 5232
-	SYS_MQ_TIMEDRECEIVE = 5233
-	SYS_MQ_NOTIFY = 5234
-	SYS_MQ_GETSETATTR = 5235
-	SYS_VSERVER = 5236
-	SYS_WAITID = 5237
-	SYS_ADD_KEY = 5239
-	SYS_REQUEST_KEY = 5240
-	SYS_KEYCTL = 5241
-	SYS_SET_THREAD_AREA = 5242
-	SYS_INOTIFY_INIT = 5243
-	SYS_INOTIFY_ADD_WATCH = 5244
-	SYS_INOTIFY_RM_WATCH = 5245
-	SYS_MIGRATE_PAGES = 5246
-	SYS_OPENAT = 5247
-	SYS_MKDIRAT = 5248
-	SYS_MKNODAT = 5249
-	SYS_FCHOWNAT = 5250
-	SYS_FUTIMESAT = 5251
-	SYS_NEWFSTATAT = 5252
-	SYS_UNLINKAT = 5253
-	SYS_RENAMEAT = 5254
-	SYS_LINKAT = 5255
-	SYS_SYMLINKAT = 5256
-	SYS_READLINKAT = 5257
-	SYS_FCHMODAT = 5258
-	SYS_FACCESSAT = 5259
-	SYS_PSELECT6 = 5260
-	SYS_PPOLL = 5261
-	SYS_UNSHARE = 5262
-	SYS_SPLICE = 5263
-	SYS_SYNC_FILE_RANGE = 5264
-	SYS_TEE = 5265
-	SYS_VMSPLICE = 5266
-	SYS_MOVE_PAGES = 5267
-	SYS_SET_ROBUST_LIST = 5268
-	SYS_GET_ROBUST_LIST = 5269
-	SYS_KEXEC_LOAD = 5270
-	SYS_GETCPU = 5271
-	SYS_EPOLL_PWAIT = 5272
-	SYS_IOPRIO_SET = 5273
-	SYS_IOPRIO_GET = 5274
-	SYS_UTIMENSAT = 5275
-	SYS_SIGNALFD = 5276
-	SYS_TIMERFD = 5277
-	SYS_EVENTFD = 5278
-	SYS_FALLOCATE = 5279
-	SYS_TIMERFD_CREATE = 5280
-	SYS_TIMERFD_GETTIME = 5281
-	SYS_TIMERFD_SETTIME = 5282
-	SYS_SIGNALFD4 = 5283
-	SYS_EVENTFD2 = 5284
-	SYS_EPOLL_CREATE1 = 5285
-	SYS_DUP3 = 5286
-	SYS_PIPE2 = 5287
-	SYS_INOTIFY_INIT1 = 5288
-	SYS_PREADV = 5289
-	SYS_PWRITEV = 5290
-	SYS_RT_TGSIGQUEUEINFO = 5291
-	SYS_PERF_EVENT_OPEN = 5292
-	SYS_ACCEPT4 = 5293
-	SYS_RECVMMSG = 5294
-	SYS_FANOTIFY_INIT = 5295
-	SYS_FANOTIFY_MARK = 5296
-	SYS_PRLIMIT64 = 5297
-	SYS_NAME_TO_HANDLE_AT = 5298
-	SYS_OPEN_BY_HANDLE_AT = 5299
-	SYS_CLOCK_ADJTIME = 5300
-	SYS_SYNCFS = 5301
-	SYS_SENDMMSG = 5302
-	SYS_SETNS = 5303
-	SYS_PROCESS_VM_READV = 5304
-	SYS_PROCESS_VM_WRITEV = 5305
-	SYS_KCMP = 5306
-	SYS_FINIT_MODULE = 5307
-	SYS_GETDENTS64 = 5308
-	SYS_SCHED_SETATTR = 5309
-	SYS_SCHED_GETATTR = 5310
-	SYS_RENAMEAT2 = 5311
-	SYS_SECCOMP = 5312
-	SYS_GETRANDOM = 5313
-	SYS_MEMFD_CREATE = 5314
-	SYS_BPF = 5315
-	SYS_EXECVEAT = 5316
-	SYS_USERFAULTFD = 5317
-	SYS_MEMBARRIER = 5318
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
deleted file mode 100644
index 42d4f5cda..000000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ /dev/null
@@ -1,328 +0,0 @@
-// mksysnum_linux.pl /usr/include/asm/unistd.h
-// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
-
-// +build s390x,linux
-
-package unix
-
-const (
-	SYS_EXIT = 1
-	SYS_FORK = 2
-	SYS_READ = 3
-	SYS_WRITE = 4
-	SYS_OPEN = 5
-	SYS_CLOSE = 6
-	SYS_RESTART_SYSCALL = 7
-	SYS_CREAT = 8
-	SYS_LINK = 9
-	SYS_UNLINK = 10
-	SYS_EXECVE = 11
-	SYS_CHDIR = 12
-	SYS_MKNOD = 14
-	SYS_CHMOD = 15
-	SYS_LSEEK = 19
-	SYS_GETPID = 20
-	SYS_MOUNT = 21
-	SYS_UMOUNT = 22
-	SYS_PTRACE = 26
-	SYS_ALARM = 27
-	SYS_PAUSE = 29
-	SYS_UTIME = 30
-	SYS_ACCESS = 33
-	SYS_NICE = 34
-	SYS_SYNC = 36
-	SYS_KILL = 37
-	SYS_RENAME = 38
-	SYS_MKDIR = 39
-	SYS_RMDIR = 40
-	SYS_DUP = 41
-	SYS_PIPE = 42
-	SYS_TIMES = 43
-	SYS_BRK = 45
-	SYS_SIGNAL = 48
-	SYS_ACCT = 51
-	SYS_UMOUNT2 = 52
-	SYS_IOCTL = 54
-	SYS_FCNTL = 55
-	SYS_SETPGID = 57
-	SYS_UMASK = 60
-	SYS_CHROOT = 61
-	SYS_USTAT = 62
-	SYS_DUP2 = 63
-	SYS_GETPPID = 64
-	SYS_GETPGRP = 65
-	SYS_SETSID = 66
-	SYS_SIGACTION = 67
-	SYS_SIGSUSPEND = 72
-	SYS_SIGPENDING = 73
-	SYS_SETHOSTNAME = 74
-	SYS_SETRLIMIT = 75
-	SYS_GETRUSAGE = 77
-	SYS_GETTIMEOFDAY = 78
-	SYS_SETTIMEOFDAY = 79
-	SYS_SYMLINK = 83
-	SYS_READLINK = 85
-	SYS_USELIB = 86
-	SYS_SWAPON = 87
-	SYS_REBOOT = 88
-	SYS_READDIR = 89
-	SYS_MMAP = 90
-	SYS_MUNMAP = 91
-	SYS_TRUNCATE = 92
-	SYS_FTRUNCATE = 93
-	SYS_FCHMOD = 94
-	SYS_GETPRIORITY = 96
-	SYS_SETPRIORITY = 97
-	SYS_STATFS = 99
-	SYS_FSTATFS = 100
-	SYS_SOCKETCALL = 102
-	SYS_SYSLOG = 103
-	SYS_SETITIMER = 104
-	SYS_GETITIMER = 105
-	SYS_STAT = 106
-	SYS_LSTAT = 107
-	SYS_FSTAT = 108
-	SYS_LOOKUP_DCOOKIE = 110
-	SYS_VHANGUP = 111
-	SYS_IDLE = 112
-	SYS_WAIT4 = 114
-	SYS_SWAPOFF = 115
-	SYS_SYSINFO = 116
-	SYS_IPC = 117
-	SYS_FSYNC = 118
-	SYS_SIGRETURN = 119
-	SYS_CLONE = 120
-	SYS_SETDOMAINNAME = 121
-	SYS_UNAME = 122
-	SYS_ADJTIMEX = 124
-	SYS_MPROTECT = 125
-	SYS_SIGPROCMASK = 126
-	SYS_CREATE_MODULE = 127
-	SYS_INIT_MODULE = 128
-	SYS_DELETE_MODULE = 129
-	SYS_GET_KERNEL_SYMS = 130
-	SYS_QUOTACTL = 131
-	SYS_GETPGID = 132
-	SYS_FCHDIR = 133
-	SYS_BDFLUSH = 134
-	SYS_SYSFS = 135
-	SYS_PERSONALITY = 136
-	SYS_AFS_SYSCALL = 137
-	SYS_GETDENTS = 141
-	SYS_FLOCK = 143
-	SYS_MSYNC = 144
-	SYS_READV = 145
-	SYS_WRITEV = 146
-	SYS_GETSID = 147
-	SYS_FDATASYNC = 148
-	SYS__SYSCTL = 149
-	SYS_MLOCK = 150
-	SYS_MUNLOCK = 151
-	SYS_MLOCKALL = 152
-	SYS_MUNLOCKALL = 153
-	SYS_SCHED_SETPARAM = 154
-	SYS_SCHED_GETPARAM = 155
-	SYS_SCHED_SETSCHEDULER = 156
-	SYS_SCHED_GETSCHEDULER = 157
-	SYS_SCHED_YIELD = 158
-	SYS_SCHED_GET_PRIORITY_MAX = 159
-	SYS_SCHED_GET_PRIORITY_MIN = 160
-	SYS_SCHED_RR_GET_INTERVAL = 161
-	SYS_NANOSLEEP = 162
-	SYS_MREMAP = 163
-	SYS_QUERY_MODULE = 167
-	SYS_POLL = 168
-	SYS_NFSSERVCTL = 169
-	SYS_PRCTL = 172
-	SYS_RT_SIGRETURN = 173
-	SYS_RT_SIGACTION = 174
-	SYS_RT_SIGPROCMASK = 175
-	SYS_RT_SIGPENDING = 176
-	SYS_RT_SIGTIMEDWAIT = 177
-	SYS_RT_SIGQUEUEINFO = 178
-	SYS_RT_SIGSUSPEND = 179
-	SYS_PREAD64 = 180
-	SYS_PWRITE64 = 181
-	SYS_GETCWD = 183
-	SYS_CAPGET = 184
-	SYS_CAPSET = 185
-	SYS_SIGALTSTACK = 186
-	SYS_SENDFILE = 187
-	SYS_GETPMSG = 188
-	SYS_PUTPMSG = 189
-	SYS_VFORK = 190
-	SYS_PIVOT_ROOT = 217
-	SYS_MINCORE = 218
-	SYS_MADVISE = 219
-	SYS_GETDENTS64 = 220
-	SYS_READAHEAD = 222
-	SYS_SETXATTR = 224
-	SYS_LSETXATTR = 225
-	SYS_FSETXATTR = 226
-	SYS_GETXATTR = 227
-	SYS_LGETXATTR = 228
-	SYS_FGETXATTR = 229
-	SYS_LISTXATTR = 230
-	SYS_LLISTXATTR = 231
-	SYS_FLISTXATTR = 232
-	SYS_REMOVEXATTR = 233
-	SYS_LREMOVEXATTR = 234
-	SYS_FREMOVEXATTR = 235
-	SYS_GETTID = 236
-	SYS_TKILL = 237
-	SYS_FUTEX = 238
-	SYS_SCHED_SETAFFINITY = 239
-	SYS_SCHED_GETAFFINITY = 240
-	SYS_TGKILL = 241
-	SYS_IO_SETUP = 243
-	SYS_IO_DESTROY = 244
-	SYS_IO_GETEVENTS = 245
-	SYS_IO_SUBMIT = 246
-	SYS_IO_CANCEL = 247
-	SYS_EXIT_GROUP = 248
-	SYS_EPOLL_CREATE = 249
-	SYS_EPOLL_CTL = 250
-	SYS_EPOLL_WAIT = 251
-	SYS_SET_TID_ADDRESS = 252
-	SYS_FADVISE64 = 253
-	SYS_TIMER_CREATE = 254
-	SYS_TIMER_SETTIME = 255
-	SYS_TIMER_GETTIME = 256
-	SYS_TIMER_GETOVERRUN = 257
-	SYS_TIMER_DELETE = 258
-	SYS_CLOCK_SETTIME = 259
-	SYS_CLOCK_GETTIME = 260
-	SYS_CLOCK_GETRES = 261
-	SYS_CLOCK_NANOSLEEP = 262
-	SYS_STATFS64 = 265
-	SYS_FSTATFS64 = 266
-	SYS_REMAP_FILE_PAGES = 267
-	SYS_MBIND = 268
-	SYS_GET_MEMPOLICY = 269
-	SYS_SET_MEMPOLICY = 270
-	SYS_MQ_OPEN = 271
-	SYS_MQ_UNLINK = 272
-	SYS_MQ_TIMEDSEND = 273
-	SYS_MQ_TIMEDRECEIVE = 274
-	SYS_MQ_NOTIFY = 275
-	SYS_MQ_GETSETATTR = 276
-	SYS_KEXEC_LOAD = 277
-	SYS_ADD_KEY = 278
-	SYS_REQUEST_KEY = 279
-	SYS_KEYCTL = 280
-	SYS_WAITID = 281
-	SYS_IOPRIO_SET = 282
-	SYS_IOPRIO_GET = 283
-	SYS_INOTIFY_INIT = 284
-	SYS_INOTIFY_ADD_WATCH = 285
-	SYS_INOTIFY_RM_WATCH = 286
-	SYS_MIGRATE_PAGES = 287
-	SYS_OPENAT = 288
-	SYS_MKDIRAT = 289
-	SYS_MKNODAT = 290
-	SYS_FCHOWNAT = 291
-	SYS_FUTIMESAT = 292
-	SYS_UNLINKAT = 294
-	SYS_RENAMEAT = 295
-	SYS_LINKAT = 296
-	SYS_SYMLINKAT = 297
-	SYS_READLINKAT = 298
-	SYS_FCHMODAT = 299
-	SYS_FACCESSAT = 300
-	SYS_PSELECT6 = 301
-	SYS_PPOLL = 302
-	SYS_UNSHARE = 303
-	SYS_SET_ROBUST_LIST = 304
-	SYS_GET_ROBUST_LIST = 305
-	SYS_SPLICE = 306
-	SYS_SYNC_FILE_RANGE = 307
-	SYS_TEE = 308
-	SYS_VMSPLICE = 309
-	SYS_MOVE_PAGES = 310
-	SYS_GETCPU = 311
-	SYS_EPOLL_PWAIT = 312
-	SYS_UTIMES = 313
-	SYS_FALLOCATE = 314
-	SYS_UTIMENSAT = 315
-	SYS_SIGNALFD = 316
-	SYS_TIMERFD = 317
-	SYS_EVENTFD = 318
-	SYS_TIMERFD_CREATE = 319
-	SYS_TIMERFD_SETTIME = 320
-	SYS_TIMERFD_GETTIME = 321
-	SYS_SIGNALFD4 = 322
-	SYS_EVENTFD2 = 323
-	SYS_INOTIFY_INIT1 = 324
-	SYS_PIPE2 = 325
-	SYS_DUP3 = 326
-	SYS_EPOLL_CREATE1 = 327
-	SYS_PREADV = 328
-	SYS_PWRITEV = 329
-	SYS_RT_TGSIGQUEUEINFO = 330
-	SYS_PERF_EVENT_OPEN = 331
-	SYS_FANOTIFY_INIT = 332
-	SYS_FANOTIFY_MARK = 333
-	SYS_PRLIMIT64 = 334
-	SYS_NAME_TO_HANDLE_AT = 335
-	SYS_OPEN_BY_HANDLE_AT = 336
-	SYS_CLOCK_ADJTIME = 337
-	SYS_SYNCFS = 338
-	SYS_SETNS = 339
-	SYS_PROCESS_VM_READV = 340
-	SYS_PROCESS_VM_WRITEV = 341
-	SYS_S390_RUNTIME_INSTR = 342
-	SYS_KCMP = 343
-	SYS_FINIT_MODULE = 344
-	SYS_SCHED_SETATTR = 345
-	SYS_SCHED_GETATTR = 346
-	SYS_RENAMEAT2 = 347
-	SYS_SECCOMP = 348
-	SYS_GETRANDOM = 349
-	SYS_MEMFD_CREATE = 350
-	SYS_BPF = 351
-	SYS_S390_PCI_MMIO_WRITE = 352
-	SYS_S390_PCI_MMIO_READ = 353
-	SYS_EXECVEAT = 354
-	SYS_USERFAULTFD = 355
-	SYS_MEMBARRIER = 356
-	SYS_RECVMMSG = 357
-	SYS_SENDMMSG = 358
-	SYS_SOCKET = 359
-	SYS_SOCKETPAIR = 360
-	SYS_BIND = 361
-	SYS_CONNECT = 362
-	SYS_LISTEN = 363
-	SYS_ACCEPT4 = 364
-	SYS_GETSOCKOPT = 365
-	SYS_SETSOCKOPT = 366
-	SYS_GETSOCKNAME = 367
-	SYS_GETPEERNAME = 368
-	SYS_SENDTO = 369
-	SYS_SENDMSG = 370
-	SYS_RECVFROM = 371
-	SYS_RECVMSG = 372
-	SYS_SHUTDOWN = 373
-	SYS_MLOCK2 = 374
-	SYS_SELECT = 142
-	SYS_GETRLIMIT = 191
-	SYS_LCHOWN = 198
-	SYS_GETUID = 199
-	SYS_GETGID = 200
-	SYS_GETEUID = 201
-	SYS_GETEGID = 202
-	SYS_SETREUID = 203
-	SYS_SETREGID = 204
-	SYS_GETGROUPS = 205
-	SYS_SETGROUPS = 206
-	SYS_FCHOWN = 207
-	SYS_SETRESUID = 208
-	SYS_GETRESUID = 209
-	SYS_SETRESGID = 210
-	SYS_GETRESGID = 211
-	SYS_CHOWN = 212
-	SYS_SETUID = 213
-	SYS_SETGID = 214
-	SYS_SETFSUID = 215
-	SYS_SETFSGID = 216
-	SYS_NEWFSTATAT = 293
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
deleted file mode 100644
index 46b5bee1d..000000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// mksysnum_linux.pl /usr/include/sparc64-linux-gnu/asm/unistd.h
-// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
-
-// +build sparc64,linux
-
-package unix
-
-const (
-	SYS_RESTART_SYSCALL = 0
-	SYS_EXIT = 1
-	SYS_FORK = 2
-	SYS_READ = 3
-	SYS_WRITE = 4
-	SYS_OPEN = 5
-	SYS_CLOSE = 6
-	SYS_WAIT4 = 7
-	SYS_CREAT = 8
-	SYS_LINK = 9
-	SYS_UNLINK = 10
-	SYS_EXECV = 11
-	SYS_CHDIR = 12
-	SYS_CHOWN = 13
-	SYS_MKNOD = 14
-	SYS_CHMOD = 15
-	SYS_LCHOWN = 16
-	SYS_BRK = 17
-	SYS_PERFCTR = 18
-	SYS_LSEEK = 19
-	SYS_GETPID = 20
-	SYS_CAPGET = 21
-	SYS_CAPSET = 22
-	SYS_SETUID = 23
-	SYS_GETUID = 24
-	SYS_VMSPLICE = 25
-	SYS_PTRACE = 26
-	SYS_ALARM = 27
-	SYS_SIGALTSTACK = 28
-	SYS_PAUSE = 29
-	SYS_UTIME = 30
-	SYS_ACCESS = 33
-	SYS_NICE = 34
-	SYS_SYNC = 36
-	SYS_KILL = 37
-	SYS_STAT = 38
-	SYS_SENDFILE = 39
-	SYS_LSTAT = 40
-	SYS_DUP = 41
-	SYS_PIPE = 42
-	SYS_TIMES = 43
-	SYS_UMOUNT2 = 45
-	SYS_SETGID = 46
-	SYS_GETGID = 47
-	SYS_SIGNAL = 48
-	SYS_GETEUID = 49
-	SYS_GETEGID = 50
-	SYS_ACCT = 51
-	SYS_MEMORY_ORDERING = 52
-	SYS_IOCTL = 54
-	SYS_REBOOT = 55
-	SYS_SYMLINK = 57
-	SYS_READLINK = 58
-	SYS_EXECVE = 59
-	SYS_UMASK = 60
-	SYS_CHROOT = 61
-	SYS_FSTAT = 62
-	SYS_FSTAT64 = 63
-	SYS_GETPAGESIZE = 64
-	SYS_MSYNC = 65
-	SYS_VFORK = 66
-	SYS_PREAD64 = 67
-	SYS_PWRITE64 = 68
-	SYS_MMAP = 71
-	SYS_MUNMAP = 73
-	SYS_MPROTECT = 74
-	SYS_MADVISE = 75
-	SYS_VHANGUP = 76
-	SYS_MINCORE = 78
-	SYS_GETGROUPS = 79
-	SYS_SETGROUPS = 80
-	SYS_GETPGRP = 81
-	SYS_SETITIMER = 83
-	SYS_SWAPON = 85
-	SYS_GETITIMER = 86
-	SYS_SETHOSTNAME = 88
-	SYS_DUP2 = 90
-	SYS_FCNTL = 92
-	SYS_SELECT = 93
-	SYS_FSYNC = 95
-	SYS_SETPRIORITY = 96
-	SYS_SOCKET = 97
-	SYS_CONNECT = 98
-	SYS_ACCEPT = 99
-	SYS_GETPRIORITY = 100
-	SYS_RT_SIGRETURN = 101
-	SYS_RT_SIGACTION = 102
-	SYS_RT_SIGPROCMASK = 103
-	SYS_RT_SIGPENDING = 104
-	SYS_RT_SIGTIMEDWAIT = 105
-	SYS_RT_SIGQUEUEINFO = 106
-	SYS_RT_SIGSUSPEND = 107
-	SYS_SETRESUID = 108
-	SYS_GETRESUID = 109
-	SYS_SETRESGID = 110
-	SYS_GETRESGID = 111
-	SYS_RECVMSG = 113
-	SYS_SENDMSG = 114
-	SYS_GETTIMEOFDAY = 116
-	SYS_GETRUSAGE = 117
-	SYS_GETSOCKOPT = 118
-	SYS_GETCWD = 119
-	SYS_READV = 120
-	SYS_WRITEV = 121
-	SYS_SETTIMEOFDAY = 122
-	SYS_FCHOWN = 123
-	SYS_FCHMOD = 124
-	SYS_RECVFROM = 125
-	SYS_SETREUID = 126
-	SYS_SETREGID = 127
-	SYS_RENAME = 128
-	SYS_TRUNCATE = 129
-	SYS_FTRUNCATE = 130
-	SYS_FLOCK = 131
-	SYS_LSTAT64 = 132
-	SYS_SENDTO = 133
-	SYS_SHUTDOWN = 134
-	SYS_SOCKETPAIR = 135
-	SYS_MKDIR = 136
-	SYS_RMDIR = 137
-	SYS_UTIMES = 138
-	SYS_STAT64 = 139
-	SYS_SENDFILE64 = 140
-	SYS_GETPEERNAME = 141
-	SYS_FUTEX = 142
-	SYS_GETTID = 143
-	SYS_GETRLIMIT = 144
-	SYS_SETRLIMIT = 145
-	SYS_PIVOT_ROOT = 146
-	SYS_PRCTL = 147
-	SYS_PCICONFIG_READ = 148
-	SYS_PCICONFIG_WRITE = 149
-	SYS_GETSOCKNAME = 150
-	SYS_INOTIFY_INIT = 151
-	SYS_INOTIFY_ADD_WATCH = 152
-	SYS_POLL = 153
-	SYS_GETDENTS64 = 154
-	SYS_INOTIFY_RM_WATCH = 156
-	SYS_STATFS = 157
-	SYS_FSTATFS = 158
-	SYS_UMOUNT = 159
-	SYS_SCHED_SET_AFFINITY = 160
-	SYS_SCHED_GET_AFFINITY = 161
-	SYS_GETDOMAINNAME = 162
-	SYS_SETDOMAINNAME = 163
-	SYS_UTRAP_INSTALL = 164
-	SYS_QUOTACTL = 165
-	SYS_SET_TID_ADDRESS = 166
-	SYS_MOUNT = 167
-	SYS_USTAT = 168
-	SYS_SETXATTR = 169
-	SYS_LSETXATTR = 170
-	SYS_FSETXATTR = 171
-	SYS_GETXATTR = 172
-	SYS_LGETXATTR = 173
-	SYS_GETDENTS = 174
-	SYS_SETSID = 175
-	SYS_FCHDIR = 176
-	SYS_FGETXATTR = 177
-	SYS_LISTXATTR = 178
-	SYS_LLISTXATTR = 179
-	SYS_FLISTXATTR = 180
-	SYS_REMOVEXATTR = 181
-	SYS_LREMOVEXATTR = 182
-	SYS_SIGPENDING = 183
-	SYS_QUERY_MODULE = 184
-	SYS_SETPGID = 185
-	SYS_FREMOVEXATTR = 186
-	SYS_TKILL = 187
-	SYS_EXIT_GROUP = 188
-	SYS_UNAME = 189
-	SYS_INIT_MODULE = 190
-	SYS_PERSONALITY = 191
-	SYS_REMAP_FILE_PAGES = 192
-	SYS_EPOLL_CREATE = 193
-	SYS_EPOLL_CTL = 194
-	SYS_EPOLL_WAIT = 195
-	SYS_IOPRIO_SET = 196
-	SYS_GETPPID = 197
-	SYS_SIGACTION = 198
-	SYS_SGETMASK = 199
-	SYS_SSETMASK = 200
-	SYS_SIGSUSPEND = 201
-	SYS_OLDLSTAT = 202
-	SYS_USELIB = 203
-	SYS_READDIR = 204
-	SYS_READAHEAD = 205
-	SYS_SOCKETCALL = 206
-	SYS_SYSLOG = 207
-	SYS_LOOKUP_DCOOKIE = 208
-	SYS_FADVISE64 = 209
-	SYS_FADVISE64_64 = 210
-	SYS_TGKILL = 211
-	SYS_WAITPID = 212
-	SYS_SWAPOFF = 213
-	SYS_SYSINFO = 214
-	SYS_IPC = 215
-	SYS_SIGRETURN = 216
-	SYS_CLONE = 217
-	SYS_IOPRIO_GET = 218
-	SYS_ADJTIMEX = 219
-	SYS_SIGPROCMASK = 220
-	SYS_CREATE_MODULE = 221
-	SYS_DELETE_MODULE = 222
-	SYS_GET_KERNEL_SYMS = 223
-	SYS_GETPGID = 224
-	SYS_BDFLUSH = 225
-	SYS_SYSFS = 226
-	SYS_AFS_SYSCALL = 227
-	SYS_SETFSUID = 228
-	SYS_SETFSGID = 229
-	SYS__NEWSELECT = 230
-	SYS_SPLICE = 232
-	SYS_STIME = 233
-	SYS_STATFS64 = 234
-	SYS_FSTATFS64 = 235
-	SYS__LLSEEK = 236
-	SYS_MLOCK = 237
-	SYS_MUNLOCK = 238
-	SYS_MLOCKALL = 239
-	SYS_MUNLOCKALL = 240
-	SYS_SCHED_SETPARAM = 241
-	SYS_SCHED_GETPARAM = 242
-	SYS_SCHED_SETSCHEDULER = 243
-	SYS_SCHED_GETSCHEDULER = 244
-	SYS_SCHED_YIELD = 245
-	SYS_SCHED_GET_PRIORITY_MAX = 246
-	SYS_SCHED_GET_PRIORITY_MIN = 247
-	SYS_SCHED_RR_GET_INTERVAL = 248
-	SYS_NANOSLEEP = 249
-	SYS_MREMAP = 250
-	SYS__SYSCTL = 251
-	SYS_GETSID = 252
-	SYS_FDATASYNC = 253
-	SYS_NFSSERVCTL = 254
-	SYS_SYNC_FILE_RANGE = 255
-	SYS_CLOCK_SETTIME = 256
-	SYS_CLOCK_GETTIME = 257
-	SYS_CLOCK_GETRES = 258
-	SYS_CLOCK_NANOSLEEP = 259
-	SYS_SCHED_GETAFFINITY = 260
-	SYS_SCHED_SETAFFINITY = 261
-	SYS_TIMER_SETTIME = 262
-	SYS_TIMER_GETTIME = 263
-	SYS_TIMER_GETOVERRUN = 264
-	SYS_TIMER_DELETE = 265
-	SYS_TIMER_CREATE = 266
-	SYS_IO_SETUP = 268
-	SYS_IO_DESTROY = 269
-	SYS_IO_SUBMIT = 270
-	SYS_IO_CANCEL = 271
-	SYS_IO_GETEVENTS = 272
-	SYS_MQ_OPEN = 273
-	SYS_MQ_UNLINK = 274
-	SYS_MQ_TIMEDSEND = 275
-	SYS_MQ_TIMEDRECEIVE = 276
-	SYS_MQ_NOTIFY = 277
-	SYS_MQ_GETSETATTR = 278
-	SYS_WAITID = 279
-	SYS_TEE = 280
-	SYS_ADD_KEY = 281
-	SYS_REQUEST_KEY = 282
-	SYS_KEYCTL = 283
-	SYS_OPENAT = 284
-	SYS_MKDIRAT = 285
-	SYS_MKNODAT = 286
-	SYS_FCHOWNAT = 287
-	SYS_FUTIMESAT = 288
-	SYS_FSTATAT64 = 289
-	SYS_UNLINKAT = 290
-	SYS_RENAMEAT = 291
-	SYS_LINKAT = 292
-	SYS_SYMLINKAT = 293
-	SYS_READLINKAT = 294
-	SYS_FCHMODAT = 295
-	SYS_FACCESSAT = 296
-	SYS_PSELECT6 = 297
-	SYS_PPOLL = 298
-	SYS_UNSHARE = 299
-	SYS_SET_ROBUST_LIST = 300
-	SYS_GET_ROBUST_LIST = 301
-	SYS_MIGRATE_PAGES = 302
-	SYS_MBIND = 303
-	SYS_GET_MEMPOLICY = 304
-	SYS_SET_MEMPOLICY = 305
-	SYS_KEXEC_LOAD = 306
-	SYS_MOVE_PAGES = 307
-	SYS_GETCPU = 308
-	SYS_EPOLL_PWAIT = 309
-	SYS_UTIMENSAT = 310
-	SYS_SIGNALFD = 311
-	SYS_TIMERFD_CREATE = 312
-	SYS_EVENTFD = 313
-	SYS_FALLOCATE = 314
-	SYS_TIMERFD_SETTIME = 315
-	SYS_TIMERFD_GETTIME = 316
-	SYS_SIGNALFD4 = 317
-	SYS_EVENTFD2 = 318
-	SYS_EPOLL_CREATE1 = 319
-	SYS_DUP3 = 320
-	SYS_PIPE2 = 321
-	SYS_INOTIFY_INIT1 = 322
-	SYS_ACCEPT4 = 323
-	SYS_PREADV = 324
-	SYS_PWRITEV = 325
-	SYS_RT_TGSIGQUEUEINFO = 326
-	SYS_PERF_EVENT_OPEN = 327
-	SYS_RECVMMSG = 328
-	SYS_FANOTIFY_INIT = 329
-	SYS_FANOTIFY_MARK = 330
-	SYS_PRLIMIT64 = 331
-	SYS_NAME_TO_HANDLE_AT = 332
-	SYS_OPEN_BY_HANDLE_AT = 333
-	SYS_CLOCK_ADJTIME = 334
-	SYS_SYNCFS = 335
-	SYS_SENDMMSG = 336
-	SYS_SETNS = 337
-	SYS_PROCESS_VM_READV = 338
-	SYS_PROCESS_VM_WRITEV = 339
-	SYS_KERN_FEATURES = 340
-	SYS_KCMP = 341
-	SYS_FINIT_MODULE = 342
-	SYS_SCHED_SETATTR = 343
-	SYS_SCHED_GETATTR = 344
-	SYS_RENAMEAT2 = 345
-	SYS_SECCOMP = 346
-	SYS_GETRANDOM = 347
-	SYS_MEMFD_CREATE = 348
-	SYS_BPF = 349
-	SYS_EXECVEAT = 350
-	SYS_MEMBARRIER = 351
-	SYS_USERFAULTFD = 352
-	SYS_BIND = 353
-	SYS_LISTEN = 354
-	SYS_SETSOCKOPT = 355
-	SYS_MLOCK2 = 356
-	SYS_COPY_FILE_RANGE = 357
-	SYS_PREADV2 = 358
-	SYS_PWRITEV2 = 359
-)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 044657878..6bedd16f6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -455,8 +455,3 @@ type Termios struct {
 	Ispeed uint64
 	Ospeed uint64
 }
-
-const (
-	AT_FDCWD = -0x2
-	AT_SYMLINK_NOFOLLOW = 0x20
-)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go
new file mode 100644
index 000000000..b7e7ff088
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go
@@ -0,0 +1,437 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_dragonfly.go
+
+// +build 386,dragonfly
+
+package unix
+
+const (
+	sizeofPtr = 0x4
+	sizeofShort = 0x2
+	sizeofInt = 0x4
+	sizeofLong = 0x4
+	sizeofLongLong = 0x8
+)
+
+type (
+	_C_short int16
+	_C_int int32
+	_C_long int32
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec int32
+	Nsec int32
+}
+
+type Timeval struct {
+	Sec int32
+	Usec int32
+}
+
+type Rusage struct {
+	Utime Timeval
+	Stime Timeval
+	Maxrss int32
+	Ixrss int32
+	Idrss int32
+	Isrss int32
+	Minflt int32
+	Majflt int32
+	Nswap int32
+	Inblock int32
+	Oublock int32
+	Msgsnd int32
+	Msgrcv int32
+	Nsignals int32
+	Nvcsw int32
+	Nivcsw int32
+}
+
+type Rlimit struct {
+	Cur int64
+	Max int64
+}
+
+type _Gid_t uint32
+
+const (
+	S_IFMT = 0xf000
+	S_IFIFO = 0x1000
+	S_IFCHR = 0x2000
+	S_IFDIR = 0x4000
+	S_IFBLK = 0x6000
+	S_IFREG = 0x8000
+	S_IFLNK = 0xa000
+	S_IFSOCK = 0xc000
+	S_ISUID = 0x800
+	S_ISGID = 0x400
+	S_ISVTX = 0x200
+	S_IRUSR = 0x100
+	S_IWUSR = 0x80
+	S_IXUSR = 0x40
+)
+
+type Stat_t struct {
+	Ino uint64
+	Nlink uint32
+	Dev uint32
+	Mode uint16
+	Padding1 uint16
+	Uid uint32
+	Gid uint32
+	Rdev uint32
+	Atim Timespec
+	Mtim Timespec
+	Ctim Timespec
+	Size int64
+	Blocks int64
+	Blksize uint32
+	Flags uint32
+	Gen uint32
+	Lspare int32
+	Qspare1 int64
+	Qspare2 int64
+}
+
+type Statfs_t struct {
+	Spare2 int32
+	Bsize int32
+	Iosize int32
+	Blocks int32
+	Bfree int32
+	Bavail int32
+	Files int32
+	Ffree int32
+	Fsid Fsid
+	Owner uint32
+	Type int32
+	Flags int32
+	Syncwrites int32
+	Asyncwrites int32
+	Fstypename [16]int8
+	Mntonname [80]int8
+	Syncreads int32
+	Asyncreads int32
+	Spares1 int16
+	Mntfromname [80]int8
+	Spares2 int16
+	Spare [2]int32
+}
+
+type Flock_t struct {
+	Start int64
+	Len int64
+	Pid int32
+	Type int16
+	Whence int16
+}
+
+type Dirent struct {
+	Fileno uint64
+	Namlen uint16
+	Type uint8
+	Unused1 uint8
+	Unused2 uint32
+	Name [256]int8
+}
+
+type Fsid struct {
+	Val [2]int32
+}
+
+type RawSockaddrInet4 struct {
+	Len uint8
+	Family uint8
+	Port uint16
+	Addr [4]byte /* in_addr */
+	Zero [8]int8
+}
+
+type RawSockaddrInet6 struct {
+	Len uint8
+	Family uint8
+	Port uint16
+	Flowinfo uint32
+	Addr [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Len uint8
+	Family uint8
+	Path [104]int8
+}
+
+type RawSockaddrDatalink struct {
+	Len uint8
+	Family uint8
+	Index uint16
+	Type uint8
+	Nlen uint8
+	Alen uint8
+	Slen uint8
+	Data [12]int8
+	Rcf uint16
+	Route [16]uint16
+}
+
+type RawSockaddr struct {
+	Len uint8
+	Family uint8
+	Data [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len uint32
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name *byte
+	Namelen uint32
+	Iov *Iovec
+	Iovlen int32
+	Control *byte
+	Controllen uint32
+	Flags int32
+}
+
+type Cmsghdr struct {
+	Len uint32
+	Level int32
+	Type int32
+}
+
+type Inet6Pktinfo struct {
+	Addr [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu uint32
+}
+
+type ICMPv6Filter struct {
+	Filt [8]uint32
+}
+
+const (
+	SizeofSockaddrInet4 = 0x10
+	SizeofSockaddrInet6 = 0x1c
+	SizeofSockaddrAny = 0x6c
+	SizeofSockaddrUnix = 0x6a
+	SizeofSockaddrDatalink = 0x36
+	SizeofLinger = 0x8
+	SizeofIPMreq = 0x8
+	SizeofIPv6Mreq = 0x14
+	SizeofMsghdr = 0x1c
+	SizeofCmsghdr = 0xc
+	SizeofInet6Pktinfo = 0x14
+	SizeofIPv6MTUInfo = 0x20
+	SizeofICMPv6Filter = 0x20
+)
+
+const (
+	PTRACE_TRACEME = 0x0
+	PTRACE_CONT = 0x7
+	PTRACE_KILL = 0x8
+)
+
+type Kevent_t struct {
+	Ident uint32
+	Filter int16
+	Flags uint16
+	Fflags uint32
+	Data int32
+	Udata *byte
+}
+
+type FdSet struct {
+	Bits [32]uint32
+}
+
+const (
+	SizeofIfMsghdr = 0x68
+	SizeofIfData = 0x58
+	SizeofIfaMsghdr = 0x14
+	SizeofIfmaMsghdr = 0x10
+	SizeofIfAnnounceMsghdr = 0x18
+	SizeofRtMsghdr = 0x5c
+	SizeofRtMetrics = 0x38
+)
+
+type IfMsghdr struct {
+	Msglen uint16
+	Version uint8
+	Type uint8
+	Addrs int32
+	Flags int32
+	Index uint16
+	Pad_cgo_0 [2]byte
+	Data IfData
+}
+
+type IfData struct {
+	Type uint8
+	Physical uint8
+	Addrlen uint8
+	Hdrlen uint8
+	Recvquota uint8
+	Xmitquota uint8
+	Pad_cgo_0 [2]byte
+	Mtu uint32
+	Metric uint32
+	Link_state uint32
+	Baudrate uint64
+	Ipackets uint32
+	Ierrors uint32
+	Opackets uint32
+	Oerrors uint32
+	Collisions uint32
+	Ibytes uint32
+	Obytes uint32
+	Imcasts uint32
+	Omcasts uint32
+	Iqdrops uint32
+	Noproto uint32
+	Hwassist uint32
+	Unused uint32
+	Lastchange Timeval
+}
+
+type IfaMsghdr struct {
+	Msglen uint16
+	Version uint8
+	Type uint8
+	Addrs int32
+	Flags int32
+	Index uint16
+	Pad_cgo_0 [2]byte
+	Metric int32
+}
+
+type IfmaMsghdr struct {
+	Msglen uint16
+	Version uint8
+	Type uint8
+	Addrs int32
+	Flags int32
+	Index uint16
+	Pad_cgo_0 [2]byte
+}
+
+type IfAnnounceMsghdr struct {
+	Msglen uint16
+	Version uint8
+	Type uint8
+	Index uint16
+	Name [16]int8
+	What uint16
+}
+
+type RtMsghdr struct {
+	Msglen uint16
+	Version uint8
+	Type uint8
+	Index uint16
+	Pad_cgo_0 [2]byte
+	Flags int32
+	Addrs int32
+	Pid int32
+	Seq int32
+	Errno int32
+	Use int32
+	Inits uint32
+	Rmx RtMetrics
+}
+
+type RtMetrics struct {
+	Locks uint32
+	Mtu uint32
+	Pksent uint32
+	Expire uint32
+	Sendpipe uint32
+	Ssthresh uint32
+	Rtt uint32
+	Rttvar uint32
+	Recvpipe uint32
+	Hopcount uint32
Mssopt uint16 + Pad uint16 + Msl uint32 + Iwmaxsegs uint32 + Iwcapsegs uint32 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x8 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Pad_cgo_0 [2]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 8cf30947b..330c0e635 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -1,7 +1,8 @@ -// +build 386,freebsd // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go +// +build 386,freebsd + package unix const ( @@ -139,15 +140,6 @@ type Fsid struct { Val [2]int32 } -const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 -) - type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index e5feb207b..93395924c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -1,7 +1,8 @@ -// +build amd64,freebsd // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go +// +build amd64,freebsd + package unix const ( @@ -139,15 +140,6 @@ type Fsid struct { Val [2]int32 } -const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 -) - type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index a3631053c..9a58381b4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,7 +1,8 @@ -// +build 386,linux // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go +// +build 386,linux + package unix const ( @@ -151,15 +152,6 @@ type Flock_t struct { Pid int32 } -const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 -) - type RawSockaddrInet4 struct { Family uint16 Port uint16 @@ -197,19 +189,6 @@ type RawSockaddrNetlink struct { Groups uint32 } -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -332,8 +311,6 @@ const ( SizeofSockaddrUnix = 0x6e SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrCAN = 0x10 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -598,38 +575,18 @@ type EpollEvent struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x200 - AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + AT_REMOVEDIR = 0x200 ) -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - 
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
 type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 0573e6cd2..f1937a627 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -1,7 +1,8 @@
-// +build amd64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
+// +build amd64,linux
+
 package unix
 
 const (
@@ -153,15 +154,6 @@ type Flock_t struct {
 	Pad_cgo_1 [4]byte
 }
 
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x4
-	FADV_NOREUSE = 0x5
-)
-
 type RawSockaddrInet4 struct {
 	Family uint16
 	Port uint16
@@ -199,19 +191,6 @@ type RawSockaddrNetlink struct {
 	Groups uint32
 }
 
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
 type RawSockaddr struct {
 	Family uint16
 	Data [14]int8
@@ -336,8 +315,6 @@ const (
 	SizeofSockaddrUnix = 0x6e
 	SizeofSockaddrLinklayer = 0x14
 	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
 	SizeofLinger = 0x8
 	SizeofIPMreq = 0x8
 	SizeofIPMreqn = 0xc
@@ -616,38 +593,18 @@ type EpollEvent struct {
 const (
 	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
+	AT_REMOVEDIR = 0x200
 )
 
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
 type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 0578b5396..c8a0de45a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -1,6 +1,7 @@
-// +build arm,linux
 // Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_linux.go | go run mkpost.go
+// cgo -godefs types_linux.go
+
+// +build arm,linux
 
 package unix
 
@@ -155,15 +156,6 @@ type Flock_t struct {
 	Pad_cgo_1 [4]byte
 }
 
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x4
-	FADV_NOREUSE = 0x5
-)
-
 type RawSockaddrInet4 struct {
 	Family uint16
 	Port uint16
@@ -201,19 +193,6 @@ type RawSockaddrNetlink struct {
 	Groups uint32
 }
 
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
 type RawSockaddr struct {
 	Family uint16
 	Data [14]uint8
@@ -336,8 +315,6 @@ const (
 	SizeofSockaddrUnix = 0x6e
 	SizeofSockaddrLinklayer = 0x14
 	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
 	SizeofLinger = 0x8
 	SizeofIPMreq = 0x8
 	SizeofIPMreqn = 0xc
@@ -587,38 +564,120 @@ type EpollEvent struct {
 const (
 	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
+	AT_REMOVEDIR = 0x200
 )
 
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
+type Termios struct {
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
 
 const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
+	VINTR = 0x0
+	VQUIT = 0x1
+	VERASE = 0x2
+	VKILL = 0x3
+	VEOF = 0x4
+	VTIME = 0x5
+	VMIN = 0x6
+	VSWTC = 0x7
+	VSTART = 0x8
+	VSTOP = 0x9
+	VSUSP = 0xa
+	VEOL = 0xb
+	VREPRINT = 0xc
+	VDISCARD = 0xd
+	VWERASE = 0xe
+	VLNEXT = 0xf
+	VEOL2 = 0x10
+	IGNBRK = 0x1
+	BRKINT = 0x2
+	IGNPAR = 0x4
+	PARMRK = 0x8
+	INPCK = 0x10
+	ISTRIP = 0x20
+	INLCR = 0x40
+	IGNCR = 0x80
+	ICRNL = 0x100
+	IUCLC = 0x200
+	IXON = 0x400
+	IXANY = 0x800
+	IXOFF = 0x1000
+	IMAXBEL = 0x2000
+	IUTF8 = 0x4000
+	OPOST = 0x1
+	OLCUC = 0x2
+	ONLCR = 0x4
+	OCRNL = 0x8
+	ONOCR = 0x10
+	ONLRET = 0x20
+	OFILL = 0x40
+	OFDEL = 0x80
+	B0 = 0x0
+	B50 = 0x1
+	B75 = 0x2
+	B110 = 0x3
+	B134 = 0x4
+	B150 = 0x5
+	B200 = 0x6
+	B300 = 0x7
+	B600 = 0x8
+	B1200 = 0x9
+	B1800 = 0xa
+	B2400 = 0xb
+	B4800 = 0xc
+	B9600 = 0xd
+	B19200 = 0xe
+	B38400 = 0xf
+	CSIZE = 0x30
+	CS5 = 0x0
+	CS6 = 0x10
+	CS7 = 0x20
+	CS8 = 0x30
+	CSTOPB = 0x40
+	CREAD = 0x80
+	PARENB = 0x100
+	PARODD = 0x200
+	HUPCL = 0x400
+	CLOCAL = 0x800
+	B57600 = 0x1001
+	B115200 = 0x1002
+	B230400 = 0x1003
+	B460800 = 0x1004
+	B500000 = 0x1005
+	B576000 = 0x1006
+	B921600 = 0x1007
+	B1000000 = 0x1008
+	B1152000 = 0x1009
+	B1500000 = 0x100a
+	B2000000 = 0x100b
+	B2500000 = 0x100c
+	B3000000 = 0x100d
+	B3500000 = 0x100e
+	B4000000 = 0x100f
+	ISIG = 0x1
+	ICANON = 0x2
+	XCASE = 0x4
+	ECHO = 0x8
+	ECHOE = 0x10
+	ECHOK = 0x20
+	ECHONL = 0x40
+	NOFLSH = 0x80
+	TOSTOP = 0x100
+	ECHOCTL = 0x200
+	ECHOPRT = 0x400
+	ECHOKE = 0x800
+	FLUSHO = 0x1000
+	PENDIN = 0x4000
+	IEXTEN = 0x8000
+	TCGETS = 0x5401
+	TCSETS = 0x5402
 )
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
-type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
-}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 808e04669..f989a3605 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -1,7 +1,8 @@
-// +build arm64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs -- -fsigned-char types_linux.go
 
+// +build arm64,linux
+
 package unix
 
 const (
@@ -200,19 +201,6 @@ type RawSockaddrNetlink struct {
 	Groups uint32
 }
 
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
 type RawSockaddr struct {
 	Family uint16
 	Data [14]int8
@@ -337,8 +325,6 @@ const (
 	SizeofSockaddrUnix = 0x6e
 	SizeofSockaddrLinklayer = 0x14
 	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
 	SizeofLinger = 0x8
 	SizeofIPMreq = 0x8
 	SizeofIPMreqn = 0xc
@@ -588,7 +574,6 @@ type Ustat_t struct {
 
 type EpollEvent struct {
 	Events uint32
-	PadFd int32
 	Fd int32
 	Pad int32
 }
@@ -596,37 +581,17 @@ type EpollEvent struct {
 const (
 	AT_FDCWD = -0x64
 	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
 type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
deleted file mode 100644
index 73e4b76c0..000000000
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// +build mips64,linux
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_linux.go
-
-package unix
-
-const (
-	sizeofPtr = 0x8
-	sizeofShort = 0x2
-	sizeofInt = 0x4
-	sizeofLong = 0x8
-	sizeofLongLong = 0x8
-	PathMax = 0x1000
-)
-
-type (
-	_C_short int16
-	_C_int int32
-	_C_long int64
-	_C_long_long int64
-)
-
-type Timespec struct {
-	Sec int64
-	Nsec int64
-}
-
-type Timeval struct {
-	Sec int64
-	Usec int64
-}
-
-type Timex struct {
-	Modes uint32
-	Pad_cgo_0 [4]byte
-	Offset int64
-	Freq int64
-	Maxerror int64
-	Esterror int64
-	Status int32
-	Pad_cgo_1 [4]byte
-	Constant int64
-	Precision int64
-	Tolerance int64
-	Time Timeval
-	Tick int64
-	Ppsfreq int64
-	Jitter int64
-	Shift int32
-	Pad_cgo_2 [4]byte
-	Stabil int64
-	Jitcnt int64
-	Calcnt int64
-	Errcnt int64
-	Stbcnt int64
-	Tai int32
-	Pad_cgo_3 [44]byte
-}
-
-type Time_t int64
-
-type Tms struct {
-	Utime int64
-	Stime int64
-	Cutime int64
-	Cstime int64
-}
-
-type Utimbuf struct {
-	Actime int64
-	Modtime int64
-}
-
-type Rusage struct {
-	Utime Timeval
-	Stime Timeval
-	Maxrss int64
-	Ixrss int64
-	Idrss int64
-	Isrss int64
-	Minflt int64
-	Majflt int64
-	Nswap int64
-	Inblock int64
-	Oublock int64
-	Msgsnd int64
-	Msgrcv int64
-	Nsignals int64
-	Nvcsw int64
-	Nivcsw int64
-}
-
-type Rlimit struct {
-	Cur uint64
-	Max uint64
-}
-
-type _Gid_t uint32
-
-type Stat_t struct {
-	Dev uint32
-	Pad1 [3]int32
-	Ino uint64
-	Mode uint32
-	Nlink uint32
-	Uid uint32
-	Gid uint32
-	Rdev uint32
-	Pad2 [3]uint32
-	Size int64
-	Atim Timespec
-	Mtim Timespec
-	Ctim Timespec
-	Blksize uint32
-	Pad4 uint32
-	Blocks int64
-}
-
-type Statfs_t struct {
-	Type int64
-	Bsize int64
-	Frsize int64
-	Blocks uint64
-	Bfree uint64
-	Files uint64
-	Ffree uint64
-	Bavail uint64
-	Fsid Fsid
-	Namelen int64
-	Flags int64
-	Spare [5]int64
-}
-
-type Dirent struct {
-	Ino uint64
-	Off int64
-	Reclen uint16
-	Type uint8
-	Name [256]int8
-	Pad_cgo_0 [5]byte
-}
-
-type Fsid struct {
-	X__val [2]int32
-}
-
-type Flock_t struct {
-	Type int16
-	Whence int16
-	Pad_cgo_0 [4]byte
-	Start int64
-	Len int64
-	Pid int32
-	Pad_cgo_1 [4]byte
-}
-
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x4
-	FADV_NOREUSE = 0x5
-)
-
-type RawSockaddrInet4 struct {
-	Family uint16
-	Port uint16
-	Addr [4]byte /* in_addr */
-	Zero [8]uint8
-}
-
-type RawSockaddrInet6 struct {
-	Family uint16
-	Port uint16
-	Flowinfo uint32
-	Addr [16]byte /* in6_addr */
-	Scope_id uint32
-}
-
-type RawSockaddrUnix struct {
-	Family uint16
-	Path [108]int8
-}
-
-type RawSockaddrLinklayer struct {
-	Family uint16
-	Protocol uint16
-	Ifindex int32
-	Hatype uint16
-	Pkttype uint8
-	Halen uint8
-	Addr [8]uint8
-}
-
-type RawSockaddrNetlink struct {
-	Family uint16
-	Pad uint16
-	Pid uint32
-	Groups uint32
-}
-
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
-type RawSockaddr struct {
-	Family uint16
-	Data [14]int8
-}
-
-type RawSockaddrAny struct {
-	Addr RawSockaddr
-	Pad [96]int8
-}
-
-type _Socklen uint32
-
-type Linger struct {
-	Onoff int32
-	Linger int32
-}
-
-type Iovec struct {
-	Base *byte
-	Len uint64
-}
-
-type IPMreq struct {
-	Multiaddr [4]byte /* in_addr */
-	Interface [4]byte /* in_addr */
-}
-
-type IPMreqn struct {
-	Multiaddr [4]byte /* in_addr */
-	Address [4]byte /* in_addr */
-	Ifindex int32
-}
-
-type IPv6Mreq struct {
-	Multiaddr [16]byte /* in6_addr */
-	Interface uint32
-}
-
-type Msghdr struct {
-	Name *byte
-	Namelen uint32
-	Pad_cgo_0 [4]byte
-	Iov *Iovec
-	Iovlen uint64
-	Control *byte
-	Controllen uint64
-	Flags int32
-	Pad_cgo_1 [4]byte
-}
-
-type Cmsghdr struct {
-	Len uint64
-	Level int32
-	Type int32
-}
-
-type Inet4Pktinfo struct {
-	Ifindex int32
-	Spec_dst [4]byte /* in_addr */
-	Addr [4]byte /* in_addr */
-}
-
-type Inet6Pktinfo struct {
-	Addr [16]byte /* in6_addr */
-	Ifindex uint32
-}
-
-type IPv6MTUInfo struct {
-	Addr RawSockaddrInet6
-	Mtu uint32
-}
-
-type ICMPv6Filter struct {
-	Data [8]uint32
-}
-
-type Ucred struct {
-	Pid int32
-	Uid uint32
-	Gid uint32
-}
-
-type TCPInfo struct {
-	State uint8
-	Ca_state uint8
-	Retransmits uint8
-	Probes uint8
-	Backoff uint8
-	Options uint8
-	Pad_cgo_0 [2]byte
-	Rto uint32
-	Ato uint32
-	Snd_mss uint32
-	Rcv_mss uint32
-	Unacked uint32
-	Sacked uint32
-	Lost uint32
-	Retrans uint32
-	Fackets uint32
-	Last_data_sent uint32
-	Last_ack_sent uint32
-	Last_data_recv uint32
-	Last_ack_recv uint32
-	Pmtu uint32
-	Rcv_ssthresh uint32
-	Rtt uint32
-	Rttvar uint32
-	Snd_ssthresh uint32
-	Snd_cwnd uint32
-	Advmss uint32
-	Reordering uint32
-	Rcv_rtt uint32
-	Rcv_space uint32
-	Total_retrans uint32
-}
-
-const (
-	SizeofSockaddrInet4 = 0x10
-	SizeofSockaddrInet6 = 0x1c
-	SizeofSockaddrAny = 0x70
-	SizeofSockaddrUnix = 0x6e
-	SizeofSockaddrLinklayer = 0x14
-	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
-	SizeofLinger = 0x8
-	SizeofIPMreq = 0x8
-	SizeofIPMreqn = 0xc
-	SizeofIPv6Mreq = 0x14
-	SizeofMsghdr = 0x38
-	SizeofCmsghdr = 0x10
-	SizeofInet4Pktinfo = 0xc
-	SizeofInet6Pktinfo = 0x14
-	SizeofIPv6MTUInfo = 0x20
-	SizeofICMPv6Filter = 0x20
-	SizeofUcred = 0xc
-	SizeofTCPInfo = 0x68
-)
-
-const (
-	IFA_UNSPEC = 0x0
-	IFA_ADDRESS = 0x1
-	IFA_LOCAL = 0x2
-	IFA_LABEL = 0x3
-	IFA_BROADCAST = 0x4
-	IFA_ANYCAST = 0x5
-	IFA_CACHEINFO = 0x6
-	IFA_MULTICAST = 0x7
-	IFLA_UNSPEC = 0x0
-	IFLA_ADDRESS = 0x1
-	IFLA_BROADCAST = 0x2
-	IFLA_IFNAME = 0x3
-	IFLA_MTU = 0x4
-	IFLA_LINK = 0x5
-	IFLA_QDISC = 0x6
-	IFLA_STATS = 0x7
-	IFLA_COST = 0x8
-	IFLA_PRIORITY = 0x9
-	IFLA_MASTER = 0xa
-	IFLA_WIRELESS = 0xb
-	IFLA_PROTINFO = 0xc
-	IFLA_TXQLEN = 0xd
-	IFLA_MAP = 0xe
-	IFLA_WEIGHT = 0xf
-	IFLA_OPERSTATE = 0x10
-	IFLA_LINKMODE = 0x11
-	IFLA_LINKINFO = 0x12
-	IFLA_NET_NS_PID = 0x13
-	IFLA_IFALIAS = 0x14
-	IFLA_MAX = 0x27
-	RT_SCOPE_UNIVERSE = 0x0
-	RT_SCOPE_SITE = 0xc8
-	RT_SCOPE_LINK = 0xfd
-	RT_SCOPE_HOST = 0xfe
-	RT_SCOPE_NOWHERE = 0xff
-	RT_TABLE_UNSPEC = 0x0
-	RT_TABLE_COMPAT = 0xfc
-	RT_TABLE_DEFAULT = 0xfd
-	RT_TABLE_MAIN = 0xfe
-	RT_TABLE_LOCAL = 0xff
-	RT_TABLE_MAX = 0xffffffff
-	RTA_UNSPEC = 0x0
-	RTA_DST = 0x1
-	RTA_SRC = 0x2
-	RTA_IIF = 0x3
-	RTA_OIF = 0x4
-	RTA_GATEWAY = 0x5
-	RTA_PRIORITY = 0x6
-	RTA_PREFSRC = 0x7
-	RTA_METRICS = 0x8
-	RTA_MULTIPATH = 0x9
-	RTA_FLOW = 0xb
-	RTA_CACHEINFO = 0xc
-	RTA_TABLE = 0xf
-	RTN_UNSPEC = 0x0
-	RTN_UNICAST = 0x1
-	RTN_LOCAL = 0x2
-	RTN_BROADCAST = 0x3
-	RTN_ANYCAST = 0x4
-	RTN_MULTICAST = 0x5
-	RTN_BLACKHOLE = 0x6
-	RTN_UNREACHABLE = 0x7
-	RTN_PROHIBIT = 0x8
-	RTN_THROW = 0x9
-	RTN_NAT = 0xa
-	RTN_XRESOLVE = 0xb
-	RTNLGRP_NONE = 0x0
-	RTNLGRP_LINK = 0x1
-	RTNLGRP_NOTIFY = 0x2
-	RTNLGRP_NEIGH = 0x3
-	RTNLGRP_TC = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE = 0x7
-	RTNLGRP_IPV4_RULE = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE = 0x13
-	RTNLGRP_ND_USEROPT = 0x14
-	SizeofNlMsghdr = 0x10
-	SizeofNlMsgerr = 0x14
-	SizeofRtGenmsg = 0x1
-	SizeofNlAttr = 0x4
-	SizeofRtAttr = 0x4
-	SizeofIfInfomsg = 0x10
-	SizeofIfAddrmsg = 0x8
-	SizeofRtMsg = 0xc
-	SizeofRtNexthop = 0x8
-)
-
-type NlMsghdr struct {
-	Len uint32
-	Type uint16
-	Flags uint16
-	Seq uint32
-	Pid uint32
-}
-
-type NlMsgerr struct {
-	Error int32
-	Msg NlMsghdr
-}
-
-type RtGenmsg struct {
-	Family uint8
-}
-
-type NlAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type RtAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type IfInfomsg struct {
-	Family uint8
-	X__ifi_pad uint8
-	Type uint16
-	Index int32
-	Flags uint32
-	Change uint32
-}
-
-type IfAddrmsg struct {
-	Family uint8
-	Prefixlen uint8
-	Flags uint8
-	Scope uint8
-	Index uint32
-}
-
-type RtMsg struct {
-	Family uint8
-	Dst_len uint8
-	Src_len uint8
-	Tos uint8
-	Table uint8
-	Protocol uint8
-	Scope uint8
-	Type uint8
-	Flags uint32
-}
-
-type RtNexthop struct {
-	Len uint16
-	Flags uint8
-	Hops uint8
-	Ifindex int32
-}
-
-const (
-	SizeofSockFilter = 0x8
-	SizeofSockFprog = 0x10
-)
-
-type SockFilter struct {
-	Code uint16
-	Jt uint8
-	Jf uint8
-	K uint32
-}
-
-type SockFprog struct {
-	Len uint16
-	Pad_cgo_0 [6]byte
-	Filter *SockFilter
-}
-
-type InotifyEvent struct {
-	Wd int32
-	Mask uint32
-	Cookie uint32
-	Len uint32
-}
-
-const SizeofInotifyEvent = 0x10
-
-type PtraceRegs struct {
-	Regs [102]uint64
-	U_tsize uint64
-	U_dsize uint64
-	U_ssize uint64
-	Start_code uint64
-	Start_data uint64
-	Start_stack uint64
-	Signal int64
-	U_ar0 uint64
-	Magic uint64
-	U_comm [32]int8
-}
-
-type FdSet struct {
-	Bits [16]int64
-}
-
-type Sysinfo_t struct {
-	Uptime int64
-	Loads [3]uint64
-	Totalram uint64
-	Freeram uint64
-	Sharedram uint64
-	Bufferram uint64
-	Totalswap uint64
-	Freeswap uint64
-	Procs uint16
-	Pad uint16
-	Pad_cgo_0 [4]byte
-	Totalhigh uint64
-	Freehigh uint64
-	Unit uint32
-	X_f [0]int8
-	Pad_cgo_1 [4]byte
-}
-
-type Utsname struct {
-	Sysname [65]int8
-	Nodename [65]int8
-	Release [65]int8
-	Version [65]int8
-	Machine [65]int8
-	Domainname [65]int8
-}
-
-type Ustat_t struct {
-	Tfree int32
-	Pad_cgo_0 [4]byte
-	Tinode uint64
-	Fname [6]int8
-	Fpack [6]int8
-	Pad_cgo_1 [4]byte
-}
-
-type EpollEvent struct {
-	Events uint32
-	Fd int32
-	Pad int32
-}
-
-const (
-	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
-	AT_SYMLINK_NOFOLLOW = 0x100
-)
-
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
-type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [32]uint8
-	Pad_cgo_0 [3]byte
-}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
deleted file mode 100644
index 479ca3e1b..000000000
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// +build mips64le,linux
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_linux.go
-
-package unix
-
-const (
-	sizeofPtr = 0x8
-	sizeofShort = 0x2
-	sizeofInt = 0x4
-	sizeofLong = 0x8
-	sizeofLongLong = 0x8
-	PathMax = 0x1000
-)
-
-type (
-	_C_short int16
-	_C_int int32
-	_C_long int64
-	_C_long_long int64
-)
-
-type Timespec struct {
-	Sec int64
-	Nsec int64
-}
-
-type Timeval struct {
-	Sec int64
-	Usec int64
-}
-
-type Timex struct {
-	Modes uint32
-	Pad_cgo_0 [4]byte
-	Offset int64
-	Freq int64
-	Maxerror int64
-	Esterror int64
-	Status int32
-	Pad_cgo_1 [4]byte
-	Constant int64
-	Precision int64
-	Tolerance int64
-	Time Timeval
-	Tick int64
-	Ppsfreq int64
-	Jitter int64
-	Shift int32
-	Pad_cgo_2 [4]byte
-	Stabil int64
-	Jitcnt int64
-	Calcnt int64
-	Errcnt int64
-	Stbcnt int64
-	Tai int32
-	Pad_cgo_3 [44]byte
-}
-
-type Time_t int64
-
-type Tms struct {
-	Utime int64
-	Stime int64
-	Cutime int64
-	Cstime int64
-}
-
-type Utimbuf struct {
-	Actime int64
-	Modtime int64
-}
-
-type Rusage struct {
-	Utime Timeval
-	Stime Timeval
-	Maxrss int64
-	Ixrss int64
-	Idrss int64
-	Isrss int64
-	Minflt int64
-	Majflt int64
-	Nswap int64
-	Inblock int64
-	Oublock int64
-	Msgsnd int64
-	Msgrcv int64
-	Nsignals int64
-	Nvcsw int64
-	Nivcsw int64
-}
-
-type Rlimit struct {
-	Cur uint64
-	Max uint64
-}
-
-type _Gid_t uint32
-
-type Stat_t struct {
-	Dev uint32
-	Pad1 [3]int32
-	Ino uint64
-	Mode uint32
-	Nlink uint32
-	Uid uint32
-	Gid uint32
-	Rdev uint32
-	Pad2 [3]uint32
-	Size int64
-	Atim Timespec
-	Mtim Timespec
-	Ctim Timespec
-	Blksize uint32
-	Pad4 uint32
-	Blocks int64
-}
-
-type Statfs_t struct {
-	Type int64
-	Bsize int64
-	Frsize int64
-	Blocks uint64
-	Bfree uint64
-	Files uint64
-	Ffree uint64
-	Bavail uint64
-	Fsid Fsid
-	Namelen int64
-	Flags int64
-	Spare [5]int64
-}
-
-type Dirent struct {
-	Ino uint64
-	Off int64
-	Reclen uint16
-	Type uint8
-	Name [256]int8
-	Pad_cgo_0 [5]byte
-}
-
-type Fsid struct {
-	X__val [2]int32
-}
-
-type Flock_t struct {
-	Type int16
-	Whence int16
-	Pad_cgo_0 [4]byte
-	Start int64
-	Len int64
-	Pid int32
-	Pad_cgo_1 [4]byte
-}
-
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x4
-	FADV_NOREUSE = 0x5
-)
-
-type RawSockaddrInet4 struct {
-	Family uint16
-	Port uint16
-	Addr [4]byte /* in_addr */
-	Zero [8]uint8
-}
-
-type RawSockaddrInet6 struct {
-	Family uint16
-	Port uint16
-	Flowinfo uint32
-	Addr [16]byte /* in6_addr */
-	Scope_id uint32
-}
-
-type RawSockaddrUnix struct {
-	Family uint16
-	Path [108]int8
-}
-
-type RawSockaddrLinklayer struct {
-	Family uint16
-	Protocol uint16
-	Ifindex int32
-	Hatype uint16
-	Pkttype uint8
-	Halen uint8
-	Addr [8]uint8
-}
-
-type RawSockaddrNetlink struct {
-	Family uint16
-	Pad uint16
-	Pid uint32
-	Groups uint32
-}
-
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
-type RawSockaddr struct {
-	Family uint16
-	Data [14]int8
-}
-
-type RawSockaddrAny struct {
-	Addr RawSockaddr
-	Pad [96]int8
-}
-
-type _Socklen uint32
-
-type Linger struct {
-	Onoff int32
-	Linger int32
-}
-
-type Iovec struct {
-	Base *byte
-	Len uint64
-}
-
-type IPMreq struct {
-	Multiaddr [4]byte /* in_addr */
-	Interface [4]byte /* in_addr */
-}
-
-type IPMreqn struct {
-	Multiaddr [4]byte /* in_addr */
-	Address [4]byte /* in_addr */
-	Ifindex int32
-}
-
-type IPv6Mreq struct {
-	Multiaddr [16]byte /* in6_addr */
-	Interface uint32
-}
-
-type Msghdr struct {
-	Name *byte
-	Namelen uint32
-	Pad_cgo_0 [4]byte
-	Iov *Iovec
-	Iovlen uint64
-	Control *byte
-	Controllen uint64
-	Flags int32
-	Pad_cgo_1 [4]byte
-}
-
-type Cmsghdr struct {
-	Len uint64
-	Level int32
-	Type int32
-}
-
-type Inet4Pktinfo struct {
-	Ifindex int32
-	Spec_dst [4]byte /* in_addr */
-	Addr [4]byte /* in_addr */
-}
-
-type Inet6Pktinfo struct {
-	Addr [16]byte /* in6_addr */
-	Ifindex uint32
-}
-
-type IPv6MTUInfo struct {
-	Addr RawSockaddrInet6
-	Mtu uint32
-}
-
-type ICMPv6Filter struct {
-	Data [8]uint32
-}
-
-type Ucred struct {
-	Pid int32
-	Uid uint32
-	Gid uint32
-}
-
-type TCPInfo struct {
-	State uint8
-	Ca_state uint8
-	Retransmits uint8
-	Probes uint8
-	Backoff uint8
-	Options uint8
-	Pad_cgo_0 [2]byte
-	Rto uint32
-	Ato uint32
-	Snd_mss uint32
-	Rcv_mss uint32
-	Unacked uint32
-	Sacked uint32
-	Lost uint32
-	Retrans uint32
-	Fackets uint32
-	Last_data_sent uint32
-	Last_ack_sent uint32
-	Last_data_recv uint32
-	Last_ack_recv uint32
-	Pmtu uint32
-	Rcv_ssthresh uint32
-	Rtt uint32
-	Rttvar uint32
-	Snd_ssthresh uint32
-	Snd_cwnd uint32
-	Advmss uint32
-	Reordering uint32
-	Rcv_rtt uint32
-	Rcv_space uint32
-	Total_retrans uint32
-}
-
-const (
-	SizeofSockaddrInet4 = 0x10
-	SizeofSockaddrInet6 = 0x1c
-	SizeofSockaddrAny = 0x70
-	SizeofSockaddrUnix = 0x6e
-	SizeofSockaddrLinklayer = 0x14
-	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
-	SizeofLinger = 0x8
-	SizeofIPMreq = 0x8
-	SizeofIPMreqn = 0xc
-	SizeofIPv6Mreq = 0x14
-	SizeofMsghdr = 0x38
-	SizeofCmsghdr = 0x10
-	SizeofInet4Pktinfo = 0xc
-	SizeofInet6Pktinfo = 0x14
-	SizeofIPv6MTUInfo = 0x20
-	SizeofICMPv6Filter = 0x20
-	SizeofUcred = 0xc
-	SizeofTCPInfo = 0x68
-)
-
-const (
-	IFA_UNSPEC = 0x0
-	IFA_ADDRESS = 0x1
-	IFA_LOCAL = 0x2
-	IFA_LABEL = 0x3
-	IFA_BROADCAST = 0x4
-	IFA_ANYCAST = 0x5
-	IFA_CACHEINFO = 0x6
-	IFA_MULTICAST = 0x7
-	IFLA_UNSPEC = 0x0
-	IFLA_ADDRESS = 0x1
-	IFLA_BROADCAST = 0x2
-	IFLA_IFNAME = 0x3
-	IFLA_MTU = 0x4
-	IFLA_LINK = 0x5
-	IFLA_QDISC = 0x6
-	IFLA_STATS = 0x7
-	IFLA_COST = 0x8
-	IFLA_PRIORITY = 0x9
-	IFLA_MASTER = 0xa
-	IFLA_WIRELESS = 0xb
-	IFLA_PROTINFO = 0xc
-	IFLA_TXQLEN = 0xd
-	IFLA_MAP = 0xe
-	IFLA_WEIGHT = 0xf
-	IFLA_OPERSTATE = 0x10
-	IFLA_LINKMODE = 0x11
-	IFLA_LINKINFO = 0x12
-	IFLA_NET_NS_PID = 0x13
-	IFLA_IFALIAS = 0x14
-	IFLA_MAX = 0x27
-	RT_SCOPE_UNIVERSE = 0x0
-	RT_SCOPE_SITE = 0xc8
-	RT_SCOPE_LINK = 0xfd
-	RT_SCOPE_HOST = 0xfe
-	RT_SCOPE_NOWHERE = 0xff
-	RT_TABLE_UNSPEC = 0x0
-	RT_TABLE_COMPAT = 0xfc
-	RT_TABLE_DEFAULT = 0xfd
-	RT_TABLE_MAIN = 0xfe
-	RT_TABLE_LOCAL = 0xff
-	RT_TABLE_MAX = 0xffffffff
-	RTA_UNSPEC = 0x0
-	RTA_DST = 0x1
-	RTA_SRC = 0x2
-	RTA_IIF = 0x3
-	RTA_OIF = 0x4
-	RTA_GATEWAY = 0x5
-	RTA_PRIORITY = 0x6
-	RTA_PREFSRC = 0x7
-	RTA_METRICS = 0x8
-	RTA_MULTIPATH = 0x9
-	RTA_FLOW = 0xb
-	RTA_CACHEINFO = 0xc
-	RTA_TABLE = 0xf
-	RTN_UNSPEC = 0x0
-	RTN_UNICAST = 0x1
-	RTN_LOCAL = 0x2
-	RTN_BROADCAST = 0x3
-	RTN_ANYCAST = 0x4
-	RTN_MULTICAST = 0x5
-	RTN_BLACKHOLE = 0x6
-	RTN_UNREACHABLE = 0x7
-	RTN_PROHIBIT = 0x8
-	RTN_THROW = 0x9
-	RTN_NAT = 0xa
-	RTN_XRESOLVE = 0xb
-	RTNLGRP_NONE = 0x0
-	RTNLGRP_LINK = 0x1
-	RTNLGRP_NOTIFY = 0x2
-	RTNLGRP_NEIGH = 0x3
-	RTNLGRP_TC = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE = 0x7
-	RTNLGRP_IPV4_RULE = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE = 0x13
-	RTNLGRP_ND_USEROPT = 0x14
-	SizeofNlMsghdr = 0x10
-	SizeofNlMsgerr = 0x14
-	SizeofRtGenmsg = 0x1
-	SizeofNlAttr = 0x4
-	SizeofRtAttr = 0x4
-	SizeofIfInfomsg = 0x10
-	SizeofIfAddrmsg = 0x8
-	SizeofRtMsg = 0xc
-	SizeofRtNexthop = 0x8
-)
-
-type NlMsghdr struct {
-	Len uint32
-	Type uint16
-	Flags uint16
-	Seq uint32
-	Pid uint32
-}
-
-type NlMsgerr struct {
-	Error int32
-	Msg NlMsghdr
-}
-
-type RtGenmsg struct {
-	Family uint8
-}
-
-type NlAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type RtAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type IfInfomsg struct {
-	Family uint8
-	X__ifi_pad uint8
-	Type uint16
-	Index int32
-	Flags uint32
-	Change uint32
-}
-
-type IfAddrmsg struct {
-	Family uint8
-	Prefixlen uint8
-	Flags uint8
-	Scope uint8
-	Index uint32
-}
-
-type RtMsg struct {
-	Family uint8
-	Dst_len uint8
-	Src_len uint8
-	Tos uint8
-	Table uint8
-	Protocol uint8
-	Scope uint8
-	Type uint8
-	Flags uint32
-}
-
-type RtNexthop struct {
-	Len uint16
-	Flags uint8
-	Hops uint8
-	Ifindex int32
-}
-
-const (
-	SizeofSockFilter = 0x8
-	SizeofSockFprog = 0x10
-)
-
-type SockFilter struct {
-	Code uint16
-	Jt uint8
-	Jf uint8
-	K uint32
-}
-
-type SockFprog struct {
-	Len uint16
-	Pad_cgo_0 [6]byte
-	Filter *SockFilter
-}
-
-type InotifyEvent struct {
-	Wd int32
-	Mask uint32
-	Cookie uint32
-	Len uint32
-}
-
-const SizeofInotifyEvent = 0x10
-
-type PtraceRegs struct {
-	Regs [102]uint64
-	U_tsize uint64
-	U_dsize uint64
-	U_ssize uint64
-	Start_code uint64
-	Start_data uint64
-	Start_stack uint64
-	Signal int64
-	U_ar0 uint64
-	Magic uint64
-	U_comm [32]int8
-}
-
-type FdSet struct {
-	Bits [16]int64
-}
-
-type Sysinfo_t struct {
-	Uptime int64
-	Loads [3]uint64
-	Totalram uint64
-	Freeram uint64
-	Sharedram uint64
-	Bufferram uint64
-	Totalswap uint64
-	Freeswap uint64
-	Procs uint16
-	Pad uint16
-	Pad_cgo_0 [4]byte
-	Totalhigh uint64
-	Freehigh uint64
-	Unit uint32
-	X_f [0]int8
-	Pad_cgo_1 [4]byte
-}
-
-type Utsname struct {
-	Sysname [65]int8
-	Nodename [65]int8
-	Release [65]int8
-	Version [65]int8
-	Machine [65]int8
-	Domainname [65]int8
-}
-
-type Ustat_t struct {
-	Tfree int32
-	Pad_cgo_0 [4]byte
-	Tinode uint64
-	Fname [6]int8
-	Fpack [6]int8
-	Pad_cgo_1 [4]byte
-}
-
-type EpollEvent struct {
-	Events uint32
-	Fd int32
-	Pad int32
-}
-
-const (
-	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
-	AT_SYMLINK_NOFOLLOW = 0x100
-)
-
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
-type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [32]uint8
-	Pad_cgo_0 [3]byte
-}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 2db548b90..808203d07 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -1,7 +1,8 @@
-// +build ppc64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
+// +build ppc64,linux
+
 package unix
 
 const (
@@ -201,19 +202,6 @@ type RawSockaddrNetlink struct {
 	Groups uint32
 }
 
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
 type RawSockaddr struct {
 	Family uint16
 	Data [14]uint8
@@ -338,8 +326,6 @@ const (
 	SizeofSockaddrUnix = 0x6e
 	SizeofSockaddrLinklayer = 0x14
 	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
 	SizeofLinger = 0x8
 	SizeofIPMreq = 0x8
 	SizeofIPMreqn = 0xc
@@ -597,46 +583,25 @@ type Ustat_t struct {
 }
 
 type EpollEvent struct {
-	Events uint32
-	X_padFd int32
-	Fd int32
-	Pad int32
+	Events uint32
+	Fd int32
+	Pad int32
 }
 
 const (
 	AT_FDCWD = -0x64
 	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
 type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Cc [19]uint8
-	Line uint8
-	Ispeed uint32
-	Ospeed uint32
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 4bfdcc0ac..d4a689faf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -1,7 +1,8 @@
-// +build ppc64le,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
+// +build ppc64le,linux
+
 package unix
 
 const (
@@ -201,19 +202,6 @@ type RawSockaddrNetlink struct {
 	Groups uint32
 }
 
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
 type RawSockaddr struct {
 	Family uint16
 	Data [14]uint8
@@ -338,8 +326,6 @@ const (
 	SizeofSockaddrUnix = 0x6e
 	SizeofSockaddrLinklayer = 0x14
 	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
 	SizeofLinger = 0x8
 	SizeofIPMreq = 0x8
 	SizeofIPMreqn = 0xc
@@ -597,46 +583,25 @@ type Ustat_t struct {
 }
 
 type EpollEvent struct {
-	Events uint32
-	X_padFd int32
-	Fd int32
-	Pad int32
+	Events uint32
+	Fd int32
+	Pad int32
 }
 
 const (
 	AT_FDCWD = -0x64
 	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
 type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Cc [19]uint8
-	Line uint8
-	Ispeed uint32
-	Ospeed uint32
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Line uint8
+	Cc [32]uint8
+	Pad_cgo_0 [3]byte
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
deleted file mode 100644
index 435cd792f..000000000
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// +build s390x,linux
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs -- -fsigned-char types_linux.go
-
-package unix
-
-const (
-	sizeofPtr = 0x8
-	sizeofShort = 0x2
-	sizeofInt = 0x4
-	sizeofLong = 0x8
-	sizeofLongLong = 0x8
-	PathMax = 0x1000
-)
-
-type (
-	_C_short int16
-	_C_int int32
-	_C_long int64
-	_C_long_long int64
-)
-
-type Timespec struct {
-	Sec int64
-	Nsec int64
-}
-
-type Timeval struct {
-	Sec int64
-	Usec int64
-}
-
-type Timex struct {
-	Modes uint32
-	_ [4]byte
-	Offset int64
-	Freq int64
-	Maxerror int64
-	Esterror int64
-	Status int32
-	_ [4]byte
-	Constant int64
-	Precision int64
-	Tolerance int64
-	Time Timeval
-	Tick int64
-	Ppsfreq int64
-	Jitter int64
-	Shift int32
-	_ [4]byte
-	Stabil int64
-	Jitcnt int64
-	Calcnt int64
-	Errcnt int64
-	Stbcnt int64
-	Tai int32
-	_ [44]byte
-}
-
-type Time_t int64
-
-type Tms struct {
-	Utime int64
-	Stime int64
-	Cutime int64
-	Cstime int64
-}
-
-type Utimbuf struct {
-	Actime int64
-	Modtime int64
-}
-
-type Rusage struct {
-	Utime Timeval
-	Stime Timeval
-	Maxrss int64
-	Ixrss int64
-	Idrss int64
-	Isrss int64
-	Minflt int64
-	Majflt int64
-	Nswap int64
-	Inblock int64
-	Oublock int64
-	Msgsnd int64
-	Msgrcv int64
-	Nsignals int64
-	Nvcsw int64
-	Nivcsw int64
-}
-
-type Rlimit struct {
-	Cur uint64
-	Max uint64
-}
-
-type _Gid_t uint32
-
-type Stat_t struct {
-	Dev uint64
-	Ino uint64
-	Nlink uint64
-	Mode uint32
-	Uid uint32
-	Gid uint32
-	_ int32
-	Rdev uint64
-	Size int64
-	Atim Timespec
-	Mtim Timespec
-	Ctim Timespec
-	Blksize int64
-	Blocks int64
-	_ [3]int64
-}
-
-type Statfs_t struct {
-	Type uint32
-	Bsize uint32
-	Blocks uint64
-	Bfree uint64
-	Bavail uint64
-	Files uint64
-	Ffree uint64
-	Fsid Fsid
-	Namelen uint32
-	Frsize uint32
-	Flags uint32
-	Spare [4]uint32
-	_ [4]byte
-}
-
-type Dirent struct {
-	Ino uint64
-	Off int64
-	Reclen uint16
-	Type uint8
-	Name [256]int8
-	_ [5]byte
-}
-
-type Fsid struct {
-	_ [2]int32
-}
-
-type Flock_t struct {
-	Type int16
-	Whence int16
-	_ [4]byte
-	Start int64
-	Len int64
-	Pid int32
-	_ [4]byte
-}
-
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x6
-	FADV_NOREUSE = 0x7
-)
-
-type RawSockaddrInet4 struct {
-	Family uint16
-	Port uint16
-	Addr [4]byte /* in_addr */
-	Zero [8]uint8
-}
-
-type RawSockaddrInet6 struct {
-	Family uint16
-	Port uint16
-	Flowinfo uint32
-	Addr [16]byte /* in6_addr */
-	Scope_id uint32
-}
-
-type RawSockaddrUnix struct {
-	Family uint16
-	Path [108]int8
-}
-
-type RawSockaddrLinklayer struct {
-	Family uint16
-	Protocol uint16
-	Ifindex int32
-	Hatype uint16
-	Pkttype uint8
-	Halen uint8
-	Addr [8]uint8
-}
-
-type RawSockaddrNetlink struct {
-	Family uint16
-	Pad uint16
-	Pid uint32
-	Groups uint32
-}
-
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
-type RawSockaddr struct {
-	Family uint16
-	Data [14]int8
-}
-
-type RawSockaddrAny struct {
-	Addr RawSockaddr
-	Pad [96]int8
-}
-
-type _Socklen uint32
-
-type Linger struct {
-	Onoff int32
-	Linger int32
-}
-
-type Iovec struct {
-	Base *byte
-	Len uint64
-}
-
-type IPMreq struct {
-	Multiaddr [4]byte /* in_addr */
-	Interface [4]byte /* in_addr */
-}
-
-type IPMreqn struct {
-	Multiaddr [4]byte /* in_addr */
-	Address [4]byte /* in_addr */
-	Ifindex int32
-}
-
-type IPv6Mreq struct {
-	Multiaddr [16]byte /* in6_addr */
-	Interface uint32
-}
-
-type Msghdr struct {
-	Name *byte
-	Namelen uint32
-	_ [4]byte
-	Iov *Iovec
-	Iovlen uint64
-	Control *byte
-	Controllen uint64
-	Flags int32
-	_ [4]byte
-}
-
-type Cmsghdr struct {
-	Len uint64
-	Level int32
-	Type int32
-}
-
-type Inet4Pktinfo struct {
-	Ifindex int32
-	Spec_dst [4]byte /* in_addr */
-	Addr [4]byte /* in_addr */
-}
-
-type Inet6Pktinfo struct {
-	Addr [16]byte /* in6_addr */
-	Ifindex uint32
-}
-
-type IPv6MTUInfo struct {
-	Addr RawSockaddrInet6
-	Mtu uint32
-}
-
-type ICMPv6Filter struct {
-	Data [8]uint32
-}
-
-type Ucred struct {
-	Pid int32
-	Uid uint32
-	Gid uint32
-}
-
-type TCPInfo struct {
-	State uint8
-	Ca_state uint8
-	Retransmits uint8
-	Probes uint8
-	Backoff uint8
-	Options uint8
-	_ [2]byte
-	Rto uint32
-	Ato uint32
-	Snd_mss uint32
-	Rcv_mss uint32
-	Unacked uint32
-	Sacked uint32
-	Lost uint32
-	Retrans uint32
-	Fackets uint32
-	Last_data_sent uint32
-	Last_ack_sent uint32
-	Last_data_recv uint32
-	Last_ack_recv uint32
-	Pmtu uint32
-	Rcv_ssthresh uint32
-	Rtt uint32
-	Rttvar uint32
-	Snd_ssthresh uint32
-	Snd_cwnd uint32
-	Advmss uint32
-	Reordering uint32
-	Rcv_rtt uint32
-	Rcv_space uint32
-	Total_retrans uint32
-}
-
-const (
-	SizeofSockaddrInet4 = 0x10
-	SizeofSockaddrInet6 = 0x1c
-	SizeofSockaddrAny = 0x70
-	SizeofSockaddrUnix = 0x6e
-	SizeofSockaddrLinklayer = 0x14
-	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
-	SizeofLinger = 0x8
-	SizeofIPMreq = 0x8
-	SizeofIPMreqn = 0xc
-	SizeofIPv6Mreq = 0x14
-	SizeofMsghdr = 0x38
-	SizeofCmsghdr = 0x10
-	SizeofInet4Pktinfo = 0xc
-	SizeofInet6Pktinfo = 0x14
-	SizeofIPv6MTUInfo = 0x20
-	SizeofICMPv6Filter = 0x20
-	SizeofUcred = 0xc
-	SizeofTCPInfo = 0x68
-)
-
-const (
-	IFA_UNSPEC = 0x0
-	IFA_ADDRESS = 0x1
-	IFA_LOCAL = 0x2
-	IFA_LABEL = 0x3
-	IFA_BROADCAST = 0x4
-	IFA_ANYCAST = 0x5
-	IFA_CACHEINFO = 0x6
-	IFA_MULTICAST = 0x7
-	IFLA_UNSPEC = 0x0
-	IFLA_ADDRESS = 0x1
-	IFLA_BROADCAST = 0x2
-	IFLA_IFNAME = 0x3
-	IFLA_MTU = 0x4
-	IFLA_LINK = 0x5
-	IFLA_QDISC = 0x6
-	IFLA_STATS = 0x7
-	IFLA_COST = 0x8
-	IFLA_PRIORITY = 0x9
-	IFLA_MASTER = 0xa
-	IFLA_WIRELESS = 0xb
-	IFLA_PROTINFO = 0xc
-	IFLA_TXQLEN = 0xd
-	IFLA_MAP = 0xe
-	IFLA_WEIGHT = 0xf
-	IFLA_OPERSTATE = 0x10
-	IFLA_LINKMODE = 0x11
-	IFLA_LINKINFO = 0x12
-	IFLA_NET_NS_PID = 0x13
-	IFLA_IFALIAS = 0x14
-	IFLA_MAX = 0x27
-	RT_SCOPE_UNIVERSE = 0x0
-	RT_SCOPE_SITE = 0xc8
-	RT_SCOPE_LINK = 0xfd
-	RT_SCOPE_HOST = 0xfe
-	RT_SCOPE_NOWHERE = 0xff
-	RT_TABLE_UNSPEC = 0x0
-	RT_TABLE_COMPAT = 0xfc
-	RT_TABLE_DEFAULT = 0xfd
-	RT_TABLE_MAIN = 0xfe
-	RT_TABLE_LOCAL = 0xff
-	RT_TABLE_MAX = 0xffffffff
-	RTA_UNSPEC = 0x0
-	RTA_DST = 0x1
-	RTA_SRC = 0x2
-	RTA_IIF = 0x3
-	RTA_OIF = 0x4
-	RTA_GATEWAY = 0x5
-	RTA_PRIORITY = 0x6
-	RTA_PREFSRC = 0x7
-	RTA_METRICS = 0x8
-	RTA_MULTIPATH = 0x9
-	RTA_FLOW = 0xb
-	RTA_CACHEINFO = 0xc
-	RTA_TABLE = 0xf
-	RTN_UNSPEC = 0x0
-	RTN_UNICAST = 0x1
-	RTN_LOCAL = 0x2
-	RTN_BROADCAST = 0x3
-	RTN_ANYCAST = 0x4
-	RTN_MULTICAST = 0x5
-	RTN_BLACKHOLE = 0x6
-	RTN_UNREACHABLE = 0x7
-	RTN_PROHIBIT = 0x8
-	RTN_THROW = 0x9
-	RTN_NAT = 0xa
-	RTN_XRESOLVE = 0xb
-	RTNLGRP_NONE = 0x0
-	RTNLGRP_LINK = 0x1
-	RTNLGRP_NOTIFY = 0x2
-	RTNLGRP_NEIGH = 0x3
-	RTNLGRP_TC = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE = 0x7
-	RTNLGRP_IPV4_RULE = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE = 0x13
-	RTNLGRP_ND_USEROPT = 0x14
-	SizeofNlMsghdr = 0x10
-	SizeofNlMsgerr = 0x14
-	SizeofRtGenmsg = 0x1
-	SizeofNlAttr = 0x4
-	SizeofRtAttr = 0x4
-	SizeofIfInfomsg = 0x10
-	SizeofIfAddrmsg = 0x8
-	SizeofRtMsg = 0xc
-	SizeofRtNexthop = 0x8
-)
-
-type NlMsghdr struct {
-	Len uint32
-	Type uint16
-	Flags uint16
-	Seq uint32
-	Pid uint32
-}
-
-type NlMsgerr struct {
-	Error int32
-	Msg NlMsghdr
-}
-
-type RtGenmsg struct {
-	Family uint8
-}
-
-type NlAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type RtAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type IfInfomsg struct {
-	Family uint8
-	_ uint8
-	Type uint16
-	Index int32
-	Flags uint32
-	Change uint32
-}
-
-type IfAddrmsg struct {
-	Family uint8
-	Prefixlen uint8
-	Flags uint8
-	Scope uint8
-	Index uint32
-}
-
-type RtMsg struct {
-	Family uint8
-	Dst_len uint8
-	Src_len uint8
-	Tos uint8
-	Table uint8
-	Protocol uint8
-	Scope uint8
-	Type uint8
-	Flags uint32
-}
-
-type RtNexthop struct {
-	Len uint16
-	Flags uint8
-	Hops uint8
-	Ifindex int32
-}
-
-const (
-	SizeofSockFilter = 0x8
-	SizeofSockFprog = 0x10
-)
-
-type SockFilter struct {
-	Code uint16
-	Jt uint8
-	Jf uint8
-	K uint32
-}
-
-type SockFprog struct {
-	Len uint16
-	_ [6]byte
-	Filter *SockFilter
-}
-
-type InotifyEvent struct {
-	Wd int32
-	Mask uint32
-	Cookie uint32
-	Len uint32
-}
-
-const SizeofInotifyEvent = 0x10
-
-type PtraceRegs struct {
-	Psw PtracePsw
-	Gprs [16]uint64
-	Acrs [16]uint32
-	Orig_gpr2 uint64
-	Fp_regs PtraceFpregs
-	Per_info PtracePer
-	Ieee_instruction_pointer uint64
-}
-
-type PtracePsw struct {
-	Mask uint64
-	Addr uint64
-}
-
-type PtraceFpregs struct {
-	Fpc uint32
-	_ [4]byte
-	Fprs [16]float64
-}
-
-type PtracePer struct {
-	_ [0]uint64
-	_ [24]byte
-	_ [8]byte
-	Starting_addr uint64
-	Ending_addr uint64
-	Perc_atmid uint16
-	_ [6]byte
-	Address uint64
-	Access_id uint8
-	_ [7]byte
-}
-
-type FdSet struct {
-	Bits [16]int64
-}
-
-type Sysinfo_t struct {
-	Uptime int64
-	Loads [3]uint64
-	Totalram uint64
-	Freeram uint64
-	Sharedram uint64
-	Bufferram uint64
-	Totalswap uint64
-	Freeswap uint64
-	Procs uint16
-	Pad uint16
-	_ [4]byte
-	Totalhigh uint64
-	Freehigh uint64
-	Unit uint32
-	_ [0]int8
-	_ [4]byte
-}
-
-type Utsname struct {
-	Sysname [65]int8
-	Nodename [65]int8
-	Release [65]int8
-	Version [65]int8
-	Machine [65]int8
-	Domainname [65]int8
-}
-
-type Ustat_t struct {
-	Tfree int32
-	_ [4]byte
-	Tinode uint64
-	Fname [6]int8
-	Fpack [6]int8
-	_ [4]byte
-}
-
-type EpollEvent struct {
-	Events uint32
-	_ int32
-	Fd int32
-	Pad int32
-}
-
-const (
-	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
-	AT_SYMLINK_NOFOLLOW = 0x100
-)
-
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x2000
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
-type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
-}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
deleted file mode 100644
index 439f96914..000000000
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// +build sparc64,linux
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_linux.go | go run mkpost.go
-
-package unix
-
-const (
-	sizeofPtr = 0x8
-	sizeofShort = 0x2
-	sizeofInt = 0x4
-	sizeofLong = 0x8
-	sizeofLongLong = 0x8
-	PathMax = 0x1000
-)
-
-type (
-	_C_short int16
-	_C_int int32
-	_C_long int64
-	_C_long_long int64
-)
-
-type Timespec struct {
-	Sec int64
-	Nsec int64
-}
-
-type Timeval struct {
-	Sec int64
-	Usec int32
-	Pad_cgo_0 [4]byte
-}
-
-type Timex struct {
-	Modes uint32
-	Pad_cgo_0 [4]byte
-	Offset int64
-	Freq int64
-	Maxerror int64
-	Esterror int64
-	Status int32
-	Pad_cgo_1 [4]byte
-	Constant int64
-	Precision int64
-	Tolerance int64
-	Time Timeval
-	Tick int64
-	Ppsfreq int64
-	Jitter int64
-	Shift int32
-	Pad_cgo_2 [4]byte
-	Stabil int64
-	Jitcnt int64
-	Calcnt int64
-	Errcnt int64
-	Stbcnt int64
-	Tai int32
-	Pad_cgo_3 [44]byte
-}
-
-type Time_t int64
-
-type Tms struct {
-	Utime int64
-	Stime int64
-	Cutime int64
-	Cstime int64
-}
-
-type Utimbuf struct {
-	Actime int64
-	Modtime int64
-}
-
-type Rusage struct {
-	Utime Timeval
-	Stime Timeval
-	Maxrss int64
-	Ixrss int64
-	Idrss int64
-	Isrss int64
-	Minflt int64
-	Majflt int64
-	Nswap int64
-	Inblock int64
-	Oublock int64
-	Msgsnd int64
-	Msgrcv int64
-	Nsignals int64
-	Nvcsw int64
-	Nivcsw int64
-}
-
-type Rlimit struct {
-	Cur uint64
-	Max uint64
-}
-
-type _Gid_t uint32
-
-type Stat_t struct {
-	Dev uint64
-	X__pad1 uint16
-	Pad_cgo_0 [6]byte
-	Ino uint64
-	Mode uint32
-	Nlink uint32
-	Uid uint32
-	Gid uint32
-	Rdev uint64
-	X__pad2 uint16
-	Pad_cgo_1 [6]byte
-	Size int64
-	Blksize int64
-	Blocks int64
-	Atim Timespec
-	Mtim Timespec
-	Ctim Timespec
-	X__glibc_reserved4 uint64
-	X__glibc_reserved5 uint64
-}
-
-type Statfs_t struct {
-	Type int64
-	Bsize int64
-	Blocks uint64
-	Bfree uint64
-	Bavail uint64
-	Files uint64
-	Ffree uint64
-	Fsid Fsid
-	Namelen int64
-	Frsize int64
-	Flags int64
-	Spare [4]int64
-}
-
-type Dirent struct {
-	Ino uint64
-	Off int64
-	Reclen uint16
-	Type uint8
-	Name [256]int8
-	Pad_cgo_0 [5]byte
-}
-
-type Fsid struct {
-	X__val [2]int32
-}
-
-type Flock_t struct {
-	Type int16
-	Whence int16
-	Pad_cgo_0 [4]byte
-	Start int64
-	Len int64
-	Pid int32
-	X__glibc_reserved int16
-	Pad_cgo_1 [2]byte
-}
-
-const (
-	FADV_NORMAL = 0x0
-	FADV_RANDOM = 0x1
-	FADV_SEQUENTIAL = 0x2
-	FADV_WILLNEED = 0x3
-	FADV_DONTNEED = 0x4
-	FADV_NOREUSE = 0x5
-)
-
-type RawSockaddrInet4 struct {
-	Family uint16
-	Port uint16
-	Addr [4]byte /* in_addr */
-	Zero [8]uint8
-}
-
-type RawSockaddrInet6 struct {
-	Family uint16
-	Port uint16
-	Flowinfo uint32
-	Addr [16]byte /* in6_addr */
-	Scope_id uint32
-}
-
-type RawSockaddrUnix struct {
-	Family uint16
-	Path [108]int8
-}
-
-type RawSockaddrLinklayer struct {
-	Family uint16
-	Protocol uint16
-	Ifindex int32
-	Hatype uint16
-	Pkttype uint8
-	Halen uint8
-	Addr [8]uint8
-}
-
-type RawSockaddrNetlink struct {
-	Family uint16
-	Pad uint16
-	Pid uint32
-	Groups uint32
-}
-
-type RawSockaddrHCI struct {
-	Family uint16
-	Dev uint16
-	Channel uint16
-}
-
-type RawSockaddrCAN struct {
-	Family uint16
-	Pad_cgo_0 [2]byte
-	Ifindex int32
-	Addr [8]byte
-}
-
-type RawSockaddr struct {
-	Family uint16
-	Data [14]int8
-}
-
-type RawSockaddrAny struct {
-	Addr RawSockaddr
-	Pad [96]int8
-}
-
-type _Socklen uint32
-
-type Linger struct {
-	Onoff int32
-	Linger int32
-}
-
-type Iovec struct {
-	Base *byte
-	Len uint64
-}
-
-type IPMreq struct {
-	Multiaddr [4]byte /* in_addr */
-	Interface [4]byte /* in_addr */
-}
-
-type IPMreqn struct {
-	Multiaddr [4]byte /* in_addr */
-	Address [4]byte /* in_addr */
-	Ifindex int32
-}
-
-type IPv6Mreq struct {
-	Multiaddr [16]byte /* in6_addr */
-	Interface uint32
-}
-
-type Msghdr struct {
-	Name *byte
-	Namelen uint32
-	Pad_cgo_0 [4]byte
-	Iov *Iovec
-	Iovlen uint64
-	Control *byte
-	Controllen uint64
-	Flags int32
-	Pad_cgo_1 [4]byte
-}
-
-type Cmsghdr struct {
-	Len uint64
-	Level int32
-	Type int32
-}
-
-type Inet4Pktinfo struct {
-	Ifindex int32
-	Spec_dst [4]byte /* in_addr */
-	Addr [4]byte /* in_addr */
-}
-
-type Inet6Pktinfo struct {
-	Addr [16]byte /* in6_addr */
-	Ifindex uint32
-}
-
-type IPv6MTUInfo struct {
-	Addr RawSockaddrInet6
-	Mtu uint32
-}
-
-type ICMPv6Filter struct {
-	Data [8]uint32
-}
-
-type Ucred struct {
-	Pid int32
-	Uid uint32
-	Gid uint32
-}
-
-type TCPInfo struct {
-	State uint8
-	Ca_state uint8
-	Retransmits uint8
-	Probes uint8
-	Backoff uint8
-	Options uint8
-	Pad_cgo_0 [2]byte
-	Rto uint32
-	Ato uint32
-	Snd_mss uint32
-	Rcv_mss uint32
-	Unacked uint32
-	Sacked uint32
-	Lost uint32
-	Retrans uint32
-	Fackets uint32
-	Last_data_sent uint32
-	Last_ack_sent uint32
-	Last_data_recv uint32
-	Last_ack_recv uint32
-	Pmtu uint32
-	Rcv_ssthresh uint32
-	Rtt uint32
-	Rttvar uint32
-	Snd_ssthresh uint32
-	Snd_cwnd uint32
-	Advmss uint32
-	Reordering uint32
-	Rcv_rtt uint32
-	Rcv_space uint32
-	Total_retrans uint32
-}
-
-const (
-	SizeofSockaddrInet4 = 0x10
-	SizeofSockaddrInet6 = 0x1c
-	SizeofSockaddrAny = 0x70
-	SizeofSockaddrUnix = 0x6e
-	SizeofSockaddrLinklayer = 0x14
-	SizeofSockaddrNetlink = 0xc
-	SizeofSockaddrHCI = 0x6
-	SizeofSockaddrCAN = 0x10
-	SizeofLinger = 0x8
-	SizeofIPMreq = 0x8
-	SizeofIPMreqn = 0xc
-	SizeofIPv6Mreq = 0x14
-	SizeofMsghdr = 0x38
-	SizeofCmsghdr = 0x10
-	SizeofInet4Pktinfo = 0xc
-	SizeofInet6Pktinfo = 0x14
-	SizeofIPv6MTUInfo = 0x20
-	SizeofICMPv6Filter = 0x20
-	SizeofUcred = 0xc
-	SizeofTCPInfo = 0x68
-)
-
-const (
-	IFA_UNSPEC = 0x0
-	IFA_ADDRESS = 0x1
-	IFA_LOCAL = 0x2
-	IFA_LABEL = 0x3
-	IFA_BROADCAST = 0x4
-	IFA_ANYCAST = 0x5
-	IFA_CACHEINFO = 0x6
-	IFA_MULTICAST = 0x7
-	IFLA_UNSPEC = 0x0
-	IFLA_ADDRESS = 0x1
-	IFLA_BROADCAST = 0x2
-	IFLA_IFNAME = 0x3
-	IFLA_MTU = 0x4
-	IFLA_LINK = 0x5
-	IFLA_QDISC = 0x6
-	IFLA_STATS = 0x7
-	IFLA_COST = 0x8
-	IFLA_PRIORITY = 0x9
-	IFLA_MASTER = 0xa
-	IFLA_WIRELESS = 0xb
-	IFLA_PROTINFO = 0xc
-	IFLA_TXQLEN = 0xd
-	IFLA_MAP = 0xe
-	IFLA_WEIGHT = 0xf
-	IFLA_OPERSTATE = 0x10
-	IFLA_LINKMODE = 0x11
-	IFLA_LINKINFO = 0x12
-	IFLA_NET_NS_PID = 0x13
-	IFLA_IFALIAS = 0x14
-	IFLA_MAX = 0x2a
-	RT_SCOPE_UNIVERSE = 0x0
-	RT_SCOPE_SITE = 0xc8
-	RT_SCOPE_LINK = 0xfd
-	RT_SCOPE_HOST = 0xfe
-	RT_SCOPE_NOWHERE = 0xff
-	RT_TABLE_UNSPEC = 0x0
-	RT_TABLE_COMPAT = 0xfc
-	RT_TABLE_DEFAULT = 0xfd
-	RT_TABLE_MAIN = 0xfe
-	RT_TABLE_LOCAL = 0xff
-	RT_TABLE_MAX = 0xffffffff
-	RTA_UNSPEC = 0x0
-	RTA_DST = 0x1
-	RTA_SRC = 0x2
-	RTA_IIF = 0x3
-	RTA_OIF = 0x4
-	RTA_GATEWAY = 0x5
-	RTA_PRIORITY = 0x6
-	RTA_PREFSRC = 0x7
-	RTA_METRICS = 0x8
-	RTA_MULTIPATH = 0x9
-	RTA_FLOW = 0xb
-	RTA_CACHEINFO = 0xc
-	RTA_TABLE = 0xf
-	RTN_UNSPEC = 0x0
-	RTN_UNICAST = 0x1
-	RTN_LOCAL = 0x2
-	RTN_BROADCAST = 0x3
-	RTN_ANYCAST = 0x4
-	RTN_MULTICAST = 0x5
-	RTN_BLACKHOLE = 0x6
-	RTN_UNREACHABLE = 0x7
-	RTN_PROHIBIT = 0x8
-	RTN_THROW = 0x9
-	RTN_NAT = 0xa
-	RTN_XRESOLVE = 0xb
-	RTNLGRP_NONE = 0x0
-	RTNLGRP_LINK = 0x1
-	RTNLGRP_NOTIFY = 0x2
-	RTNLGRP_NEIGH = 0x3
-	RTNLGRP_TC = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE = 0x7
-	RTNLGRP_IPV4_RULE = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE = 0x13
-	RTNLGRP_ND_USEROPT = 0x14
-	SizeofNlMsghdr = 0x10
-	SizeofNlMsgerr = 0x14
-	SizeofRtGenmsg = 0x1
-	SizeofNlAttr = 0x4
-	SizeofRtAttr = 0x4
-	SizeofIfInfomsg = 0x10
-	SizeofIfAddrmsg = 0x8
-	SizeofRtMsg = 0xc
-	SizeofRtNexthop = 0x8
-)
-
-type NlMsghdr struct {
-	Len uint32
-	Type uint16
-	Flags uint16
-	Seq uint32
-	Pid uint32
-}
-
-type NlMsgerr struct {
-	Error int32
-	Msg NlMsghdr
-}
-
-type RtGenmsg struct {
-	Family uint8
-}
-
-type NlAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type RtAttr struct {
-	Len uint16
-	Type uint16
-}
-
-type IfInfomsg struct {
-	Family uint8
-	X__ifi_pad uint8
-	Type uint16
-	Index int32
-	Flags uint32
-	Change uint32
-}
-
-type IfAddrmsg struct {
-	Family uint8
-	Prefixlen uint8
-	Flags uint8
-	Scope uint8
-	Index uint32
-}
-
-type RtMsg struct {
-	Family uint8
-	Dst_len uint8
-	Src_len uint8
-	Tos uint8
-	Table uint8
-	Protocol uint8
-	Scope uint8
-	Type uint8
-	Flags uint32
-}
-
-type RtNexthop struct {
-	Len uint16
-	Flags uint8
-	Hops uint8
-	Ifindex int32
-}
-
-const (
-	SizeofSockFilter = 0x8
-	SizeofSockFprog = 0x10
-)
-
-type SockFilter struct {
-	Code uint16
-	Jt uint8
-	Jf uint8
-	K uint32
-}
-
-type SockFprog struct {
-	Len uint16
-	Pad_cgo_0 [6]byte
-	Filter *SockFilter
-}
-
-type InotifyEvent struct {
-	Wd int32
-	Mask uint32
-	Cookie uint32
-	Len uint32
-}
-
-const SizeofInotifyEvent = 0x10
-
-type PtraceRegs struct {
-	Regs [16]uint64
-	Tstate uint64
-	Tpc uint64
-	Tnpc uint64
-	Y uint32
-	Magic uint32
-}
-
-type ptracePsw struct {
-}
-
-type ptraceFpregs struct {
-}
-
-type ptracePer struct {
-}
-
-type FdSet struct {
-	Bits [16]int64
-}
-
-type Sysinfo_t struct {
-	Uptime int64
-	Loads [3]uint64
-	Totalram uint64
-	Freeram uint64
-	Sharedram uint64
-	Bufferram uint64
-	Totalswap uint64
-	Freeswap uint64
-	Procs uint16
-	Pad uint16
-	Pad_cgo_0 [4]byte
-	Totalhigh uint64
-	Freehigh uint64
-	Unit uint32
-	X_f [0]int8
-	Pad_cgo_1 [4]byte
-}
-
-type Utsname struct {
-	Sysname [65]int8
-	Nodename [65]int8
-	Release [65]int8
-	Version [65]int8
-	Machine [65]int8
-	Domainname [65]int8
-}
-
-type Ustat_t struct {
-	Tfree int32
-	Pad_cgo_0 [4]byte
-	Tinode uint64
-	Fname [6]int8
-	Fpack [6]int8
-	Pad_cgo_1 [4]byte
-}
-
-type EpollEvent struct {
-	Events uint32
-	X_padFd int32
-	Fd int32
-	Pad int32
-}
-
-const (
-	AT_FDCWD = -0x64
-	AT_REMOVEDIR = 0x200
-	AT_SYMLINK_FOLLOW = 0x400
-	AT_SYMLINK_NOFOLLOW = 0x100
-)
-
-type PollFd struct {
-	Fd int32
-	Events int16
-	Revents int16
-}
-
-const (
-	POLLIN = 0x1
-	POLLPRI = 0x2
-	POLLOUT = 0x4
-	POLLRDHUP = 0x800
-	POLLERR = 0x8
-	POLLHUP = 0x10
-	POLLNVAL = 0x20
-)
-
-type Sigset_t struct {
-	X__val [16]uint64
-}
-
-const _SC_PAGESIZE = 0x1e
-
-type Termios struct {
-	Iflag uint32
-	Oflag uint32
-	Cflag uint32
-	Lflag uint32
-	Line uint8
-	Cc [19]uint8
-	Ispeed uint32
-	Ospeed uint32
-}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index 02777e4d8..45e9f4222 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -1,6 +1,7 @@
-// +build amd64,solaris
 // Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_solaris.go | go run mkpost.go
+// cgo -godefs types_solaris.go
+
+// +build amd64,solaris
 
 package unix
 
@@ -10,8 +11,6 @@ const (
 	sizeofInt = 0x4
 	sizeofLong = 0x8
 	sizeofLongLong = 0x8
-	PathMax = 0x400
-	MaxHostNameLen = 0x100
 )
 
 type (
@@ -36,18 +35,6 @@ type Timeval32 struct {
 	Usec int32
 }
 
-type Tms struct {
-	Utime int64
-	Stime int64
-	Cutime int64
-	Cstime int64
-}
-
-type Utimbuf struct {
-	Actime int64
-	Modtime int64
-}
-
 type Rusage struct {
 	Utime Timeval
 	Stime Timeval
@@ -243,30 +230,6 @@ type FdSet struct {
 	Bits [1024]int64
 }
 
-type Utsname struct {
-	Sysname [257]int8
-	Nodename [257]int8
-	Release [257]int8
-	Version [257]int8
-	Machine [257]int8
-}
-
-type Ustat_t struct {
-	Tfree int64
-	Tinode uint64
-	Fname [6]int8
-	Fpack [6]int8
-	Pad_cgo_0 [4]byte
-}
-
-const (
-	AT_FDCWD = 0xffd19553
-	AT_SYMLINK_NOFOLLOW = 0x1000
-	AT_SYMLINK_FOLLOW = 0x2000
-	AT_REMOVEDIR = 0x1
-	AT_EACCESS = 0x4
-)
-
 const (
 	SizeofIfMsghdr = 0x54
 	SizeofIfData = 0x44
@@ -394,8 +357,6 @@ type BpfHdr struct {
 	Pad_cgo_0 [2]byte
 }
 
-const _SC_PAGESIZE = 0xb
-
 type Termios struct {
 	Iflag uint32
 	Oflag uint32
@@ -404,20 +365,3 @@ type Termios struct {
 	Cc [19]uint8
 	Pad_cgo_0 [1]byte
 }
-
-type Termio struct {
-	Iflag uint16
-	Oflag uint16
-	Cflag uint16
-	Lflag uint16
-	Line int8
-	Cc [8]uint8
-	Pad_cgo_0 [1]byte
-}
-
-type Winsize struct {
-	Row uint16
-	Col uint16
-	Xpixel uint16
-	Ypixel uint16
-}
diff --git a/vendor/golang.org/x/sys/windows/asm.s b/vendor/golang.org/x/sys/windows/asm.s
new file mode 100644
index 000000000..d4ca868f1
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/asm.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+	RET
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 0f6204674..7f9f05f93 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -31,10 +31,6 @@ type DLL struct {
 }
 
 // LoadDLL loads DLL file into memory.
-//
-// Warning: using LoadDLL without an absolute path name is subject to
-// DLL preloading attacks. To safely load a system DLL, use LazyDLL
-// with System set to true, or use LoadLibraryEx directly.
 func LoadDLL(name string) (dll *DLL, err error) {
 	namep, err := UTF16PtrFromString(name)
 	if err != nil {
@@ -114,8 +110,6 @@ func (p *Proc) Addr() uintptr {
 	return p.addr
 }
 
-//go:uintptrescapes
-
 // Call executes procedure p with arguments a. It will panic, if more then 15 arguments
 // are supplied.
 //
@@ -168,48 +162,29 @@ func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
 // call to its Handle method or to one of its
 // LazyProc's Addr method.
 type LazyDLL struct {
+	mu sync.Mutex
+	dll *DLL // non nil once DLL is loaded
 	Name string
-
-	// System determines whether the DLL must be loaded from the
-	// Windows System directory, bypassing the normal DLL search
-	// path.
-	System bool
-
-	mu sync.Mutex
-	dll *DLL // non nil once DLL is loaded
 }
 
 // Load loads DLL file d.Name into memory. It returns an error if fails.
 // Load will not try to load DLL, if it is already loaded into memory.
 func (d *LazyDLL) Load() error {
 	// Non-racy version of:
-	// if d.dll != nil {
-	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
-		return nil
-	}
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	if d.dll != nil {
-		return nil
-	}
-
-	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
-	// The kernel already special-cases its name, so it's always
-	// loaded from system32.
-	var dll *DLL
-	var err error
-	if d.Name == "kernel32.dll" {
-		dll, err = LoadDLL(d.Name)
-	} else {
-		dll, err = loadLibraryEx(d.Name, d.System)
-	}
-	if err != nil {
-		return err
+	// if d.dll == nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) == nil {
+		d.mu.Lock()
+		defer d.mu.Unlock()
+		if d.dll == nil {
+			dll, e := LoadDLL(d.Name)
+			if e != nil {
+				return e
+			}
+			// Non-racy version of:
+			// d.dll = dll
+			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
+		}
 	}
-
-	// Non-racy version of:
-	// d.dll = dll
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
 	return nil
 }
 
@@ -237,19 +212,11 @@ func NewLazyDLL(name string) *LazyDLL {
 	return &LazyDLL{Name: name}
 }
 
-// NewLazySystemDLL is like NewLazyDLL, but will only
-// search Windows System directory for the DLL if name is
-// a base name (like "advapi32.dll").
-func NewLazySystemDLL(name string) *LazyDLL {
-	return &LazyDLL{Name: name, System: true}
-}
-
 // A LazyProc implements access to a procedure inside a LazyDLL.
 // It delays the lookup until the Addr method is called.
 type LazyProc struct {
-	Name string
-
 	mu sync.Mutex
+	Name string
 	l *LazyDLL
 	proc *Proc
 }
@@ -295,8 +262,6 @@ func (p *LazyProc) Addr() uintptr {
 	return p.proc.Addr()
 }
 
-//go:uintptrescapes
-
 // Call executes procedure p with arguments a. It will panic, if more then 15 arguments
 // are supplied.
 //
@@ -308,71 +273,3 @@ func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
 	p.mustFind()
 	return p.proc.Call(a...)
 }
-
-var canDoSearchSystem32Once struct {
-	sync.Once
-	v bool
-}
-
-func initCanDoSearchSystem32() {
-	// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
-	// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
-	// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
-	// systems that have KB2533623 installed. To determine whether the
-	// flags are available, use GetProcAddress to get the address of the
-	// AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories
-	// function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_*
-	// flags can be used with LoadLibraryEx."
-	canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil)
-}
-
-func canDoSearchSystem32() bool {
-	canDoSearchSystem32Once.Do(initCanDoSearchSystem32)
-	return canDoSearchSystem32Once.v
-}
-
-func isBaseName(name string) bool {
-	for _, c := range name {
-		if c == ':' || c == '/' || c == '\\' {
-			return false
-		}
-	}
-	return true
-}
-
-// loadLibraryEx wraps the Windows LoadLibraryEx function.
-//
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
-//
-// If name is not an absolute path, LoadLibraryEx searches for the DLL
-// in a variety of automatic locations unless constrained by flags.
-// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx
-func loadLibraryEx(name string, system bool) (*DLL, error) {
-	loadDLL := name
-	var flags uintptr
-	if system {
-		if canDoSearchSystem32() {
-			const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
-			flags = LOAD_LIBRARY_SEARCH_SYSTEM32
-		} else if isBaseName(name) {
-			// WindowsXP or unpatched Windows machine
-			// trying to load "foo.dll" out of the system
-			// folder, but LoadLibraryEx doesn't support
-			// that yet on their system, so emulate it.
-			windir, _ := Getenv("WINDIR") // old var; apparently works on XP
-			if windir == "" {
-				return nil, errString("%WINDIR% not defined")
-			}
-			loadDLL = windir + "\\System32\\" + name
-		}
-	}
-	h, err := LoadLibraryEx(loadDLL, 0, flags)
-	if err != nil {
-		return nil, err
-	}
-	return &DLL{Name: name, Handle: h}, nil
-}
-
-type errString string
-
-func (s errString) Error() string { return string(s) }
diff --git a/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/golang.org/x/sys/windows/env_unset.go
index 4ed03aeef..999ffac43 100644
--- a/vendor/golang.org/x/sys/windows/env_unset.go
+++ b/vendor/golang.org/x/sys/windows/env_unset.go
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build windows
 // +build go1.4
 
 package windows
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go
deleted file mode 100644
index e1c88c9c7..000000000
--- a/vendor/golang.org/x/sys/windows/mksyscall.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package windows
-
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
deleted file mode 100644
index 0ac95ffe7..000000000
--- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package registry - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/registry_test.go b/vendor/golang.org/x/sys/windows/registry/registry_test.go index 9c1b7820e..6547a45b2 100644 --- a/vendor/golang.org/x/sys/windows/registry/registry_test.go +++ b/vendor/golang.org/x/sys/windows/registry/registry_test.go @@ -701,19 +701,17 @@ func TestGetMUIStringValue(t *testing.T) { } defer timezoneK.Close() - type testType struct { + var tests = []struct { + key registry.Key name string want string - } - var tests = []testType{ - {"MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])}, - } - if dtzi.DynamicDaylightTimeDisabled == 0 { - tests = append(tests, testType{"MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])}) + }{ + {timezoneK, "MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])}, + {timezoneK, "MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])}, } for _, test := range tests { - got, err := timezoneK.GetMUIStringValue(test.name) + got, err := test.key.GetMUIStringValue(test.name) if err != nil { t.Error("GetMUIStringValue:", err) } diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index a6525dac5..5426cae90 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -8,6 +8,8 @@ package registry import "syscall" +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go + const ( _REG_OPTION_NON_VOLATILE = 0 diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 71d4e15ba..ac68810ec 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -108,7 +108,7 @@ func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) if len(data) == 0 { return "", typ, nil } - u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:] + u := (*[1 << 10]uint16)(unsafe.Pointer(&data[0]))[:] return syscall.UTF16ToString(u), typ, nil } @@ -185,7 +185,7 @@ func ExpandString(value string) (string, error) { return "", err } if n <= uint32(len(r)) { - u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:] + u := (*[1 << 15]uint16)(unsafe.Pointer(&r[0]))[:] return syscall.UTF16ToString(u), nil } r = make([]uint16, n) @@ -208,7 +208,7 @@ func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err err if len(data) == 0 { return nil, typ, nil } - p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] + p := (*[1 << 24]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] if len(p) == 0 { return nil, typ, nil } @@ -296,7 +296,7 @@ func (k Key) setStringValue(name string, valtype uint32, value string) error { if err != nil { return err } - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] return k.setValue(name, valtype, buf) } @@ -326,7 +326,7 @@ func (k Key) SetStringsValue(name string, value []string) error { ss += s + "\x00" } v := utf16.Encode([]rune(ss + "\x00")) - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] return k.setValue(name, MULTI_SZ, buf) } diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index 0fa24c6db..9c17675a2 100644 --- 
a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -2,17 +2,14 @@ package registry -import ( - "golang.org/x/sys/windows" - "syscall" - "unsafe" -) +import "unsafe" +import "syscall" var _ unsafe.Pointer var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go index da8ceb6ed..4d7e72ec4 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -85,15 +85,12 @@ func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Se if c.ErrorControl == 0 { c.ErrorControl = ErrorNormal } - if c.ServiceType == 0 { - c.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS - } s := syscall.EscapeArg(exepath) for _, v := range args { s += " " + syscall.EscapeArg(v) } h, err := windows.CreateService(m.Handle, toPtr(name), toPtr(c.DisplayName), - windows.SERVICE_ALL_ACCESS, c.ServiceType, + windows.SERVICE_ALL_ACCESS, windows.SERVICE_WIN32_OWN_PROCESS, c.StartType, c.ErrorControl, toPtr(s), toPtr(c.LoadOrderGroup), nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), toPtr(c.Password)) if err != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 4e2fbe86e..a8cc609b3 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -23,6 +23,7 @@ package windows // import "golang.org/x/sys/windows" import ( "syscall" + "unsafe" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -69,3 +70,8 @@ func (ts *Timespec) Nano() int64 { func (tv *Timeval) Nano() int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 } + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. 
+//go:noescape +func use(p unsafe.Pointer) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 592d73e03..441c193cf 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -14,6 +14,8 @@ import ( "unsafe" ) +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go + type Handle uintptr const InvalidHandle = ^Handle(0) @@ -82,7 +84,6 @@ func NewCallbackCDecl(fn interface{}) uintptr //sys GetLastError() (lasterr error) //sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW -//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW //sys FreeLibrary(handle Handle) (err error) //sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) //sys GetVersion() (ver uint32, err error) @@ -104,7 +105,6 @@ func NewCallbackCDecl(fn interface{}) uintptr //sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW //sys DeleteFile(path *uint16) (err error) = DeleteFileW //sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW -//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) @@ -369,7 +369,7 @@ func Rename(oldpath, newpath string) (err error) { if err != nil { return err } - return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) + return MoveFile(from, to) } func ComputerName() (name string, err error) { @@ -528,9 +528,6 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo //sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW -//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses -//sys GetACP() (acp uint32) = kernel32.GetACP -//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
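The `use` no-op reinstated in syscall.go above (backed by the assembly stub in asm.s) exists so that a Go object stays reachable across a raw system call after its pointer has been converted to a `uintptr`, which hides it from the garbage collector. A minimal sketch of the pattern with illustrative names, not the vendored code itself; since Go 1.7 the same intent is expressed directly with `runtime.KeepAlive`:

```go
package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// keepAlive plays the role of the patch's use(p): a call the compiler will
// not remove, so whatever p points to stays live until this point.
//go:noinline
func keepAlive(p unsafe.Pointer) {}

func main() {
	buf := make([]uint16, 8)
	// Once the address is a plain uintptr (as in the zsyscall wrappers),
	// the GC no longer treats it as a reference to buf.
	addr := uintptr(unsafe.Pointer(&buf[0]))
	fmt.Printf("would pass buffer at %#x to a raw syscall here\n", addr)
	keepAlive(unsafe.Pointer(&buf[0])) // keeps buf alive past the call site
	runtime.KeepAlive(buf)             // the modern (Go 1.7+) equivalent
}
```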
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 3ff8f5253..e130ddd08 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -2,25 +2,23 @@ package windows -import ( - "syscall" - "unsafe" -) +import "unsafe" +import "syscall" var _ unsafe.Pointer var ( - modadvapi32 = NewLazySystemDLL("advapi32.dll") - modkernel32 = NewLazySystemDLL("kernel32.dll") - modshell32 = NewLazySystemDLL("shell32.dll") - modmswsock = NewLazySystemDLL("mswsock.dll") - modcrypt32 = NewLazySystemDLL("crypt32.dll") - modws2_32 = NewLazySystemDLL("ws2_32.dll") - moddnsapi = NewLazySystemDLL("dnsapi.dll") - modiphlpapi = NewLazySystemDLL("iphlpapi.dll") - modsecur32 = NewLazySystemDLL("secur32.dll") - modnetapi32 = NewLazySystemDLL("netapi32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + modshell32 = syscall.NewLazyDLL("shell32.dll") + modmswsock = syscall.NewLazyDLL("mswsock.dll") + modcrypt32 = syscall.NewLazyDLL("crypt32.dll") + modws2_32 = syscall.NewLazyDLL("ws2_32.dll") + moddnsapi = syscall.NewLazyDLL("dnsapi.dll") + modiphlpapi = syscall.NewLazyDLL("iphlpapi.dll") + modsecur32 = syscall.NewLazyDLL("secur32.dll") + modnetapi32 = syscall.NewLazyDLL("netapi32.dll") + moduserenv = syscall.NewLazyDLL("userenv.dll") procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") @@ -41,7 +39,6 @@ var ( procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procGetLastError = modkernel32.NewProc("GetLastError") procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") procFreeLibrary = modkernel32.NewProc("FreeLibrary") procGetProcAddress = modkernel32.NewProc("GetProcAddress") procGetVersion = modkernel32.NewProc("GetVersion") @@ -63,7 +60,6 @@ var ( procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") procDeleteFileW = modkernel32.NewProc("DeleteFileW") procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") @@ -173,9 +169,6 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") procTranslateNameW = modsecur32.NewProc("TranslateNameW") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") @@ -433,28 +426,6 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { return } -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) -} - -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func FreeLibrary(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -729,18 +700,6 @@ func MoveFile(from *uint16, to *uint16) (err error) { return } -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetComputerName(buf *uint16, n *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) if r1 == 0 { @@ -2037,33 +1996,6 @@ func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferL return } -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) - return -} - -func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) if r1&0xff == 0 { diff --git a/vendor/golang.org/x/sys/windows/ztypes_windows.go b/vendor/golang.org/x/sys/windows/ztypes_windows.go index 1fe19d1d7..ea600f6a9 100644 --- a/vendor/golang.org/x/sys/windows/ztypes_windows.go +++ b/vendor/golang.org/x/sys/windows/ztypes_windows.go @@ -1135,108 +1135,3 @@ const ( ComputerNamePhysicalDnsFullyQualified = 7 ComputerNameMax = 8 ) - -const ( - MOVEFILE_REPLACE_EXISTING = 0x1 - MOVEFILE_COPY_ALLOWED = 0x2 - MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 - MOVEFILE_WRITE_THROUGH = 0x8 - MOVEFILE_CREATE_HARDLINK = 0x10 - MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 -) - -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 - -const ( - IF_TYPE_OTHER = 1 - IF_TYPE_ETHERNET_CSMACD = 6 - IF_TYPE_ISO88025_TOKENRING = 9 - IF_TYPE_PPP = 23 - IF_TYPE_SOFTWARE_LOOPBACK = 24 - IF_TYPE_ATM = 37 - IF_TYPE_IEEE80211 = 71 - IF_TYPE_TUNNEL = 131 - IF_TYPE_IEEE1394 = 144 -) - -type SocketAddress struct { - Sockaddr *syscall.RawSockaddrAny - SockaddrLength int32 -} - -type IpAdapterUnicastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterUnicastAddress - Address SocketAddress - 
PrefixOrigin int32 - SuffixOrigin int32 - DadState int32 - ValidLifetime uint32 - PreferredLifetime uint32 - LeaseLifetime uint32 - OnLinkPrefixLength uint8 -} - -type IpAdapterAnycastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterAnycastAddress - Address SocketAddress -} - -type IpAdapterMulticastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterMulticastAddress - Address SocketAddress -} - -type IpAdapterDnsServerAdapter struct { - Length uint32 - Reserved uint32 - Next *IpAdapterDnsServerAdapter - Address SocketAddress -} - -type IpAdapterPrefix struct { - Length uint32 - Flags uint32 - Next *IpAdapterPrefix - Address SocketAddress - PrefixLength uint32 -} - -type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - /* more fields might be present here. */ -} - -const ( - IfOperStatusUp = 1 - IfOperStatusDown = 2 - IfOperStatusTesting = 3 - IfOperStatusUnknown = 4 - IfOperStatusDormant = 5 - IfOperStatusNotPresent = 6 - IfOperStatusLowerLayerDown = 7 -) diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go index 48cd9e59a..d7031b694 100644 --- a/vendor/golang.org/x/text/internal/gen/code.go +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -197,16 +197,27 @@ func (w *CodeWriter) WriteString(s string) { // When starting on its own line, go fmt indents line 2+ an extra level. n, max := maxWidth, maxWidth-4 + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + // Print "" +\n, if a string does not start on its own line. b := w.buf.Bytes() if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { - w.printf("\"\" + // Size: %d bytes\n", len(s)) + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) n, max = maxWidth, maxWidth } w.printf(`"`) - for sz, p := 0, 0; p < len(s); { + for sz, p, nLines := 0, 0, 0; p < len(s); { var r rune r, sz = utf8.DecodeRuneInString(s[p:]) out := s[p : p+sz] @@ -223,6 +234,10 @@ func (w *CodeWriter) WriteString(s string) { chars = len(out) } if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } w.printf("\" +\n\"") n = max - len(out) } @@ -230,6 +245,9 @@ func (w *CodeWriter) WriteString(s string) { p += sz } w.printf(`"`) + if explicitParens { + w.printf(`)`) + } } // WriteSlice writes a slice value. 
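The `CodeWriter.WriteString` change above works around golang.org/issue/18078: compiling one enormous `s0 + s1 + ... + sN` chain is expensive, so the generator closes and reopens a parenthesized group every 64 emitted lines. A small sketch of the shape of the generated output; the fragment names and group size shown here are illustrative:

```go
package main

import "fmt"

// data mimics what CodeWriter.WriteString emits for very large strings:
// the concatenation is split into parenthesized groups so that no single
// expression grows without bound (see golang.org/issue/18078). The parens
// are semantically redundant; only the compile-time cost changes.
const data = ("" +
	"fragment-00" +
	"fragment-01" + // ...real output continues to fragment-63...
	"fragment-63") + ("" +
	"fragment-64" +
	"fragment-65")

func main() {
	fmt.Println(len(data)) // grouping does not change the string's value
}
```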
diff --git a/vendor/golang.org/x/text/secure/precis/options.go b/vendor/golang.org/x/text/secure/precis/options.go index 4934f8b97..488f0b1f7 100644 --- a/vendor/golang.org/x/text/secure/precis/options.go +++ b/vendor/golang.org/x/text/secure/precis/options.go @@ -20,6 +20,7 @@ type options struct { foldWidth bool // Enforcement options + asciiLower bool cases transform.SpanningTransformer disallow runes.Set norm transform.SpanningTransformer @@ -123,6 +124,7 @@ func Norm(f norm.Form) Option { // provided to determine the type of case folding used. func FoldCase(opts ...cases.Option) Option { return func(o *options) { + o.asciiLower = true o.cases = cases.Fold(opts...) } } @@ -131,6 +133,7 @@ func FoldCase(opts ...cases.Option) Option { // provided to determine the type of case folding used. func LowerCase(opts ...cases.Option) Option { return func(o *options) { + o.asciiLower = true if len(opts) == 0 { o.cases = cases.Lower(language.Und, cases.HandleFinalSigma(false)) return diff --git a/vendor/golang.org/x/text/secure/precis/profile.go b/vendor/golang.org/x/text/secure/precis/profile.go index 081f555a6..1d7898d47 100644 --- a/vendor/golang.org/x/text/secure/precis/profile.go +++ b/vendor/golang.org/x/text/secure/precis/profile.go @@ -118,9 +118,49 @@ var ( // TODO: make this a method on profile. func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, err error) { - // TODO: ASCII fast path, if options allow. b.src = src + ascii := true + for _, c := range src { + if c >= utf8.RuneSelf { + ascii = false + break + } + } + // ASCII fast path. + if ascii { + for _, f := range p.options.additional { + if err = b.apply(f()); err != nil { + return nil, err + } + } + switch { + case p.options.asciiLower || (comparing && p.options.ignorecase): + for i, c := range b.src { + if 'A' <= c && c <= 'Z' { + b.src[i] = c ^ 1<<5 + } + } + case p.options.cases != nil: + b.apply(p.options.cases) + } + c := checker{p: p} + if _, err := c.span(b.src, true); err != nil { + return nil, err + } + if p.disallow != nil { + for _, c := range b.src { + if p.disallow.Contains(rune(c)) { + return nil, errDisallowedRune + } + } + } + if p.options.disallowEmpty && len(b.src) == 0 { + return nil, errEmptyString + } + return b.src, nil + } + // These transforms are applied in the order defined in // https://tools.ietf.org/html/rfc7564#section-7 diff --git a/vendor/golang.org/x/text/unicode/runenames/tables.go b/vendor/golang.org/x/text/unicode/runenames/tables.go index a73a4965d..0e9db911b 100644 --- a/vendor/golang.org/x/text/unicode/runenames/tables.go +++ b/vendor/golang.org/x/text/unicode/runenames/tables.go @@ -4570,7 +4570,7 @@ var table1 = []uint16{ // 31130 elements 0x1477, 0x148d, } // Size: 62284 bytes -const data string = "" + // Size: 787597 bytes +const data string = ("" + // Size: 787597 bytes; the redundant, explicit parens are for https://golang.org/issue/18078 "<" + "Private Use>& /dev/null & + # - mkdir ${HOME}/elasticsearch + # - wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz + # - tar -xzvf elasticsearch-${ES_VERSION}.tar.gz -C ${HOME}/elasticsearch + # - ls -alFR ${HOME}/elasticsearch + # - cp -r config/* ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/config/ + # - cat ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/config/elasticsearch.yml + # - ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/bin/elasticsearch >& /dev/null & + - mkdir -p /tmp/elasticsearch/config + - cp -r config/* /tmp/elasticsearch/config/ + - sudo sysctl 
-w vm.max_map_count=262144 + - docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "/tmp/elasticsearch/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch >& /dev/null & - sleep 15 diff --git a/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-5.0.md b/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-5.0.md new file mode 100644 index 000000000..161c6a1ce --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/CHANGELOG-5.0.md @@ -0,0 +1,195 @@ +# Changes in Elastic 5.0 + +## Enforce context.Context in PerformRequest and Do + +We enforce the usage of `context.Context` everywhere you execute a request. +You need to change all your `Do()` calls to pass a context: `Do(ctx)`. +This enables automatic request cancelation and many other patterns. + +If you don't need this, simply pass `context.TODO()` or `context.Background()`. + +## Warmers removed + +Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers). + +## Optimize removed + +Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed). +Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead. + +## Missing Query removed + +The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query). +Use `exists` query with `must_not` in `bool` query instead. + +## And Query removed + +The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). +Use `must` clauses in a `bool` query instead. + +## Not Query removed + +TODO Is it removed? + +## Or Query removed + +The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). +Use `should` clauses in a `bool` query instead. + +## Filtered Query removed + +The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). +Use `bool` query instead, which supports `filter` clauses too. + +## Limit Query removed + +The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). +Use the `terminate_after` parameter instead. + +## Template Query removed + +The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use +Search Templates instead. + +We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity +to get rid of old stuff. + +## `_timestamp` and `_ttl` removed + +Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal). + +## Search template Put/Delete API returns `acknowledged` only + +The response type for Put/Delete search templates has changed. +It only returns a single `acknowledged` flag now.
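To make the `context.Context` change at the top of this changelog concrete, here is a minimal sketch of a migrated call site; the client defaults and the `"tweets"` index are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // connects to http://127.0.0.1:9200 by default
	if err != nil {
		log.Fatal(err)
	}

	// Elastic 3.x:  res, err := client.Search("tweets").Do()
	// Elastic 5.x:  every Do takes a context, enabling cancelation:
	ctx := context.Background()
	res, err := client.Search("tweets").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("search took %d ms\n", res.TookInMillis)
}
```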
+ +## Fields has been renamed to Stored Fields + +The `fields` parameter has been renamed to `stored_fields`. +See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter). + +## Fielddatafields has been renamed to Docvaluefields + +The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter) +to `docvalue_fields`. + +## Type exists endpoint changed + +The endpoint for checking whether a type exists has been changed from +`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`. +See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal). + +## Refresh parameter changed + +The `?refresh` parameter previously could be a boolean value. It indicated +whether changes made by a request (e.g. by the Bulk API) should be immediately +visible in search, or not. Using `refresh=true` had the positive effect of +immediately seeing the changes when searching; the negative effect is that +it is a rather big performance hit. + +With 5.0, you now have the choice between these 3 values: + +* `"true"` - Refresh immediately +* `"false"` - Do not refresh (the default value) +* `"wait_for"` - Wait until ES has made the document visible in search + +See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation. + +Notice that `true` and `false` (the boolean values) are no longer available +in Elastic. You must use a string instead, with one of the above values. + +## ReindexerService removed + +The `ReindexerService` was a custom solution that was started in the ES 1.x era +to automate reindexing data from one index to another, or even between clusters. + +ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html), +so we're going to remove our custom solution and ask you to use the native reindexer. + +The `ReindexService` is available via `client.Reindex()` (which used to point +to the custom reindexer). + +## Delete By Query back in core + +The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html) +was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API. + +It has its own endpoint at `/_delete_by_query`. + +Delete By Query, Reindex, and Update By Query are very similar under the hood. + +## Reindex, Delete By Query, and Update By Query response changed + +The response from the above APIs changed a bit. E.g. the `retries` value +used to be an `int64`; it now returns separate values for `bulk` and `search`: + +``` +// Old +{ + ... + "retries": 123, + ... +} +``` + +``` +// New +{ + ... + "retries": { + "bulk": 123, + "search": 0 + }, + ... +} +``` + +## ScanService removed + +The `ScanService` is removed. Use the (new) `ScrollService` instead. + +## New ScrollService + +There was confusion around `ScanService` and `ScrollService` doing basically +the same thing. One returned slices and didn't support all query details; the +other returned one document after another and wasn't safe for concurrent use. +So we merged the two into a new `ScrollService` that +removes the problems of the older services.
+ +In other words: +If you used `ScanService`, switch to `ScrollService`. +If you used the old `ScrollService`, you might need to fix some things but +overall it should just work. + +Changes: +- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll". + +TODO Not implemented yet + +## Suggesters + +They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html). + +Some changes: +- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing). + +TODO Fix all structural changes in suggesters + +## Percolator + +Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html). + +Elastic 5.0 adds the new +[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html) +which can be used in combination with the new +[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html). + +The Percolate service is removed from Elastic 5.0. + +## Remove Consistency, add WaitForActiveShards + +The `consistency` parameter has been removed in a lot of places, e.g. the Bulk, +Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API. + +It has been replaced by a somewhat similar `wait_for_active_shards` parameter. +See https://github.com/elastic/elasticsearch/pull/19454. diff --git a/vendor/gopkg.in/olivere/elastic.v3/README.md b/vendor/gopkg.in/olivere/elastic.v3/README.md index 5251b5aed..af9a2765e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/README.md +++ b/vendor/gopkg.in/olivere/elastic.v3/README.md @@ -3,8 +3,8 @@ Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the [Go](http://www.golang.org/) programming language. -[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic) -[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3) +[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v5)](https://travis-ci.org/olivere/elastic) +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v5) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. @@ -12,40 +12,58 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for additional informati ## Releases -**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).** +**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5)) +are actively being worked on and can break at any time. 
+If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).** Here's the version matrix: Elasticsearch version | Elastic version -| Package URL ----------------------|------------------|------------ +5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) 2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) 1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) 0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) **Example:** -You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in. +You have installed Elasticsearch 5.0.0 and want to use Elastic. +As listed above, you should use Elastic 5.0. +So you first install the stable release of Elastic 5.0 from gopkg.in. ```sh -$ go get gopkg.in/olivere/elastic.v3 +$ go get gopkg.in/olivere/elastic.v5 ``` You then import it with this import path: ```go -import "gopkg.in/olivere/elastic.v3" +import elastic "gopkg.in/olivere/elastic.v5" ``` +### Elastic 5.0 + +Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was +[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released). + +Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html) +and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md) +as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x). + +Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack. + ### Elastic 3.0 -Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released). +Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3). -Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md). +Elastic 3.0 will only get critical bug fixes. You should update to a recent version. ### Elastic 2.0 Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2). +Elastic 2.0 will only get critical bug fixes. You should update to a recent version. + ### Elastic 1.0 Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
More often than not it's renaming APIs and adding/removing features so that Elastic is in sync with Elasticsearch. Elastic has been used in production with the following Elasticsearch versions: -0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/) +0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/) to test Elastic with the most recent versions of Elasticsearch and Go. See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml) file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic) @@ -83,7 +101,8 @@ Having said that, I hope you find the project useful. ## Getting Started -The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. +The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). +The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. You typically create one client for your app. Here's a complete example of creating a client, creating an index, adding a document, executing a search etc. @@ -93,7 +112,6 @@ creating a client, creating an index, adding a document, executing a search etc. client, err := elastic.NewClient() if err != nil { // Handle error - panic(err) } // Create an index @@ -161,7 +179,6 @@ if searchResult.Hits.TotalHits > 0 { err := json.Unmarshal(*hit.Source, &t) if err != nil { // Deserialization failed - panic(err) } // Work with tweet @@ -205,6 +222,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Search - [x] Search Template +- [ ] Multi Search Template - [ ] Search Shards API - [x] Suggesters - [x] Term Suggester @@ -216,7 +234,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [ ] Search Exists API - [ ] Validate API - [x] Explain API -- [x] Percolator API +- [ ] Profile API - [x] Field Stats API ### Aggregations @@ -226,6 +244,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Cardinality - [x] Extended Stats - [x] Geo Bounds + - [ ] Geo Centroid - [x] Max - [x] Min - [x] Percentiles @@ -245,7 +264,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [ ] GeoHash Grid - [x] Global - [x] Histogram - - [x] IPv4 Range + - [x] IP Range - [x] Missing - [x] Nested - [x] Range @@ -259,11 +278,16 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Max Bucket - [x] Min Bucket - [x] Sum Bucket + - [ ] Stats Bucket + - [ ] Extended Stats Bucket + - [ ] Percentiles Bucket - [x] Moving Average - [x] Cumulative Sum - [x] Bucket Script - [x] Bucket Selector - [x] Serial Differencing +- [ ] Matrix Aggregations + - [ ] Matrix Stats - [x] Aggregation Metadata ### Indices APIs @@ -273,6 +297,8 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Get Index - [x] Indices Exists - [x] Open / Close Index +- [x] Shrink Index +- [ ] Rollover Index - [x] Put Mapping - [x] Get Mapping - [ ] Get Field Mapping @@ -282,15 +308,15 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. 
- [x] Get Settings - [ ] Analyze - [x] Index Templates -- [x] Warmers +- [ ] Shadow Replica Indices - [x] Indices Stats - [ ] Indices Segments - [ ] Indices Recovery +- [ ] Indices Shard Stores - [ ] Clear Cache - [x] Flush - [x] Refresh -- [x] Optimize -- [ ] Shadow Replica Indices +- [x] Force Merge - [ ] Upgrade ### cat APIs @@ -304,13 +330,16 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] cat health - [ ] cat indices - [ ] cat master +- [ ] cat nodeattrs - [ ] cat nodes - [ ] cat pending tasks - [ ] cat plugins - [ ] cat recovery +- [ ] cat repositories - [ ] cat thread pool - [ ] cat shards - [ ] cat segments +- [ ] cat snapshots ### Cluster APIs @@ -320,10 +349,11 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] Pending Cluster Tasks - [ ] Cluster Reroute - [ ] Cluster Update Settings -- [ ] Nodes Stats +- [x] Nodes Stats - [x] Nodes Info - [x] Task Management API - [ ] Nodes hot_threads +- [ ] Cluster Allocation Explain API ### Query DSL @@ -331,6 +361,8 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Inner hits - Full text queries - [x] Match Query + - [x] Match Phrase Query + - [x] Match Phrase Prefix Query - [x] Multi Match Query - [x] Common Terms Query - [x] Query String Query @@ -340,7 +372,6 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Terms Query - [x] Range Query - [x] Exists Query - - [x] Missing Query - [x] Prefix Query - [x] Wildcard Query - [x] Regexp Query @@ -354,15 +385,11 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Function Score Query - [x] Boosting Query - [x] Indices Query - - [x] And Query (deprecated) - - [x] Not Query - - [x] Or Query (deprecated) - - [ ] Filtered Query (deprecated) - - [ ] Limit Query (deprecated) - Joining queries - [x] Nested Query - [x] Has Child Query - [x] Has Parent Query + - [ ] Parent Id Query - Geo queries - [ ] GeoShape Query - [x] Geo Bounding Box Query @@ -374,6 +401,7 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] More Like This Query - [x] Template Query - [x] Script Query + - [x] Percolate Query - Span queries - [ ] Span Term Query - [ ] Span Multi Term Query @@ -383,6 +411,9 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] Span Not Query - [ ] Span Containing Query - [ ] Span Within Query + - [ ] Span Field Masking Query +- [ ] Minimum Should Match +- [ ] Multi Term Query Rewrite ### Modules @@ -394,12 +425,15 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Sort by field - [x] Sort by geo distance - [x] Sort by script +- [x] Sort by doc -### Scan +### Scrolling -Scrolling through documents (e.g. `search_type=scan`) are implemented via -the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well. +Scrolling is supported via a `ScrollService`. It supports an iterator-like interface. +The `ClearScroll` API is implemented as well. +A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel) +is described in the [Wiki](https://github.com/olivere/elastic/wiki). 
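As a concrete illustration of the iterator-like interface mentioned in the Scrolling section above, a sketch of a scroll loop against the new `ScrollService`; the index name and page size are illustrative:

```go
package main

import (
	"io"
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	svc := client.Scroll("tweets").Size(100) // fetch 100 documents per page
	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			break // end of scroll; replaces the old elastic.EOS sentinel
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, hit := range res.Hits.Hits {
			_ = hit.Source // process each document here
		}
	}
}
```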
## How to contribute @@ -408,9 +442,9 @@ Read [the contribution guidelines](https://github.com/olivere/elastic/blob/maste ## Credits Thanks a lot for the great folks working hard on -[Elasticsearch](http://www.elasticsearch.org/) +[Elasticsearch](https://www.elastic.co/products/elasticsearch) and -[Go](http://www.golang.org/). +[Go](https://golang.org/). Elastic uses portions of the [uritemplates](https://github.com/jtacoma/uritemplates) library diff --git a/vendor/gopkg.in/olivere/elastic.v3/acknowledged_response.go b/vendor/gopkg.in/olivere/elastic.v3/acknowledged_response.go new file mode 100644 index 000000000..83f954f44 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/acknowledged_response.go @@ -0,0 +1,11 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AcknowledgedResponse is returned from various APIs. It simply indicates +// whether the operation is ack'd or not. +type AcknowledgedResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go index f6d7ad9a0..23381cedc 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go index 9b5bcf0e1..8f602e637 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go index 701e03ccc..249b640b4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/backoff/retry_test.go b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry_test.go index 0dd45404b..578c7a23d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/backoff/retry_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/backoff/retry_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk.go b/vendor/gopkg.in/olivere/elastic.v3/bulk.go index d0bf5811e..6dc08d6d9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // BulkService allows for batching bulk requests and sending them to @@ -24,17 +24,20 @@ import ( // reuse BulkService to send many batches. You do not have to create a new // BulkService for each batch. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html // for more details. type BulkService struct { client *Client - index string - typ string - requests []BulkableRequest - timeout string - refresh *bool - pretty bool + index string + typ string + requests []BulkableRequest + pipeline string + timeout string + refresh string + routing string + waitForActiveShards string + pretty bool // estimated bulk size in bytes, up to the request index sizeInBytesCursor sizeInBytes int64 @@ -77,11 +80,35 @@ func (s *BulkService) Timeout(timeout string) *BulkService { return s } -// Refresh tells Elasticsearch to make the bulk requests -// available to search immediately after being processed. Normally, this -// only happens after a specified refresh interval. -func (s *BulkService) Refresh(refresh bool) *BulkService { - s.refresh = &refresh +// Refresh controls when changes made by this request are made visible +// to search. The allowed values are: "true" (refresh the relevant +// primary and replica shards immediately), "wait_for" (wait for the +// changes to be made visible by a refresh before applying), or "false" +// (no refresh related actions). +func (s *BulkService) Refresh(refresh string) *BulkService { + s.refresh = refresh + return s +} + +// Routing specifies the routing value. +func (s *BulkService) Routing(routing string) *BulkService { + s.routing = routing + return s +} + +// Pipeline specifies the pipeline id to preprocess incoming documents with. +func (s *BulkService) Pipeline(pipeline string) *BulkService { + s.pipeline = pipeline + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the bulk operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService { + s.waitForActiveShards = waitForActiveShards return s } @@ -152,14 +179,7 @@ func (s *BulkService) bodyAsString() (string, error) { // Do sends the batched requests to Elasticsearch. Note that, when successful, // you can reuse the BulkService for the next batch as the list of bulk // requests is cleared on success. -func (s *BulkService) Do() (*BulkResponse, error) { - return s.DoC(nil) -} - -// DoC sends the batched requests to Elasticsearch. Note that, when successful, -// you can reuse the BulkService for the next batch as the list of bulk -// requests is cleared on success. -func (s *BulkService) DoC(ctx context.Context) (*BulkResponse, error) { +func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) { // No actions? 
if s.NumberOfActions() == 0 { return nil, errors.New("elastic: No bulk actions to commit") @@ -198,15 +218,24 @@ func (s *BulkService) DoC(ctx context.Context) (*BulkResponse, error) { if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) } if s.timeout != "" { params.Set("timeout", s.timeout) } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } // Get response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go index 4625dd499..c475c6d63 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_delete_request.go @@ -12,9 +12,9 @@ import ( // -- Bulk delete request -- -// BulkDeleteRequest is a bulk request to remove a document from Elasticsearch. +// BulkDeleteRequest is a request to remove a document from Elasticsearch. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html // for details. type BulkDeleteRequest struct { BulkableRequest @@ -23,7 +23,6 @@ type BulkDeleteRequest struct { id string parent string routing string - refresh *bool version int64 // default is MATCH_ANY versionType string // default is "internal" @@ -73,15 +72,6 @@ func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { return r } -// Refresh indicates whether to update the shards immediately after -// the delete has been processed. Deleted documents will disappear -// in search immediately at the cost of slower bulk performance. -func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest { - r.refresh = &refresh - r.source = nil - return r -} - // Version indicates the version to be deleted as part of an optimistic // concurrency model. func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { @@ -141,9 +131,6 @@ func (r *BulkDeleteRequest) Source() ([]string, error) { if r.versionType != "" { deleteCommand["_version_type"] = r.versionType } - if r.refresh != nil { - deleteCommand["refresh"] = *r.refresh - } source["delete"] = deleteCommand body, err := json.Marshal(source) diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go index d96b9ee9a..6e9e0951f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request.go @@ -10,24 +10,24 @@ import ( "strings" ) -// BulkIndexRequest is a bulk request to add a document to Elasticsearch. +// BulkIndexRequest is a request to add a document to Elasticsearch. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html // for details. 
type BulkIndexRequest struct { BulkableRequest - index string - typ string - id string - opType string - routing string - parent string - timestamp string - ttl int64 - refresh *bool - version int64 // default is MATCH_ANY - versionType string // default is "internal" - doc interface{} + index string + typ string + id string + opType string + routing string + parent string + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} + pipeline string + retryOnConflict *int + ttl string source []string } @@ -87,34 +87,6 @@ func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { return r } -// Timestamp can be used to index a document with a timestamp. -// This is deprecated as of 2.0.0-beta2; you should use a normal date field -// and set its value explicitly. -func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest { - r.timestamp = timestamp - r.source = nil - return r -} - -// Ttl (time to live) sets an expiration date for the document. Expired -// documents will be expunged automatically. -// This is deprecated as of 2.0.0-beta2 and will be replaced by a different -// implementation in a future version. -func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest { - r.ttl = ttl - r.source = nil - return r -} - -// Refresh indicates whether to update the shards immediately after -// the request has been processed. Newly added documents will appear -// in search immediately at the cost of slower bulk performance. -func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest { - r.refresh = &refresh - r.source = nil - return r -} - // Version indicates the version of the document as part of an optimistic // concurrency model. func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { @@ -141,6 +113,27 @@ func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { return r } +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// TTL is an expiration time for the document. +func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest { + r.ttl = ttl + r.source = nil + return r +} + +// Pipeline to use while processing the request. +func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest { + r.pipeline = pipeline + r.source = nil + return r +} + // String returns the on-wire representation of the index request, // concatenated as a single string. 
func (r *BulkIndexRequest) String() string { @@ -183,20 +176,20 @@ func (r *BulkIndexRequest) Source() ([]string, error) { if r.parent != "" { indexCommand["_parent"] = r.parent } - if r.timestamp != "" { - indexCommand["_timestamp"] = r.timestamp - } - if r.ttl > 0 { - indexCommand["_ttl"] = r.ttl - } if r.version > 0 { indexCommand["_version"] = r.version } if r.versionType != "" { indexCommand["_version_type"] = r.versionType } - if r.refresh != nil { - indexCommand["refresh"] = *r.refresh + if r.retryOnConflict != nil { + indexCommand["_retry_on_conflict"] = *r.retryOnConflict + } + if r.ttl != "" { + indexCommand["_ttl"] = r.ttl + } + if r.pipeline != "" { + indexCommand["pipeline"] = r.pipeline } command[r.opType] = indexCommand line, err := json.Marshal(command) diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go index 983d9461b..fe95bd65c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -41,6 +41,33 @@ func TestBulkIndexRequestSerialization(t *testing.T) { `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, }, }, + // #3 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").RetryOnConflict(42). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #4 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").Pipeline("my_pipeline"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_type":"tweet","pipeline":"my_pipeline"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #5 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").TTL("1m"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, } for i, test := range tests { diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go index c833e9a15..b69d9b89c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -9,7 +9,9 @@ import ( "sync/atomic" "time" - "gopkg.in/olivere/elastic.v3/backoff" + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/backoff" ) // BulkProcessorService allows to easily process bulk requests. 
It allows setting @@ -464,7 +466,7 @@ func (w *bulkWorker) commit() error { // via exponential backoff commitFunc := func() error { var err error - res, err = w.service.Do() + res, err = w.service.Do(context.Background()) return err } // notifyFunc will be called if retry fails diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_processor_test.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor_test.go index 84afcf9d9..89e096322 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_processor_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_processor_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,6 +10,8 @@ import ( "sync/atomic" "testing" "time" + + "golang.org/x/net/context" ) func TestBulkProcessorDefaults(t *testing.T) { @@ -157,11 +159,11 @@ func TestBulkProcessorBasedOnFlushInterval(t *testing.T) { } // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() + _, err = p.c.Flush(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err := p.c.Count(testIndexName).Do() + count, err := p.c.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -241,11 +243,11 @@ func TestBulkProcessorClose(t *testing.T) { } // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() + _, err = p.c.Flush(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err := p.c.Count(testIndexName).Do() + count, err := p.c.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -314,11 +316,11 @@ func TestBulkProcessorFlush(t *testing.T) { } // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() + _, err = p.c.Flush(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err := p.c.Count(testIndexName).Do() + count, err := p.c.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -407,11 +409,11 @@ func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) { } // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() + _, err = p.c.Flush(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err := p.c.Count(testIndexName).Do() + count, err := p.c.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go index 315b535ca..ce3bf0768 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_request.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( // -- Bulkable request (index/update/delete) -- -// Generic interface to bulkable requests. +// BulkableRequest is a generic interface to bulkable requests. 
type BulkableRequest interface { fmt.Stringer Source() ([]string, error) diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_test.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_test.go index d5cc8f1bb..5a57871cb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,11 +7,12 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestBulk(t *testing.T) { - //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndex(t) + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} @@ -29,7 +30,7 @@ func TestBulk(t *testing.T) { t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) } - bulkResponse, err := bulkRequest.Do() + bulkResponse, err := bulkRequest.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -42,7 +43,7 @@ func TestBulk(t *testing.T) { } // Document with Id="1" should not exist - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -51,7 +52,7 @@ func TestBulk(t *testing.T) { } // Document with Id="2" should exist - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -73,7 +74,7 @@ func TestBulk(t *testing.T) { t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) } - bulkResponse, err = bulkRequest.Do() + bulkResponse, err = bulkRequest.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -86,7 +87,7 @@ func TestBulk(t *testing.T) { } // Document with Id="1" should have a retweets count of 42 - doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do() + doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -111,13 +112,13 @@ func TestBulk(t *testing.T) { // Update with script update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). RetryOnConflict(3). 
- Script(NewScript("ctx._source.retweets += v").Param("v", 1)) + Script(NewScript("ctx._source.retweets += params.v").Param("v", 1)) bulkRequest = client.Bulk() bulkRequest = bulkRequest.Add(update2Req) if bulkRequest.NumberOfActions() != 1 { t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) } - bulkResponse, err = bulkRequest.Do() + bulkResponse, err = bulkRequest.Refresh("wait_for").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -130,7 +131,7 @@ func TestBulk(t *testing.T) { } // Document with Id="1" should have a retweets count of 43 - doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do() + doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -171,7 +172,7 @@ func TestBulkWithIndexSetOnClient(t *testing.T) { t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) } - bulkResponse, err := bulkRequest.Do() + bulkResponse, err := bulkRequest.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -180,7 +181,7 @@ func TestBulkWithIndexSetOnClient(t *testing.T) { } // Document with Id="1" should not exist - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -189,7 +190,7 @@ func TestBulkWithIndexSetOnClient(t *testing.T) { } // Document with Id="2" should exist - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -241,7 +242,7 @@ func TestBulkRequestsSerialization(t *testing.T) { } // Run the bulk request - bulkResponse, err := bulkRequest.Do() + bulkResponse, err := bulkRequest.Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go index 725f7346d..7e7e65fbd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request.go @@ -10,9 +10,9 @@ import ( "strings" ) -// Bulk request to update a document in Elasticsearch. +// BulkUpdateRequest is a request to update a document in Elasticsearch. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html // for details. type BulkUpdateRequest struct { BulkableRequest @@ -26,13 +26,10 @@ type BulkUpdateRequest struct { version int64 // default is MATCH_ANY versionType string // default is "internal" retryOnConflict *int - refresh *bool upsert interface{} docAsUpsert *bool detectNoop *bool doc interface{} - ttl int64 - timestamp string source []string } @@ -112,15 +109,6 @@ func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { return r } -// Refresh indicates whether to update the shards immediately after -// the request has been processed. Updated documents will appear -// in search immediately at the cost of slower bulk performance. -func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest { - r.refresh = &refresh - r.source = nil - return r -} - // Doc specifies the updated document. 
func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { r.doc = doc @@ -156,22 +144,6 @@ func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { return r } -// Ttl specifies the time to live, and optional expiry time. -// This is deprecated as of 2.0.0-beta2. -func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest { - r.ttl = ttl - r.source = nil - return r -} - -// Timestamp specifies a timestamp for the document. -// This is deprecated as of 2.0.0-beta2. -func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest { - r.timestamp = timestamp - r.source = nil - return r -} - // String returns the on-wire representation of the update request, // concatenated as a single string. func (r *BulkUpdateRequest) String() string { @@ -236,21 +208,12 @@ func (r BulkUpdateRequest) Source() ([]string, error) { if r.parent != "" { updateCommand["_parent"] = r.parent } - if r.timestamp != "" { - updateCommand["_timestamp"] = r.timestamp - } - if r.ttl > 0 { - updateCommand["_ttl"] = r.ttl - } if r.version > 0 { updateCommand["_version"] = r.version } if r.versionType != "" { updateCommand["_version_type"] = r.versionType } - if r.refresh != nil { - updateCommand["refresh"] = *r.refresh - } if r.retryOnConflict != nil { updateCommand["_retry_on_conflict"] = *r.retryOnConflict } diff --git a/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go index c2883e2d9..7b8afa586 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go index a73ac0723..1310e169b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go +++ b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll.go @@ -69,12 +69,7 @@ func (s *ClearScrollService) Validate() error { } // Do executes the operation. -func (s *ClearScrollService) Do() (*ClearScrollResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ClearScrollService) DoC(ctx context.Context) (*ClearScrollResponse, error) { +func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -90,7 +85,7 @@ func (s *ClearScrollService) DoC(ctx context.Context) (*ClearScrollResponse, err body := strings.Join(s.scrollId, ",") // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, body) + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/clear_scroll_test.go b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll_test.go index 3bd4d83a1..15e3a4414 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/clear_scroll_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/clear_scroll_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
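Aside: for call sites migrating across this change, the former Do()/DoC(ctx) pair collapses into a single Do(ctx), and Refresh switches from a bool to the v5 string values ("true", "wait_for", "false"). A minimal sketch of the new calling convention; the "tweets" index, the type name, and the document body are made up for illustration:

package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // defaults to http://127.0.0.1:9200
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	_, err = client.Bulk().
		Index("tweets"). // hypothetical index name
		Type("tweet").
		Add(elastic.NewBulkIndexRequest().Id("1").
			Doc(map[string]interface{}{"user": "olivere"})).
		Refresh("wait_for"). // v5: a string, no longer a bool pointer
		Do(ctx)              // v5: the context is a required argument
	if err != nil {
		log.Fatal(err)
	}
}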
@@ -7,6 +7,8 @@ package elastic import ( _ "net/http" "testing" + + "golang.org/x/net/context" ) func TestClearScroll(t *testing.T) { @@ -18,55 +20,55 @@ func TestClearScroll(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Match all should return all documents - res, err := client.Scroll(testIndexName).Size(1).Do() + res, err := client.Scroll(testIndexName).Size(1).Do(context.TODO()) if err != nil { t.Fatal(err) } if res == nil { - t.Errorf("expected results != nil; got nil") + t.Fatal("expected results != nil; got nil") } if res.ScrollId == "" { - t.Errorf("expected scrollId in results; got %q", res.ScrollId) + t.Fatalf("expected scrollId in results; got %q", res.ScrollId) } // Search should succeed - _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO()) if err != nil { t.Fatal(err) } // Clear scroll id - clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do() + clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do(context.TODO()) if err != nil { t.Fatal(err) } if clearScrollRes == nil { - t.Error("expected results != nil; got nil") + t.Fatal("expected results != nil; got nil") } // Search result should fail - _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO()) if err == nil { t.Fatalf("expected scroll to fail") } @@ -76,7 +78,7 @@ func TestClearScrollValidate(t *testing.T) { client := setupTestClient(t) // No scroll id -> fail with error - res, err := NewClearScrollService(client).Do() + res, err := NewClearScrollService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected ClearScroll to fail without scroll ids") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/client.go b/vendor/gopkg.in/olivere/elastic.v3/client.go index fd0a450a6..24d12e51b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/client.go +++ b/vendor/gopkg.in/olivere/elastic.v3/client.go @@ -24,7 +24,7 @@ import ( const ( // Version is the current version of Elastic. - Version = "3.0.59" + Version = "5.0.13" // DefaultUrl is the default endpoint of Elasticsearch on the local machine. // It is used e.g. when initializing a new Client without a specific URL. @@ -549,7 +549,7 @@ func SetTraceLog(logger Logger) ClientOptionFunc { } } -// SendGetBodyAs specifies the HTTP method to use when sending a GET request +// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request // with a body. It is GET by default. 
func SetSendGetBodyAs(httpMethod string) ClientOptionFunc { return func(c *Client) error { @@ -715,8 +715,8 @@ func (c *Client) sniff(timeout time.Duration) error { } // Use all available URLs provided to sniff the cluster. - urlsMap := make(map[string]bool) var urls []string + urlsMap := make(map[string]bool) // Add all URLs provided on startup for _, url := range c.urls { @@ -796,17 +796,9 @@ func (c *Client) sniffNode(url string) []*conn { var info NodesInfoResponse if err := json.NewDecoder(res.Body).Decode(&info); err == nil { if len(info.Nodes) > 0 { - switch c.scheme { - case "https": - for nodeID, node := range info.Nodes { - url := c.extractHostname("https", node.HTTPSAddress) - if url != "" { - nodes = append(nodes, newConn(nodeID, url)) - } - } - default: - for nodeID, node := range info.Nodes { - url := c.extractHostname("http", node.HTTPAddress) + for nodeID, node := range info.Nodes { + if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 { + url := c.extractHostname(c.scheme, node.HTTP.PublishAddress) if url != "" { nodes = append(nodes, newConn(nodeID, url)) } @@ -843,11 +835,10 @@ func (c *Client) extractHostname(scheme, address string) string { func (c *Client) updateConns(conns []*conn) { c.connsMu.Lock() - var newConns []*conn - // Build up new connections: // If we find an existing connection, use that (including no. of failures etc.). // If we find a new connection, add it. + var newConns []*conn for _, conn := range conns { var found bool for _, oldConn := range c.conns { @@ -911,34 +902,52 @@ func (c *Client) healthcheck(timeout time.Duration, force bool) { conns := c.conns c.connsMu.RUnlock() - timeoutInMillis := int64(timeout / time.Millisecond) - for _, conn := range conns { - params := make(url.Values) - params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis)) - req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode()) - if err == nil { + // Run the HEAD request against ES with a timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Goroutine executes the HTTP request, returns an error and sets status + var status int + errc := make(chan error, 1) + go func(url string) { + req, err := NewRequest("HEAD", url) + if err != nil { + errc <- err + return + } if basicAuth { req.SetBasicAuth(basicAuthUsername, basicAuthPassword) } res, err := c.c.Do((*http.Request)(req)) - if err == nil { + if res != nil { + status = res.StatusCode if res.Body != nil { - defer res.Body.Close() - } - if res.StatusCode >= 200 && res.StatusCode < 300 { - conn.MarkAsAlive() - } else { - conn.MarkAsDead() - c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode) + res.Body.Close() } - } else { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() } - } else { + errc <- err + }(conn.URL()) + + // Wait for the Goroutine (or its timeout) + select { + case <-ctx.Done(): // timeout c.errorf("elastic: %s is dead", conn.URL()) conn.MarkAsDead() + break + case err := <-errc: + if err != nil { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + break + } + if status >= 200 && status < 300 { + conn.MarkAsAlive() + } else { + conn.MarkAsDead() + c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status) + } + break } } } @@ -1041,20 +1050,7 @@ func (c *Client) mustActiveConn() error { // Optionally, a list of HTTP error codes to ignore can be passed. // This is necessary for services that expect e.g. HTTP status 404 as a // valid outcome (Exists, IndicesExists, IndicesTypeExists). 
-func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) { - return c.PerformRequestC(nil, method, path, params, body, ignoreErrors...) -} - -// PerformRequestC does a HTTP request to Elasticsearch. -// It returns a response and an error on failure. -// -// Optionally, a list of HTTP error codes to ignore can be passed. -// This is necessary for services that expect e.g. HTTP status 404 as a -// valid outcome (Exists, IndicesExists, IndicesTypeExists). -// -// If ctx is not nil, it uses the ctxhttp to do the request, -// enabling both request cancelation as well as timeout. -func (c *Client) PerformRequestC(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) { +func (c *Client) PerformRequest(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) { start := time.Now().UTC() c.mu.RLock() @@ -1132,12 +1128,7 @@ func (c *Client) PerformRequestC(ctx context.Context, method, path string, param c.dumpRequest((*http.Request)(req)) // Get response - var res *http.Response - if ctx == nil { - res, err = c.c.Do((*http.Request)(req)) - } else { - res, err = ctxhttp.Do(ctx, c.c, (*http.Request)(req)) - } + res, err := ctxhttp.Do(ctx, c.c, (*http.Request)(req)) if err != nil { retries-- if retries <= 0 { @@ -1238,28 +1229,11 @@ func (c *Client) BulkProcessor() *BulkProcessorService { return NewBulkProcessorService(c) } -// Reindex returns a service that will reindex documents from a source -// index into a target index. -// -// Notice that this Reindexer is an Elastic-specific solution that pre-dated -// the Reindex API introduced in Elasticsearch 2.3.0 (see ReindexTask). -// -// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html -// for more information about reindexing. -func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer { - return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex)) -} - -// ReindexTask copies data from a source index into a destination index. -// -// The Reindex API has been introduced in Elasticsearch 2.3.0. Notice that -// there is a Elastic-specific Reindexer that pre-dates the Reindex API from -// Elasticsearch. If you rely on that, use the ReindexerService via -// Client.Reindex. +// Reindex copies data from a source index into a destination index. // // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html // for details on the Reindex API. -func (c *Client) ReindexTask() *ReindexService { +func (c *Client) Reindex() *ReindexService { return NewReindexService(c) } @@ -1304,12 +1278,6 @@ func (c *Client) Explain(index, typ, id string) *ExplainService { return NewExplainService(c).Index(index).Type(typ).Id(id) } -// Percolate allows to send a document and return matching queries. -// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html. -func (c *Client) Percolate() *PercolateService { - return NewPercolateService(c) -} - // TODO Search Template // TODO Search Shards API // TODO Search Exists API @@ -1325,15 +1293,8 @@ func (c *Client) Exists() *ExistsService { return NewExistsService(c) } -// Scan through documents. Use this to iterate inside a server process -// where the results will be processed without returning them to a client. -func (c *Client) Scan(indices ...string) *ScanService { - return NewScanService(c).Index(indices...) 
-} - // Scroll through documents. Use this to efficiently scroll through results -// while returning the results to a client. Use Scan when you don't need -// to return requests to a client (i.e. not paginating via request/response). +// while returning the results to a client. func (c *Client) Scroll(indices ...string) *ScrollService { return NewScrollService(c).Index(indices...) } @@ -1360,6 +1321,17 @@ func (c *Client) IndexExists(indices ...string) *IndicesExistsService { return NewIndicesExistsService(c).Index(indices) } +// ShrinkIndex returns a service to shrink one index into another. +func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService { + return NewIndicesShrinkService(c).Source(source).Target(target) +} + +// RolloverIndex rolls an alias over to a new index when the existing index +// is considered to be too large or too old. +func (c *Client) RolloverIndex(alias string) *IndicesRolloverService { + return NewIndicesRolloverService(c).Alias(alias) +} + // TypeExists allows to check if one or more types exist in one or more indices. func (c *Client) TypeExists() *IndicesExistsTypeService { return NewIndicesExistsTypeService(c) @@ -1397,12 +1369,6 @@ func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService return NewIndicesPutSettingsService(c).Index(indices...) } -// Optimize asks Elasticsearch to optimize one or more indices. -// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge. -func (c *Client) Optimize(indices ...string) *OptimizeService { - return NewOptimizeService(c).Index(indices...) -} - // Forcemerge optimizes one or more indices. // It replaces the deprecated Optimize API. func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService { @@ -1482,21 +1448,6 @@ func (c *Client) PutMapping() *IndicesPutMappingService { return NewIndicesPutMappingService(c) } -// GetWarmer gets one or more warmers by name. -func (c *Client) GetWarmer() *IndicesGetWarmerService { - return NewIndicesGetWarmerService(c) -} - -// PutWarmer registers a warmer. -func (c *Client) PutWarmer() *IndicesPutWarmerService { - return NewIndicesPutWarmerService(c) -} - -// DeleteWarmer deletes one or more warmers. -func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService { - return NewIndicesDeleteWarmerService(c) -} - // -- cat APIs -- // TODO cat aliases @@ -1514,6 +1465,30 @@ func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService { // TODO cat shards // TODO cat segments +// -- Ingest APIs -- + +// IngestPutPipeline adds pipelines and updates existing pipelines in +// the cluster. +func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService { + return NewIngestPutPipelineService(c).Id(id) +} + +// IngestGetPipeline returns pipelines based on ID. +func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService { + return NewIngestGetPipelineService(c).Id(ids...) +} + +// IngestDeletePipeline deletes a pipeline by ID. +func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService { + return NewIngestDeletePipelineService(c).Id(id) +} + +// IngestSimulatePipeline executes a specific pipeline against the set of +// documents provided in the body of the request. +func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService { + return NewIngestSimulatePipelineService(c) +} + // -- Cluster APIs -- // ClusterHealth retrieves the health of the cluster. 
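Aside: the new ingest helpers above pair with the Pipeline setters added to BulkService and BulkIndexRequest earlier in this patch. A sketch of registering a pipeline and indexing through it; the BodyString setter on IngestPutPipelineService is assumed from the library (it is not part of this hunk), and the pipeline id and index name are invented:

package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Register an ingest pipeline that sets a field on every document.
	src := `{"description":"tag docs","processors":[{"set":{"field":"seen","value":true}}]}`
	if _, err := client.IngestPutPipeline("tag-docs").BodyString(src).Do(ctx); err != nil {
		log.Fatal(err)
	}

	// Preprocess an indexed document with that pipeline via the new setter.
	req := elastic.NewBulkIndexRequest().Index("tweets").Type("tweet").Id("1").
		Pipeline("tag-docs").
		Doc(map[string]interface{}{"user": "olivere"})
	if _, err := client.Bulk().Add(req).Do(ctx); err != nil {
		log.Fatal(err)
	}
}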
@@ -1574,7 +1549,7 @@ func (c *Client) TasksList() *TasksListService { // ElasticsearchVersion returns the version number of Elasticsearch // running on the given URL. func (c *Client) ElasticsearchVersion(url string) (string, error) { - res, _, err := c.Ping(url).Do() + res, _, err := c.Ping(url).Do(context.Background()) if err != nil { return "", err } @@ -1583,7 +1558,7 @@ func (c *Client) ElasticsearchVersion(url string) (string, error) { // IndexNames returns the names of all indices in the cluster. func (c *Client) IndexNames() ([]string, error) { - res, err := c.IndexGetSettings().Index("_all").Do() + res, err := c.IndexGetSettings().Index("_all").Do(context.Background()) if err != nil { return nil, err } @@ -1610,7 +1585,7 @@ func (c *Client) Ping(url string) *PingService { // If the cluster will have the given state within the timeout, nil is returned. // If the request timed out, ErrTimeout is returned. func (c *Client) WaitForStatus(status string, timeout string) error { - health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do() + health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background()) if err != nil { return err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/client_test.go b/vendor/gopkg.in/olivere/elastic.v3/client_test.go index 7920fd72c..c9e9ff327 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/client_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/client_test.go @@ -170,7 +170,7 @@ func TestClientSniffDisabled(t *testing.T) { } // Make two requests, so that both connections are being used for i := 0; i < len(client.conns); i++ { - client.Flush().Do() + client.Flush().Do(context.TODO()) } // The first connection (127.0.0.1:9200) should now be okay. if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found { @@ -202,7 +202,7 @@ func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) { } // Make a request, so that the connections is marked as dead. - client.Flush().Do() + client.Flush().Do(context.TODO()) // The connection should now be marked as dead. if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found { @@ -214,7 +214,7 @@ func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) { } // Now send another request and the connection should be marked as alive again. 
- client.Flush().Do() + client.Flush().Do(context.TODO()) if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found { t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201") @@ -687,29 +687,7 @@ func TestPerformRequest(t *testing.T) { if err != nil { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } -} - -func TestPerformRequestC(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - res, err := client.PerformRequestC(context.Background(), "GET", "/", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) if err != nil { t.Fatal(err) } @@ -731,7 +709,7 @@ func TestPerformRequestWithSimpleClient(t *testing.T) { if err != nil { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) if err != nil { t.Fatal(err) } @@ -757,7 +735,7 @@ func TestPerformRequestWithLogger(t *testing.T) { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) if err != nil { t.Fatal(err) } @@ -796,7 +774,7 @@ func TestPerformRequestWithLoggerAndTracer(t *testing.T) { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) if err != nil { t.Fatal(err) } @@ -839,7 +817,7 @@ func TestPerformRequestWithCustomLogger(t *testing.T) { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) if err != nil { t.Fatal(err) } @@ -887,7 +865,7 @@ func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) { func TestPerformRequestRetryOnHttpError(t *testing.T) { var numFailedReqs int fail := func(r *http.Request) (*http.Response, error) { - numFailedReqs++ + numFailedReqs += 1 //return &http.Response{Request: r, StatusCode: 400}, nil return nil, errors.New("request failed") } @@ -902,7 +880,7 @@ func TestPerformRequestRetryOnHttpError(t *testing.T) { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/fail", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil) if err == nil { t.Fatal("expected error") } @@ -918,7 +896,7 @@ func TestPerformRequestRetryOnHttpError(t *testing.T) { func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) { var numFailedReqs int fail := func(r *http.Request) (*http.Response, error) { - numFailedReqs++ + numFailedReqs += 1 return &http.Response{Request: r, StatusCode: 500}, nil } @@ -932,7 +910,7 @@ func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/fail", nil, nil) + res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil) if err == nil { t.Fatal("expected error") } @@ -961,7 +939,7 @@ func TestPerformRequestWithSetBodyError(t *testing.T) { if err != nil { t.Fatal(err) } - res, err := client.PerformRequest("GET", "/", nil, failingBody{}) + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, 
failingBody{}) if err == nil { t.Fatal("expected error") } @@ -981,8 +959,8 @@ func (tr *sleepingTransport) RoundTrip(r *http.Request) (*http.Response, error) return http.DefaultTransport.RoundTrip(r) } -func TestPerformRequestCWithCancel(t *testing.T) { - tr := &sleepingTransport{timeout: 5 * time.Second} +func TestPerformRequestWithCancel(t *testing.T) { + tr := &sleepingTransport{timeout: 3 * time.Second} httpClient := &http.Client{Transport: tr} client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0)) @@ -998,7 +976,7 @@ func TestPerformRequestCWithCancel(t *testing.T) { resc := make(chan result, 1) go func() { - res, err := client.PerformRequestC(ctx, "GET", "/", nil, nil) + res, err := client.PerformRequest(ctx, "GET", "/", nil, nil) resc <- result{res: res, err: err} }() select { @@ -1015,8 +993,8 @@ func TestPerformRequestCWithCancel(t *testing.T) { } } -func TestPerformRequestCWithTimeout(t *testing.T) { - tr := &sleepingTransport{timeout: 5 * time.Second} +func TestPerformRequestWithTimeout(t *testing.T) { + tr := &sleepingTransport{timeout: 3 * time.Second} httpClient := &http.Client{Transport: tr} client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0)) @@ -1032,7 +1010,7 @@ func TestPerformRequestCWithTimeout(t *testing.T) { resc := make(chan result, 1) go func() { - res, err := client.PerformRequestC(ctx, "GET", "/", nil, nil) + res, err := client.PerformRequest(ctx, "GET", "/", nil, nil) resc <- result{res: res, err: err} }() select { diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go b/vendor/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go index 8b38bae6c..112a60bad 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -17,7 +17,9 @@ import ( "sync/atomic" "time" - elastic "gopkg.in/olivere/elastic.v3" + "golang.org/x/net/context" + + elastic "gopkg.in/olivere/elastic.v5" ) type Tweet struct { @@ -226,13 +228,15 @@ func (t *TestCase) setup() error { } t.client = client + ctx := context.Background() + // Use the IndexExists service to check if a specified index exists. - exists, err := t.client.IndexExists(t.index).Do() + exists, err := t.client.IndexExists(t.index).Do(ctx) if err != nil { return err } if exists { - deleteIndex, err := t.client.DeleteIndex(t.index).Do() + deleteIndex, err := t.client.DeleteIndex(t.index).Do(ctx) if err != nil { return err } @@ -242,7 +246,7 @@ func (t *TestCase) setup() error { } // Create a new index. - createIndex, err := t.client.CreateIndex(t.index).Do() + createIndex, err := t.client.CreateIndex(t.index).Do(ctx) if err != nil { return err } @@ -257,7 +261,7 @@ func (t *TestCase) setup() error { Type("tweet"). Id("1"). BodyJson(tweet1). - Do() + Do(ctx) if err != nil { return err } @@ -269,13 +273,13 @@ func (t *TestCase) setup() error { Type("tweet"). Id("2"). BodyString(tweet2). - Do() + Do(ctx) if err != nil { return err } // Flush to make sure the documents got written. 
- _, err = t.client.Flush().Index(t.index).Do() + _, err = t.client.Flush().Index(t.index).Do(ctx) if err != nil { return err } @@ -284,6 +288,8 @@ func (t *TestCase) setup() error { } func (t *TestCase) search() { + ctx := context.Background() + // Loop forever to check for connection issues for { // Get tweet with specified ID @@ -291,7 +297,7 @@ func (t *TestCase) search() { Index(t.index). Type("tweet"). Id("1"). - Do() + Do(ctx) if err != nil { //failf("Get failed: %v", err) t.runCh <- RunInfo{Success: false} @@ -311,7 +317,7 @@ func (t *TestCase) search() { Sort("user", true). // sort by "user" field, ascending From(0).Size(10). // take documents 0-9 Pretty(true). // pretty print request and response JSON - Do() // execute + Do(ctx) // execute if err != nil { //failf("Search failed: %v\n", err) t.runCh <- RunInfo{Success: false} diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go index d62d09268..14694c714 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_health.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // ClusterHealthService allows to get a very simple status on the health of the cluster. @@ -167,12 +167,7 @@ func (s *ClusterHealthService) Validate() error { } // Do executes the operation. -func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ClusterHealthService) DoC(ctx context.Context) (*ClusterHealthResponse, error) { +func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -185,7 +180,7 @@ func (s *ClusterHealthService) DoC(ctx context.Context) (*ClusterHealthResponse, } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_health_test.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_health_test.go index d0f4818ae..d98706415 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_health_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_health_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,13 +7,15 @@ package elastic import ( "net/url" "testing" + + "golang.org/x/net/context" ) func TestClusterHealth(t *testing.T) { client := setupTestClientAndCreateIndex(t) // Get cluster health - res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do() + res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -77,7 +79,7 @@ func TestClusterHealthWaitForStatus(t *testing.T) { client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) // Ensure preconditions are met: A green cluster. 
- health, err := client.ClusterHealth().Do() + health, err := client.ClusterHealth().Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -86,7 +88,7 @@ func TestClusterHealthWaitForStatus(t *testing.T) { } // Cluster health on an index that does not exist should never get to yellow - health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do() + health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do(context.TODO()) if err == nil { t.Fatalf("expected timeout error; got: %v", err) } @@ -98,7 +100,7 @@ func TestClusterHealthWaitForStatus(t *testing.T) { } // Cluster wide health - health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do() + health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go index bfb8ebbf4..1c287890e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_state.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // ClusterStateService allows to get a comprehensive state information of the whole cluster. @@ -70,7 +70,7 @@ func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterSt return s } -// FlatSettings indicates whether to return settings in flat format (default: false). +// FlatSettings, when set, returns settings in flat format (default: false). func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService { s.flatSettings = &flatSettings return s @@ -153,12 +153,7 @@ func (s *ClusterStateService) Validate() error { } // Do executes the operation. -func (s *ClusterStateService) Do() (*ClusterStateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ClusterStateService) DoC(ctx context.Context) (*ClusterStateResponse, error) { +func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -171,7 +166,7 @@ func (s *ClusterStateService) DoC(ctx context.Context) (*ClusterStateResponse, e } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_state_test.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_state_test.go index e73a8eeb7..63fd601b7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_state_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_state_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
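Aside: with Do taking a context, the server-side WaitForStatus timeout exercised in the test above can now be paired with a client-side deadline as well. A minimal sketch, assuming a reachable local cluster:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Bound the wait on the client side too; the server still enforces Timeout("10s").
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	health, err := client.ClusterHealth().
		WaitForGreenStatus(). // server-side wait, as in the test above
		Timeout("10s").
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster status: %s", health.Status)
}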
@@ -7,13 +7,15 @@ package elastic import ( "net/url" "testing" + + "golang.org/x/net/context" ) func TestClusterState(t *testing.T) { client := setupTestClientAndCreateIndex(t) // Get cluster state - res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do() + res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go index c1d864f79..82402d9d6 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. @@ -95,12 +95,7 @@ func (s *ClusterStatsService) Validate() error { } // Do executes the operation. -func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ClusterStatsService) DoC(ctx context.Context) (*ClusterStatsResponse, error) { +func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -113,7 +108,7 @@ func (s *ClusterStatsService) DoC(ctx context.Context) (*ClusterStatsResponse, e } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/cluster_stats_test.go b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats_test.go index e61066ec4..c044b7c0a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/cluster_stats_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/cluster_stats_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
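Aside: the same context-passing applies to ClusterStateService. A sketch mirroring the call made in the test above; the ClusterName field on the response is assumed from the library rather than shown in this hunk:

package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Same call the test makes, with a real context instead of context.TODO().
	state, err := client.ClusterState().
		Index("_all").
		Metric("_all").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster name: %s", state.ClusterName) // field assumed from the library
}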
@@ -7,13 +7,15 @@ package elastic import ( "net/url" "testing" + + "golang.org/x/net/context" ) func TestClusterStats(t *testing.T) { client := setupTestClientAndCreateIndex(t) // Get cluster stats - res, err := client.ClusterStats().Do() + res, err := client.ClusterStats().Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml b/vendor/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml index 321cf9c3d..9923cfe4f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml +++ b/vendor/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml @@ -1,4 +1,4 @@ -bootstrap.ignore_system_bootstrap_checks: true +# bootstrap.ignore_system_bootstrap_checks: true discovery.zen.minimum_master_nodes: 1 @@ -8,8 +8,6 @@ network.host: network.publish_host: _local_ -# Disable disk space warnings -cluster.routing.allocation.disk.threshold_enabled: false # Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html script.inline: true diff --git a/vendor/gopkg.in/olivere/elastic.v3/config/jvm.options b/vendor/gopkg.in/olivere/elastic.v3/config/jvm.options new file mode 100644 index 000000000..d97fbc9ec --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/config/jvm.options @@ -0,0 +1,100 @@ +## JVM configuration + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## You should always set the min and max JVM heap +## size to the same value. For example, to set +## the heap to 4 GB, set: +## +## -Xms4g +## -Xmx4g +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +## for more information +## +################################################################ + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space + +-Xms2g +-Xmx2g + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +-XX:+UseConcMarkSweepGC +-XX:CMSInitiatingOccupancyFraction=75 +-XX:+UseCMSInitiatingOccupancyOnly + +## optimizations + +# disable calls to System#gc +-XX:+DisableExplicitGC + +# pre-touch memory pages used by the JVM during initialization +-XX:+AlwaysPreTouch + +## basic + +# force the server VM +-server + +# set to headless, just in case +-Djava.awt.headless=true + +# ensure UTF-8 encoding by default (e.g. 
filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +-Djna.nosys=true + +# flags to keep Netty from being unsafe +-Dio.netty.noUnsafe=true +-Dio.netty.noKeySetOptimization=true + +# log4j 2 +-Dlog4j.shutdownHookEnabled=false +-Dlog4j2.disable.jmx=true +-Dlog4j.skipJansi=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${heap.dump.path} + +## GC logging + +#-XX:+PrintGCDetails +#-XX:+PrintGCTimeStamps +#-XX:+PrintGCDateStamps +#-XX:+PrintClassHistogram +#-XX:+PrintTenuringDistribution +#-XX:+PrintGCApplicationStoppedTime + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${loggc} + +# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON. +# If documents were already indexed with unquoted fields in a previous version +# of Elasticsearch, some operations may throw errors. +# +# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided +# only for migration purposes. +#-Delasticsearch.json.allow_unquoted_field_names=true diff --git a/vendor/gopkg.in/olivere/elastic.v3/config/log4j2.properties b/vendor/gopkg.in/olivere/elastic.v3/config/log4j2.properties new file mode 100644 index 000000000..9a3147f5a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/config/log4j2.properties @@ -0,0 +1,74 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n +appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile 
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/vendor/gopkg.in/olivere/elastic.v3/config/logging.yml b/vendor/gopkg.in/olivere/elastic.v3/config/logging.yml deleted file mode 100644 index c2681ac28..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/config/logging.yml +++ /dev/null @@ -1,15 +0,0 @@ -# you can override this using by setting a system property, for example -Des.logger.level=DEBUG -es.logger.level: INFO -rootLogger: ${es.logger.level}, console -logger: - # log action execution errors for easier debugging - action: DEBUG - # reduce the logging for aws, too much is logged under the default INFO - com.amazonaws: WARN - -appender: - console: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/vendor/gopkg.in/olivere/elastic.v3/config/scripts/.gitkeep b/vendor/gopkg.in/olivere/elastic.v3/config/scripts/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/gopkg.in/olivere/elastic.v3/connection.go b/vendor/gopkg.in/olivere/elastic.v3/connection.go index ef0208c23..0f27a8756 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/connection.go +++ b/vendor/gopkg.in/olivere/elastic.v3/connection.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -67,7 +67,7 @@ func (c *conn) MarkAsDead() { utcNow := time.Now().UTC() c.deadSince = &utcNow } - c.failures++ + c.failures += 1 c.Unlock() } diff --git a/vendor/gopkg.in/olivere/elastic.v3/count.go b/vendor/gopkg.in/olivere/elastic.v3/count.go index 741a3b6cf..459ea0bff 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/count.go +++ b/vendor/gopkg.in/olivere/elastic.v3/count.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // CountService is a convenient service for determining the @@ -258,11 +258,7 @@ func (s *CountService) Validate() error { } // Do executes the operation. -func (s *CountService) Do() (int64, error) { - return s.DoC(nil) -} - -func (s *CountService) DoC(ctx context.Context) (int64, error) { +func (s *CountService) Do(ctx context.Context) (int64, error) { // Check pre-conditions if err := s.Validate(); err != nil { return 0, err @@ -291,7 +287,7 @@ func (s *CountService) DoC(ctx context.Context) (int64, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return 0, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/count_test.go b/vendor/gopkg.in/olivere/elastic.v3/count_test.go index bfc2a2955..c4703343e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/count_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/count_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestCountURL(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -55,28 +59,28 @@ func TestCount(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Count documents - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -85,7 +89,7 @@ func TestCount(t *testing.T) { } // Count documents - count, err = client.Count(testIndexName).Type("tweet").Do() + count, err = client.Count(testIndexName).Type("tweet").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -94,7 +98,7 @@ func TestCount(t *testing.T) { } // Count documents - count, err = client.Count(testIndexName).Type("gezwitscher").Do() + count, err = client.Count(testIndexName).Type("gezwitscher").Do(context.TODO()) if err != nil { t.Fatal(err) } 
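The count.go and count_test.go hunks above show the migration pattern this whole patch applies: the parallel Do()/DoC(ctx) pair collapses into a single context-aware Do(ctx), and PerformRequestC becomes PerformRequest. A minimal sketch of calling code after the change follows; the index and type names are hypothetical, and it assumes an Elasticsearch node on the default localhost:9200.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		log.Fatal(err)
	}

	// Every Do now takes a context as its first argument; context.TODO()
	// is the drop-in value used throughout the updated tests.
	count, err := client.Count("twitter").Type("tweet").Do(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d documents\n", count)
}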
@@ -104,7 +108,7 @@ func TestCount(t *testing.T) { // Count with query query := NewTermQuery("user", "olivere") - count, err = client.Count(testIndexName).Query(query).Do() + count, err = client.Count(testIndexName).Query(query).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -114,7 +118,7 @@ func TestCount(t *testing.T) { // Count with query and type query = NewTermQuery("user", "olivere") - count, err = client.Count(testIndexName).Type("tweet").Query(query).Do() + count, err = client.Count(testIndexName).Type("tweet").Query(query).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/decoder.go b/vendor/gopkg.in/olivere/elastic.v3/decoder.go index 765a5be30..9cd2cf720 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/decoder.go +++ b/vendor/gopkg.in/olivere/elastic.v3/decoder.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/decoder_test.go b/vendor/gopkg.in/olivere/elastic.v3/decoder_test.go index 832b8dddc..507cae819 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/decoder_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/decoder_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -9,6 +9,8 @@ import ( "encoding/json" "sync/atomic" "testing" + + "golang.org/x/net/context" ) type decoder struct { @@ -36,7 +38,7 @@ func TestDecoder(t *testing.T) { Type("tweet"). Id("1"). BodyJson(&tweet). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete.go b/vendor/gopkg.in/olivere/elastic.v3/delete.go index 7adb11e3a..c49c9f5d5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,28 +10,27 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // DeleteService allows to delete a typed JSON document from a specified // index based on its id. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-delete.html // for details. type DeleteService struct { - client *Client - pretty bool - id string - index string - typ string - routing string - timeout string - version interface{} - versionType string - consistency string - parent string - refresh *bool - replication string + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + waitForActiveShards string + parent string + refresh string } // NewDeleteService creates a new DeleteService. @@ -59,12 +58,6 @@ func (s *DeleteService) Index(index string) *DeleteService { return s } -// Replication specifies a replication type. 
-func (s *DeleteService) Replication(replication string) *DeleteService { - s.replication = replication - return s -} - // Routing is a specific routing value. func (s *DeleteService) Routing(routing string) *DeleteService { s.routing = routing @@ -89,9 +82,13 @@ func (s *DeleteService) VersionType(versionType string) *DeleteService { return s } -// Consistency defines a specific write consistency setting for the operation. -func (s *DeleteService) Consistency(consistency string) *DeleteService { - s.consistency = consistency +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the delete operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService { + s.waitForActiveShards = waitForActiveShards return s } @@ -102,8 +99,8 @@ func (s *DeleteService) Parent(parent string) *DeleteService { } // Refresh the index after performing the operation. -func (s *DeleteService) Refresh(refresh bool) *DeleteService { - s.refresh = &refresh +func (s *DeleteService) Refresh(refresh string) *DeleteService { + s.refresh = refresh return s } @@ -130,11 +127,8 @@ func (s *DeleteService) buildURL() (string, url.Values, error) { if s.pretty { params.Set("pretty", "1") } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.replication != "" { - params.Set("replication", s.replication) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.routing != "" { params.Set("routing", s.routing) @@ -148,8 +142,8 @@ func (s *DeleteService) buildURL() (string, url.Values, error) { if s.versionType != "" { params.Set("version_type", s.versionType) } - if s.consistency != "" { - params.Set("consistency", s.consistency) + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) } if s.parent != "" { params.Set("parent", s.parent) @@ -176,12 +170,7 @@ func (s *DeleteService) Validate() error { } // Do executes the operation. -func (s *DeleteService) Do() (*DeleteResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *DeleteService) DoC(ctx context.Context) (*DeleteResponse, error) { +func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -194,7 +183,7 @@ func (s *DeleteService) DoC(ctx context.Context) (*DeleteResponse, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, nil) + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go index 7e6347ec9..a8d8028ab 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
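The delete.go hunk above drops the pre-5.0 Replication and Consistency options in favor of WaitForActiveShards, and changes Refresh from a bool to the string values Elasticsearch 5 accepts. A short sketch of a delete against the new surface, reusing the client from the previous sketch (index, type and id are hypothetical):

// Refresh now takes "true", "false" or "wait_for" instead of a bool,
// and WaitForActiveShards("all") stands in for the removed Consistency.
res, err := client.Delete().
	Index("twitter").
	Type("tweet").
	Id("1").
	Refresh("wait_for").
	WaitForActiveShards("all").
	Do(context.TODO())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("found and deleted: %v\n", res.Found)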
@@ -11,28 +11,58 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // DeleteByQueryService deletes documents that match a query. // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. type DeleteByQueryService struct { - client *Client - indices []string - types []string - analyzer string - consistency string - defaultOper string - df string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - replication string - routing string - timeout string - pretty bool - q string - query Query + client *Client + index []string + typ []string + query Query + body interface{} + xSource []string + xSourceExclude []string + xSourceInclude []string + analyzer string + analyzeWildcard *bool + allowNoIndices *bool + conflicts string + defaultOperator string + df string + docvalueFields []string + expandWildcards string + explain *bool + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + preference string + q string + refresh string + requestCache *bool + requestsPerSecond *int + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + storedFields []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + waitForActiveShards string + waitForCompletion *bool + pretty bool } // NewDeleteByQueryService creates a new DeleteByQueryService. @@ -46,20 +76,33 @@ func NewDeleteByQueryService(client *Client) *DeleteByQueryService { } // Index sets the indices on which to perform the delete operation. -func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) +func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService { + s.index = append(s.index, index...) return s } // Type limits the delete operation to the given types. -func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) +func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) return s } @@ -69,16 +112,45 @@ func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { return s } -// Consistency represents the specific write consistency setting for the operation. -// It can be one, quorum, or all. 
-func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService { - s.consistency = consistency +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict proceeds with the request on version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService { + s.conflicts = "proceed" return s } // DefaultOperator for query string query (AND or OR). func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { - s.defaultOper = defaultOperator + s.defaultOperator = defaultOperator return s } @@ -95,6 +167,32 @@ func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryS return s } +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService { + s.docvalueFields = docvalueFields + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Explain specifies whether to return detailed information about score +// computation as part of a hit. +func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService { + s.explain = &explain + return s +} + +// From is the starting offset (default: 0). +func (s *DeleteByQueryService) From(from int) *DeleteByQueryService { + s.from = &from + return s +} + // IgnoreUnavailable indicates whether specified concrete indices should be // ignored when unavailable (missing or closed). func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryServ @@ -102,24 +200,23 @@ return s } -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices (including the _all string -// or when no indices have been specified). -func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { - s.allowNoIndices = &allow +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. 
+func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService { + s.lenient = &lenient return s } -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. It can be "open" or "closed". -func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { - s.expandWildcards = expand +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms return s } -// Replication sets a specific replication type (sync or async). -func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService { - s.replication = replication +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService { + s.preference = preference return s } @@ -137,172 +234,416 @@ func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { return s } -// Routing sets a specific routing value. -func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService { - s.routing = routing +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Refresh indicates whether the effected indexes should be refreshed. +func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService { + s.refresh = refresh + return s +} + +// RequestCache specifies if request cache should be used for this request +// or not, defaults to index level setting. +func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService { + s.requestCache = &requestCache + return s +} + +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. +func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Routing is a list of specific routing values. +func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService { + s.routing = append(s.routing, routing...) + return s +} + +// Scroll specifies how long a consistent view of the index should be maintained +// for scrolled search. +func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService { + s.scroll = scroll + return s +} + +// ScrollSize is the size on the scroll request powering the update_by_query. +func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService { + s.scrollSize = &scrollSize return s } -// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms". +// SearchTimeout defines an explicit timeout for each search request. +// Defaults to no timeout. +func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService { + s.searchTimeout = searchTimeout + return s +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService { + s.searchType = searchType + return s +} + +// Size represents the number of hits to return (default: 10). 
+func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService { + s.size = &size + return s +} + +// Sort is a list of <field>:<direction> pairs. +func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService { + s.sort = append(s.sort, sort...) + return s +} + +// SortByField adds a sort order. +func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService { + if ascending { + s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) + } else { + s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) + } + return s +} + +// Stats specifies specific tag(s) of the request for logging and statistical purposes. +func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService { + s.stats = append(s.stats, stats...) + return s +} + +// StoredFields specifies the list of stored fields to return as part of a hit. +func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService { + s.storedFields = storedFields + return s +} + +// SuggestField specifies which field to use for suggestions. +func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in response. +func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. +func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { s.timeout = timeout return s } -// Pretty indents the JSON output from Elasticsearch. -func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { - s.pretty = pretty +// TimeoutInMillis sets the timeout in milliseconds. +func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) return s } -// Query sets the query programmatically. -func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { - s.query = query +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService { + s.trackScores = &trackScores return s } -// Do executes the delete-by-query operation. -func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) { - return s.DoC(nil) +// Version specifies whether to return document version as part of a hit. +func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService { + s.version = &version + return s } -// DoC executes the delete-by-query operation. 
-func (s *DeleteByQueryService) DoC(ctx context.Context) (*DeleteByQueryResult, error) { - var err error +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the update by query operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1). +func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService { + s.waitForActiveShards = waitForActiveShards + return s +} - // Build url - path := "/" +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indents the JSON output from Elasticsearch. +func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Body specifies the body of the request. It overrides data being specified via SearchService. +func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService { + s.body = body + return s +} - // Indices part - var indexPart []string - for _, index := range s.indices { - index, err = uritemplates.Expand("{index}", map[string]string{ - "index": index, +// buildURL builds the URL for the operation. +func (s *DeleteByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") + if err != nil { + return "", url.Values{}, err } - // Types part - var typesPart []string - for _, typ := range s.types { - typ, err = uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) + // Add query string parameters + params := url.Values{} + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) } - if len(typesPart) > 0 { - path += "/" + strings.Join(typesPart, ",") + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) } - - // Search - path += "/_query" - - // Parameters - params := make(url.Values) if s.analyzer != "" { params.Set("analyzer", s.analyzer) } - if s.consistency != "" { - params.Set("consistency", s.consistency) + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) } - if s.defaultOper != "" { - params.Set("default_operator", s.defaultOper) + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) } if s.df != "" { params.Set("df", s.df) } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", 
strings.Join(s.docvalueFields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } if s.ignoreUnavailable != nil { params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) } if s.allowNoIndices != nil { params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } if s.expandWildcards != "" { params.Set("expand_wildcards", s.expandWildcards) } - if s.replication != "" { - params.Set("replication", s.replication) + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) } - if s.routing != "" { - params.Set("routing", s.routing) + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.size != nil { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) } if s.timeout != "" { params.Set("timeout", s.timeout) } + if s.trackScores != nil { + params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } - if s.q != "" { - params.Set("q", s.q) + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the delete-by-query operation. 
+func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err } // Set body if there is a query set var body interface{} - if s.query != nil { + if s.body != nil { + body = s.body + } else if s.query != nil { src, err := s.query.Source() if err != nil { return nil, err } - query := make(map[string]interface{}) - query["query"] = src - body = query + body = map[string]interface{}{ + "query": src, + } } // Get response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } // Return result - ret := new(DeleteByQueryResult) + ret := new(BulkIndexByScrollResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } -// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService. -type DeleteByQueryResult struct { - Took int64 `json:"took"` - TimedOut bool `json:"timed_out"` - Indices map[string]IndexDeleteByQueryResult `json:"_indices"` - Failures []shardOperationFailure `json:"failures"` -} - -// IndexNames returns the names of the indices the DeleteByQuery touched. -func (res DeleteByQueryResult) IndexNames() []string { - var indices []string - for index := range res.Indices { - indices = append(indices, index) - } - return indices -} - -// All returns the index delete-by-query result of all indices. -func (res DeleteByQueryResult) All() IndexDeleteByQueryResult { - all, _ := res.Indices["_all"] - return all +// BulkIndexByScrollResponse is the outcome of executing Do with +// DeleteByQueryService and UpdateByQueryService. +type BulkIndexByScrollResponse struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Total int64 `json:"total"` + Updated int64 `json:"updated"` + Created int64 `json:"created"` + Deleted int64 `json:"deleted"` + Batches int64 `json:"batches"` + VersionConflicts int64 `json:"version_conflicts"` + Noops int64 `json:"noops"` + Retries struct { + Bulk int64 `json:"bulk"` + Search int64 `json:"search"` + } `json:"retries"` + Throttled string `json:"throttled"` + ThrottledMillis int64 `json:"throttled_millis"` + RequestsPerSecond float64 `json:"requests_per_second"` + Canceled string `json:"canceled"` + ThrottledUntil string `json:"throttled_until"` + ThrottledUntilMillis int64 `json:"throttled_until_millis"` + Failures []bulkIndexByScrollResponseFailure `json:"failures"` } -// IndexDeleteByQueryResult is the result of a delete-by-query for a specific -// index. -type IndexDeleteByQueryResult struct { - // Found documents, matching the query. - Found int `json:"found"` - // Deleted documents, successfully, from the given index. - Deleted int `json:"deleted"` - // Missing documents when trying to delete them. - Missing int `json:"missing"` - // Failed documents to be deleted for the given index. 
- Failed int `json:"failed"` +type bulkIndexByScrollResponseFailure struct { + Index string `json:"index,omitempty"` + Type string `json:"type,omitempty"` + Id string `json:"id,omitempty"` + Status int `json:"status,omitempty"` + Shard int `json:"shard,omitempty"` + Node int `json:"node,omitempty"` + // TODO "cause" contains exception details + // TODO "reason" contains exception details } diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query_test.go index 71b786f6e..b829a7e09 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete_by_query_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_by_query_test.go @@ -1,50 +1,115 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" -func TestDeleteByQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + "golang.org/x/net/context" +) - found, err := client.HasPlugin("delete-by-query") - if err != nil { - t.Fatal(err) +func TestDeleteByQueryBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectErr bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "/index1/_delete_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{}, + "/index1%2Cindex2/_delete_by_query", + false, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1/_delete_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2/_delete_by_query", + false, + }, } - if !found { - t.Skip("DeleteByQuery in 2.0 is now a plugin (delete-by-query) and must be " + - "loaded in the configuration") + + for i, test := range tests { + builder := client.DeleteByQuery().Index(test.Indices...).Type(test.Types...)
+ err := builder.Validate() + if err != nil { + if !test.ExpectErr { + t.Errorf("case #%d: %v", i+1, err) + continue + } + } else { + // err == nil + if test.ExpectErr { + t.Errorf("case #%d: expected error", i+1) + continue + } + path, _, _ := builder.buildURL() + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } } +} + +func TestDeleteByQuery(t *testing.T) { + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Count documents - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -54,7 +119,12 @@ func TestDeleteByQuery(t *testing.T) { // Delete all documents by sandrae q := NewTermQuery("user", "sandrae") - res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do() + res, err := client.DeleteByQuery(). + Index(testIndexName). + Type("tweet"). + Query(q). + Pretty(true). 
+ Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -62,49 +132,12 @@ func TestDeleteByQuery(t *testing.T) { t.Fatalf("expected response != nil; got: %v", res) } - // Check response - if got, want := len(res.IndexNames()), 2; got != want { - t.Fatalf("expected %d indices; got: %d", want, got) - } - idx, found := res.Indices["_all"] - if !found { - t.Fatalf("expected to find index %q", "_all") - } - if got, want := idx.Found, 1; got != want { - t.Fatalf("expected Found = %v; got: %v", want, got) - } - if got, want := idx.Deleted, 1; got != want { - t.Fatalf("expected Deleted = %v; got: %v", want, got) - } - if got, want := idx.Missing, 0; got != want { - t.Fatalf("expected Missing = %v; got: %v", want, got) - } - if got, want := idx.Failed, 0; got != want { - t.Fatalf("expected Failed = %v; got: %v", want, got) - } - idx, found = res.Indices[testIndexName] - if !found { - t.Errorf("expected Found = true; got: %v", found) - } - if got, want := idx.Found, 1; got != want { - t.Fatalf("expected Found = %v; got: %v", want, got) - } - if got, want := idx.Deleted, 1; got != want { - t.Fatalf("expected Deleted = %v; got: %v", want, got) - } - if got, want := idx.Missing, 0; got != want { - t.Fatalf("expected Missing = %v; got: %v", want, got) - } - if got, want := idx.Failed, 0; got != want { - t.Fatalf("expected Failed = %v; got: %v", want, got) - } - // Flush and check count - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err = client.Count(testIndexName).Do() + count, err = client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_template.go b/vendor/gopkg.in/olivere/elastic.v3/delete_template.go index 7d35fe990..a7ec9844e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // DeleteTemplateService deletes a search template. More information can @@ -83,12 +83,7 @@ func (s *DeleteTemplateService) Validate() error { } // Do executes the operation. -func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *DeleteTemplateService) DoC(ctx context.Context) (*DeleteTemplateResponse, error) { +func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -101,24 +96,15 @@ func (s *DeleteTemplateService) DoC(ctx context.Context) (*DeleteTemplateRespons } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, nil) + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) if err != nil { return nil, err } // Return operation response - ret := new(DeleteTemplateResponse) + ret := new(AcknowledgedResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } - -// DeleteTemplateResponse is the response of DeleteTemplateService.Do. 
-type DeleteTemplateResponse struct { - Found bool `json:"found"` - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Version int `json:"_version"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_template_test.go b/vendor/gopkg.in/olivere/elastic.v3/delete_template_test.go index 85bb7ad55..c0fe8f0cd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete_template_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_template_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,13 +6,15 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestDeleteTemplateValidate(t *testing.T) { client := setupTestClient(t) // No template id -> fail with error - res, err := NewDeleteTemplateService(client).Do() + res, err := NewDeleteTemplateService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected Delete to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/delete_test.go b/vendor/gopkg.in/olivere/elastic.v3/delete_test.go index 418fdec7d..fd95f49d2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/delete_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/delete_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestDelete(t *testing.T) { @@ -16,28 +18,28 @@ func TestDelete(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Count documents - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -46,18 +48,18 @@ func TestDelete(t *testing.T) { } // Delete document 1 - res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() + res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } if res.Found != true { t.Errorf("expected Found = true; got %v", res.Found) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - count, err = client.Count(testIndexName).Do() + count, err = client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ 
-66,7 +68,7 @@ func TestDelete(t *testing.T) { } // Delete non existent document 99 - res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do() + res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh("true").Do(context.TODO()) if err == nil { t.Fatalf("expected error; got: %v", err) } @@ -77,7 +79,7 @@ func TestDelete(t *testing.T) { t.Fatalf("expected no response; got: %v", res) } - count, err = client.Count(testIndexName).Do() + count, err = client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -90,7 +92,7 @@ func TestDeleteValidate(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) // No index name -> fail with error - res, err := NewDeleteService(client).Type("tweet").Id("1").Do() + res, err := NewDeleteService(client).Type("tweet").Id("1").Do(context.TODO()) if err == nil { t.Fatalf("expected Delete to fail without index name") } @@ -99,7 +101,7 @@ func TestDeleteValidate(t *testing.T) { } // No type -> fail with error - res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do() + res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do(context.TODO()) if err == nil { t.Fatalf("expected Delete to fail without type") } @@ -108,7 +110,7 @@ func TestDeleteValidate(t *testing.T) { } // No id -> fail with error - res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do() + res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do(context.TODO()) if err == nil { t.Fatalf("expected Delete to fail without id") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/doc.go b/vendor/gopkg.in/olivere/elastic.v3/doc.go index 336a734de..38e9980f7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/doc.go +++ b/vendor/gopkg.in/olivere/elastic.v3/doc.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/errors.go b/vendor/gopkg.in/olivere/elastic.v3/errors.go index 7694f7ac7..009123531 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/errors.go +++ b/vendor/gopkg.in/olivere/elastic.v3/errors.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
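The delete-by-query rewrite earlier in this patch replaces the removed 2.x plugin endpoint with the native _delete_by_query API and swaps the per-index DeleteByQueryResult for the shared BulkIndexByScrollResponse. A sketch of the resulting call, again with hypothetical index, type and query values:

// The service now POSTs to /{index}/{type}/_delete_by_query and reports
// aggregate counters instead of per-index Found/Deleted/Missing/Failed.
q := elastic.NewTermQuery("user", "sandrae")
res, err := client.DeleteByQuery().
	Index("twitter").
	Type("tweet").
	Query(q).
	ProceedOnVersionConflict(). // alias for Conflicts("proceed")
	Do(context.TODO())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("deleted %d of %d matches in %d batches\n", res.Deleted, res.Total, res.Batches)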
@@ -84,8 +84,9 @@ type ErrorDetails struct { func (e *Error) Error() string { if e.Details != nil && e.Details.Reason != "" { return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) + } else { + return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) } - return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) } // IsNotFound returns true if the given error indicates that Elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v3/example_test.go b/vendor/gopkg.in/olivere/elastic.v3/example_test.go index 429b4cdec..540b9bccf 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/example_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/example_test.go @@ -7,16 +7,14 @@ package elastic_test import ( "encoding/json" "fmt" - "io" "log" "os" "reflect" "time" "golang.org/x/net/context" - "golang.org/x/sync/errgroup" - elastic "gopkg.in/olivere/elastic.v3" + elastic "gopkg.in/olivere/elastic.v5" ) type Tweet struct { @@ -44,7 +42,7 @@ func Example() { //client.SetTracer(log.New(os.Stdout, "", 0)) // Ping the Elasticsearch server to get e.g. the version number - info, code, err := client.Ping("http://127.0.0.1:9200").Do() + info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -60,14 +58,14 @@ func Example() { fmt.Printf("Elasticsearch version %s", esversion) // Use the IndexExists service to check if a specified index exists. - exists, err := client.IndexExists("twitter").Do() + exists, err := client.IndexExists("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) } if !exists { // Create a new index. - createIndex, err := client.CreateIndex("twitter").Do() + createIndex, err := client.CreateIndex("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -84,7 +82,7 @@ func Example() { Type("tweet"). Id("1"). BodyJson(tweet1). - Do() + Do(context.Background()) if err != nil { // Handle error panic(err) @@ -98,7 +96,7 @@ func Example() { Type("tweet"). Id("2"). BodyString(tweet2). - Do() + Do(context.Background()) if err != nil { // Handle error panic(err) @@ -110,7 +108,7 @@ func Example() { Index("twitter"). Type("tweet"). Id("1"). - Do() + Do(context.Background()) if err != nil { // Handle error panic(err) @@ -120,7 +118,7 @@ func Example() { } // Flush to make sure the documents got written. - _, err = client.Flush().Index("twitter").Do() + _, err = client.Flush().Index("twitter").Do(context.Background()) if err != nil { panic(err) } @@ -128,12 +126,12 @@ func Example() { // Search with a term query termQuery := elastic.NewTermQuery("user", "olivere") searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(termQuery). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do(context.Background()) // execute if err != nil { // Handle error panic(err) @@ -184,7 +182,7 @@ func Example() { update, err := client.Update().Index("twitter").Type("tweet").Id("1"). Script(script). Upsert(map[string]interface{}{"retweets": 0}). 
- Do() + Do(context.Background()) if err != nil { // Handle error panic(err) @@ -194,7 +192,7 @@ func Example() { // ... // Delete an index. - deleteIndex, err := client.DeleteIndex("twitter").Do() + deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -257,7 +255,7 @@ func ExampleIndexExistsService() { panic(err) } // Use the IndexExists service to check if the index "twitter" exists. - exists, err := client.IndexExists("twitter").Do() + exists, err := client.IndexExists("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -275,7 +273,7 @@ func ExampleCreateIndexService() { panic(err) } // Create a new index. - createIndex, err := client.CreateIndex("twitter").Do() + createIndex, err := client.CreateIndex("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -293,7 +291,7 @@ func ExampleDeleteIndexService() { panic(err) } // Delete an index. - deleteIndex, err := client.DeleteIndex("twitter").Do() + deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background()) if err != nil { // Handle error panic(err) @@ -314,12 +312,12 @@ func ExampleSearchService() { // Search with a term query termQuery := elastic.NewTermQuery("user", "olivere") searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(termQuery). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do(context.Background()) // execute if err != nil { // Handle error panic(err) @@ -373,7 +371,7 @@ func ExampleAggregations() { SearchType("count"). // ... do not return hits, just the count Aggregation("timeline", timeline). // add our aggregation to the query Pretty(true). // pretty print request and response JSON - Do() // execute + Do(context.Background()) // execute if err != nil { // Handle error panic(err) @@ -405,7 +403,7 @@ func ExampleSearchResult() { } // Do a search - searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do() + searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do(context.Background()) if err != nil { panic(err) } @@ -449,92 +447,6 @@ func ExampleSearchResult() { } } -func ExampleScrollService() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // This example illustrates how to use two goroutines to iterate - // through a result set via ScrollService. - // - // It uses the excellent golang.org/x/sync/errgroup package to do so. - // - // The first goroutine will Scroll through the result set and send - // individual results to a channel. - // - // The second goroutine will receive results from the channel and - // deserialize them. - // - // Feel free to add a third goroutine to do something with the - // deserialized results from the 2nd goroutine. - // - // Let's go. - - // 1st goroutine sends individual hits to channel. 
- hits := make(chan json.RawMessage) - g, ctx := errgroup.WithContext(context.Background()) - g.Go(func() error { - defer close(hits) - scroll := client.Scroll("twitter").Size(100) - for { - results, err := scroll.Do() - if err == io.EOF { - return nil // all results retrieved - } - if err != nil { - return err // something went wrong - } - - // Send the hits to the hits channel - for _, hit := range results.Hits.Hits { - hits <- *hit.Source - } - - // Check if we need to terminate early - select { - default: - case <-ctx.Done(): - return ctx.Err() - } - } - }) - - // 2nd goroutine receives hits and deserializes them. - // - // If you want, setup a number of goroutines handling deserialization in parallel. - g.Go(func() error { - for hit := range hits { - // Deserialize - var tw Tweet - err := json.Unmarshal(hit, &tw) - if err != nil { - return err - } - - // Do something with the tweet here, e.g. send it to another channel - // for further processing. - _ = tw - - // Terminate early? - select { - default: - case <-ctx.Done(): - return ctx.Err() - } - } - return nil - }) - - // Check whether any goroutines failed. - if err := g.Wait(); err != nil { - panic(err) - } - - // Done. - fmt.Print("Successfully processed tweets in parallel via ScrollService.\n") -} - func ExamplePutTemplateService() { client, err := elastic.NewClient() if err != nil { @@ -548,12 +460,12 @@ func ExamplePutTemplateService() { resp, err := client.PutTemplate(). Id("my-search-template"). // Name of the template BodyString(tmpl). // Search template itself - Do() // Execute + Do(context.Background()) // Execute if err != nil { panic(err) } - if resp.Created { - fmt.Println("search template created") + if resp.Acknowledged { + fmt.Println("search template creation acknowledged") } } @@ -564,7 +476,7 @@ func ExampleGetTemplateService() { } // Get template stored under "my-search-template" - resp, err := client.GetTemplate().Id("my-search-template").Do() + resp, err := client.GetTemplate().Id("my-search-template").Do(context.Background()) if err != nil { panic(err) } @@ -578,11 +490,11 @@ func ExampleDeleteTemplateService() { } // Delete template - resp, err := client.DeleteTemplate().Id("my-search-template").Do() + resp, err := client.DeleteTemplate().Id("my-search-template").Do(context.Background()) if err != nil { panic(err) } - if resp != nil && resp.Found { + if resp != nil && resp.Acknowledged { fmt.Println("template deleted") } } @@ -594,7 +506,7 @@ func ExampleClusterHealthService() { } // Get cluster health - res, err := client.ClusterHealth().Index("twitter").Do() + res, err := client.ClusterHealth().Index("twitter").Do(context.Background()) if err != nil { panic(err) } @@ -611,7 +523,7 @@ func ExampleClusterHealthService_WaitForGreen() { } // Wait for status green - res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do() + res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do(context.Background()) if err != nil { panic(err) } @@ -629,7 +541,7 @@ func ExampleClusterStateService() { } // Get cluster state - res, err := client.ClusterState().Metric("version").Do() + res, err := client.ClusterState().Metric("version").Do(context.Background()) if err != nil { panic(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/exists.go b/vendor/gopkg.in/olivere/elastic.v3/exists.go index 86b4e7702..c193197b9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/exists.go +++ b/vendor/gopkg.in/olivere/elastic.v3/exists.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // ExistsService checks for the existence of a document using HEAD. @@ -26,7 +26,7 @@ type ExistsService struct { typ string preference string realtime *bool - refresh *bool + refresh string routing string parent string } @@ -70,8 +70,8 @@ func (s *ExistsService) Realtime(realtime bool) *ExistsService { } // Refresh the shard containing the document before performing the operation. -func (s *ExistsService) Refresh(refresh bool) *ExistsService { - s.refresh = &refresh +func (s *ExistsService) Refresh(refresh string) *ExistsService { + s.refresh = refresh return s } @@ -113,8 +113,8 @@ func (s *ExistsService) buildURL() (string, url.Values, error) { if s.realtime != nil { params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.routing != "" { params.Set("routing", s.routing) @@ -147,12 +147,7 @@ func (s *ExistsService) Validate() error { } // Do executes the operation. -func (s *ExistsService) Do() (bool, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ExistsService) DoC(ctx context.Context) (bool, error) { +func (s *ExistsService) Do(ctx context.Context) (bool, error) { // Check pre-conditions if err := s.Validate(); err != nil { return false, err @@ -165,7 +160,7 @@ func (s *ExistsService) DoC(ctx context.Context) (bool, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "HEAD", path, params, nil, 404) + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) if err != nil { return false, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/exists_test.go b/vendor/gopkg.in/olivere/elastic.v3/exists_test.go index 58a4fe707..3f6d52bc6 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/exists_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/exists_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
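The pattern that repeats through the hunks above and below: elastic.v5 folds the transitional DoC(ctx) variant back into Do(ctx), so every call site now threads a context.Context (imported from golang.org/x/net/context, as these files do), and Refresh changes from *bool to string because the Elasticsearch 5.x ?refresh parameter is an enum ("true", "false", "wait_for") rather than a boolean. A minimal caller-side sketch of the migration; the index, type and id are illustrative, and "wait_for" comes from the 5.x documentation, not from this patch:

    package main

    import (
        "log"
        "time"

        "golang.org/x/net/context"

        elastic "gopkg.in/olivere/elastic.v5"
    )

    func main() {
        client, err := elastic.NewClient()
        if err != nil {
            log.Fatal(err)
        }

        // Every Do now takes a context, so callers can bound the request.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // elastic.v3: client.Exists().Index("twitter").Type("tweet").Id("1").Do()
        exists, err := client.Exists().
            Index("twitter").
            Type("tweet").
            Id("1").
            Refresh("true"). // 5.x refresh is a string enum, not a bool
            Do(ctx)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("document exists: %v", exists)
    }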
@@ -6,12 +6,14 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestExists(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) - exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do() + exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -24,7 +26,7 @@ func TestExistsValidate(t *testing.T) { client := setupTestClient(t) // No index -> fail with error - res, err := NewExistsService(client).Type("tweet").Id("1").Do() + res, err := NewExistsService(client).Type("tweet").Id("1").Do(context.TODO()) if err == nil { t.Fatalf("expected Exists to fail without index name") } @@ -33,7 +35,7 @@ } // No type -> fail with error - res, err = NewExistsService(client).Index(testIndexName).Id("1").Do() + res, err = NewExistsService(client).Index(testIndexName).Id("1").Do(context.TODO()) if err == nil { t.Fatalf("expected Exists to fail without type") } @@ -42,7 +44,7 @@ } // No id -> fail with error - res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do() + res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do(context.TODO()) if err == nil { t.Fatalf("expected Exists to fail without id") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/explain.go b/vendor/gopkg.in/olivere/elastic.v3/explain.go index 3d398f6be..39e252ee4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/explain.go +++ b/vendor/gopkg.in/olivere/elastic.v3/explain.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // ExplainService computes a score explanation for a query and @@ -278,12 +278,7 @@ func (s *ExplainService) Validate() error { } // Do executes the operation. -func (s *ExplainService) Do() (*ExplainResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ExplainService) DoC(ctx context.Context) (*ExplainResponse, error) { +func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -304,7 +299,7 @@ func (s *ExplainService) DoC(ctx context.Context) (*ExplainResponse, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, body) + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/explain_test.go b/vendor/gopkg.in/olivere/elastic.v3/explain_test.go index e799d6c52..3bae94b26 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/explain_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/explain_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details.
package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestExplain(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -17,8 +21,8 @@ func TestExplain(t *testing.T) { Type("tweet"). Id("1"). BodyJson(&tweet1). - Refresh(true). - Do() + Refresh("true"). + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -28,7 +32,7 @@ func TestExplain(t *testing.T) { // Explain query := NewTermQuery("user", "olivere") - expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do() + expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go index e13c9eb47..59a453c9e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go +++ b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go index 2bb683d69..8c8dd47a5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/field_stats.go b/vendor/gopkg.in/olivere/elastic.v3/field_stats.go index d5b011f80..a856dbcc3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/field_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/field_stats.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) const ( @@ -168,12 +168,7 @@ func (s *FieldStatsService) Validate() error { } // Do executes the operation. -func (s *FieldStatsService) Do() (*FieldStatsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *FieldStatsService) DoC(ctx context.Context) (*FieldStatsResponse, error) { +func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -194,7 +189,7 @@ func (s *FieldStatsService) DoC(ctx context.Context) (*FieldStatsResponse, error } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body, http.StatusNotFound) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/geo_point.go b/vendor/gopkg.in/olivere/elastic.v3/geo_point.go index a09351ca2..fb243671d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/geo_point.go +++ b/vendor/gopkg.in/olivere/elastic.v3/geo_point.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
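The field_stats.go hunk above also shows the second half of the client rename: PerformRequestC(ctx, ...) becomes PerformRequest(ctx, ...), and any trailing status codes (http.StatusNotFound there, the literal 404 in the HEAD-based services) are treated as expected responses rather than errors. A plausible reconstruction, not part of this patch, of how an exists-style Do then maps those whitelisted codes onto its boolean result:

    import (
        "fmt"
        "net/http"
    )

    // existsFromStatus is a hypothetical helper mirroring the tail of the
    // HEAD-based Do methods: 404 was whitelisted in PerformRequest, so it
    // arrives here as a normal response instead of an error.
    func existsFromStatus(statusCode int) (bool, error) {
        switch statusCode {
        case http.StatusOK:
            return true, nil
        case http.StatusNotFound:
            return false, nil
        default:
            return false, fmt.Errorf("elastic: got HTTP status %d, expected 200 or 404", statusCode)
        }
    }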
diff --git a/vendor/gopkg.in/olivere/elastic.v3/geo_point_test.go b/vendor/gopkg.in/olivere/elastic.v3/geo_point_test.go index ebc28c2ec..1d085cd38 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/geo_point_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/geo_point_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/get.go b/vendor/gopkg.in/olivere/elastic.v3/get.go index c75d304c3..f2309f5b4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/get.go +++ b/vendor/gopkg.in/olivere/elastic.v3/get.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // GetService allows to get a typed JSON document from the index based @@ -28,8 +28,8 @@ type GetService struct { id string routing string preference string - fields []string - refresh *bool + storedFields []string + refresh string realtime *bool fsc *FetchSourceContext version interface{} @@ -46,17 +46,6 @@ func NewGetService(client *Client) *GetService { } } -/* -// String returns a string representation of the GetService request. -func (s *GetService) String() string { - return fmt.Sprintf("[%v][%v][%v]: routing [%v]", - s.index, - s.typ, - s.id, - s.routing) -} -*/ - // Index is the name of the index. func (s *GetService) Index(index string) *GetService { s.index = index @@ -94,12 +83,9 @@ func (s *GetService) Preference(preference string) *GetService { return s } -// Fields is a list of fields to return in the response. -func (s *GetService) Fields(fields ...string) *GetService { - if s.fields == nil { - s.fields = make([]string, 0) - } - s.fields = append(s.fields, fields...) +// StoredFields is a list of fields to return in the response. +func (s *GetService) StoredFields(storedFields ...string) *GetService { + s.storedFields = append(s.storedFields, storedFields...) return s } @@ -118,8 +104,8 @@ func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) } // Refresh the shard containing the document before performing the operation. -func (s *GetService) Refresh(refresh bool) *GetService { - s.refresh = &refresh +func (s *GetService) Refresh(refresh string) *GetService { + s.refresh = refresh return s } @@ -198,11 +184,11 @@ func (s *GetService) buildURL() (string, url.Values, error) { if s.preference != "" { params.Set("preference", s.preference) } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.version != nil { params.Set("version", fmt.Sprintf("%v", s.version)) @@ -225,12 +211,7 @@ func (s *GetService) buildURL() (string, url.Values, error) { } // Do executes the operation. -func (s *GetService) Do() (*GetResult, error) { - return s.DoC(nil) -} - -// Do executes the operation. 
-func (s *GetService) DoC(ctx context.Context) (*GetResult, error) { +func (s *GetService) Do(ctx context.Context) (*GetResult, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -243,7 +224,7 @@ func (s *GetService) DoC(ctx context.Context) (*GetResult, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } @@ -260,18 +241,16 @@ func (s *GetService) DoC(ctx context.Context) (*GetResult, error) { // GetResult is the outcome of GetService.Do. type GetResult struct { - Index string `json:"_index"` // index meta field - Type string `json:"_type"` // type meta field - Id string `json:"_id"` // id meta field - Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) - Timestamp int64 `json:"_timestamp"` // timestamp meta field - TTL int64 `json:"_ttl"` // ttl meta field - Routing string `json:"_routing"` // routing meta field - Parent string `json:"_parent"` // parent meta field - Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService - Source *json.RawMessage `json:"_source,omitempty"` - Found bool `json:"found,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` //Error string `json:"error,omitempty"` // used only in MultiGet // TODO double-check that MultiGet now returns details error information Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet diff --git a/vendor/gopkg.in/olivere/elastic.v3/get_template.go b/vendor/gopkg.in/olivere/elastic.v3/get_template.go index 89ce45082..31c44b6d7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/get_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/get_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // GetTemplateService reads a search template. @@ -83,12 +83,7 @@ func (s *GetTemplateService) Validate() error { } // Do executes the operation and returns the template. -func (s *GetTemplateService) Do() (*GetTemplateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation and returns the template. 
-func (s *GetTemplateService) DoC(ctx context.Context) (*GetTemplateResponse, error) { +func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -101,7 +96,7 @@ func (s *GetTemplateService) DoC(ctx context.Context) (*GetTemplateResponse, err } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/get_template_test.go b/vendor/gopkg.in/olivere/elastic.v3/get_template_test.go index 00aea6899..eff4a7fd5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/get_template_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/get_template_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestGetPutDeleteTemplate(t *testing.T) { @@ -23,22 +25,22 @@ func TestGetPutDeleteTemplate(t *testing.T) { "my_size" : 5 } }` - putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do() + putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } if putres == nil { t.Fatalf("expected response; got: %v", putres) } - if !putres.Created { - t.Fatalf("expected template to be created; got: %v", putres.Created) + if !putres.Acknowledged { + t.Fatalf("expected template creation to be acknowledged; got: %v", putres.Acknowledged) } // Always delete template - defer client.DeleteTemplate().Id("elastic-template").Do() + defer client.DeleteTemplate().Id("elastic-template").Do(context.TODO()) // Get template - getres, err := client.GetTemplate().Id("elastic-template").Do() + getres, err := client.GetTemplate().Id("elastic-template").Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/get_test.go b/vendor/gopkg.in/olivere/elastic.v3/get_test.go index 25dbe7391..77eac20f4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/get_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/get_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
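The get.go hunks above rename Fields to StoredFields, matching the 5.x stored_fields query parameter (which only returns fields marked store: true in the mapping), and drop the _timestamp and _ttl meta fields that Elasticsearch 5.0 removed; the test hunks below switch to StoredFields accordingly. A caller-side sketch, assuming a connected client as in the examples above and an illustrative stored "message" field:

    // was: client.Get().Index("twitter").Type("tweet").Id("1").Fields("message").Do()
    res, err := client.Get().
        Index("twitter").
        Type("tweet").
        Id("1").
        StoredFields("message"). // sent as ?stored_fields=message
        Do(context.TODO())
    if err != nil {
        // Handle error
        panic(err)
    }
    if msg, ok := res.Fields["message"]; ok {
        fmt.Println(msg)
    }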
@@ -7,19 +7,21 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestGet(t *testing.T) { client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } // Get document 1 - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do() + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -31,7 +33,7 @@ func TestGet(t *testing.T) { } // Get non existent document 99 - res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do() + res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do(context.TODO()) if err == nil { t.Fatalf("expected error; got: %v", err) } @@ -47,13 +49,13 @@ func TestGetWithSourceFiltering(t *testing.T) { client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } // Get document 1, without source - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do() + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -66,7 +68,7 @@ func TestGetWithSourceFiltering(t *testing.T) { // Get document 1, exclude Message field fsc := NewFetchSourceContext(true).Exclude("message") - res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do() + res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -90,16 +92,16 @@ func TestGetWithSourceFiltering(t *testing.T) { } func TestGetWithFields(t *testing.T) { - client := setupTestClientAndCreateIndex(t) + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } // Get document 1, specifying fields - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Fields("message").Do() + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").StoredFields("message").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -144,22 +146,22 @@ func TestGetValidate(t *testing.T) { // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name client := setupTestClientAndCreateIndex(t) - if _, err := client.Get().Do(); err == nil { + if _, err := client.Get().Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } - if _, err := client.Get().Index(testIndexName).Do(); err == nil { + if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } - if _, err := client.Get().Type("tweet").Do(); err == nil { + if _, err := 
client.Get().Type("tweet").Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } - if _, err := client.Get().Id("1").Do(); err == nil { + if _, err := client.Get().Id("1").Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } - if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil { + if _, err := client.Get().Index(testIndexName).Type("tweet").Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } - if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil { + if _, err := client.Get().Type("tweet").Id("1").Do(context.TODO()); err == nil { t.Fatal("expected Get to fail") } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/highlight.go b/vendor/gopkg.in/olivere/elastic.v3/highlight.go index 706368e6c..e26dbad6b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/highlight.go +++ b/vendor/gopkg.in/olivere/elastic.v3/highlight.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/highlight_test.go b/vendor/gopkg.in/olivere/elastic.v3/highlight_test.go index be5cd963e..ce1b2b189 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/highlight_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/highlight_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,8 +6,9 @@ package elastic import ( "encoding/json" - _ "net/http" "testing" + + "golang.org/x/net/context" ) func TestHighlighterField(t *testing.T) { @@ -125,22 +126,22 @@ func TestHighlightWithTermQuery(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -156,7 +157,7 @@ func TestHighlightWithTermQuery(t *testing.T) { Index(testIndexName). Highlight(hl). Query(query). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/index.go b/vendor/gopkg.in/olivere/elastic.v3/index.go index 6cfc6ce53..8a0a6f54d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/index.go +++ b/vendor/gopkg.in/olivere/elastic.v3/index.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -10,33 +10,33 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndexService adds or updates a typed JSON document in a specified index, // making it searchable. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-index_.html // for details. type IndexService struct { - client *Client - pretty bool - id string - index string - typ string - parent string - replication string - routing string - timeout string - timestamp string - ttl string - version interface{} - opType string - versionType string - refresh *bool - consistency string - bodyJson interface{} - bodyString string + client *Client + pretty bool + id string + index string + typ string + parent string + routing string + timeout string + timestamp string + ttl string + version interface{} + opType string + versionType string + refresh string + waitForActiveShards string + pipeline string + bodyJson interface{} + bodyString string } // NewIndexService creates a new IndexService. @@ -64,15 +64,25 @@ func (s *IndexService) Type(typ string) *IndexService { return s } -// Consistency is an explicit write consistency setting for the operation. -func (s *IndexService) Consistency(consistency string) *IndexService { - s.consistency = consistency +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the index operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pipeline specifies the pipeline id to preprocess incoming documents with. +func (s *IndexService) Pipeline(pipeline string) *IndexService { + s.pipeline = pipeline return s } // Refresh the index after performing the operation. -func (s *IndexService) Refresh(refresh bool) *IndexService { - s.refresh = &refresh +func (s *IndexService) Refresh(refresh string) *IndexService { + s.refresh = refresh return s } @@ -106,12 +116,6 @@ func (s *IndexService) Parent(parent string) *IndexService { return s } -// Replication is a specific replication type. -func (s *IndexService) Replication(replication string) *IndexService { - s.replication = replication - return s -} - // Routing is a specific routing value. 
func (s *IndexService) Routing(routing string) *IndexService { s.routing = routing @@ -185,11 +189,11 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) { if s.pretty { params.Set("pretty", "1") } - if s.consistency != "" { - params.Set("consistency", s.consistency) + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.opType != "" { params.Set("op_type", s.opType) @@ -197,8 +201,8 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) { if s.parent != "" { params.Set("parent", s.parent) } - if s.replication != "" { - params.Set("replication", s.replication) + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) } if s.routing != "" { params.Set("routing", s.routing) @@ -240,12 +244,7 @@ func (s *IndexService) Validate() error { } // Do executes the operation. -func (s *IndexService) Do() (*IndexResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndexService) DoC(ctx context.Context) (*IndexResponse, error) { +func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -266,7 +265,7 @@ func (s *IndexService) DoC(ctx context.Context) (*IndexResponse, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, method, path, params, body) + res, err := s.client.PerformRequest(ctx, method, path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/index_test.go b/vendor/gopkg.in/olivere/elastic.v3/index_test.go index 01722b3e3..3faf281a2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/index_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/index_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,6 +7,8 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestIndexLifecycle(t *testing.T) { @@ -20,7 +22,7 @@ func TestIndexLifecycle(t *testing.T) { Type("tweet"). Id("1"). BodyJson(&tweet1). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -29,7 +31,7 @@ func TestIndexLifecycle(t *testing.T) { } // Exists - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -42,7 +44,7 @@ func TestIndexLifecycle(t *testing.T) { Index(testIndexName). Type("tweet"). Id("1"). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -73,7 +75,7 @@ func TestIndexLifecycle(t *testing.T) { } // Delete document again - deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() + deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -82,7 +84,7 @@ func TestIndexLifecycle(t *testing.T) { } // Exists - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -101,7 +103,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { Index(testIndexName). Type("tweet"). BodyJson(&tweet1). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -114,7 +116,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { id := indexResult.Id // Exists - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do() + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -127,7 +129,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { Index(testIndexName). Type("tweet"). Id(id). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -158,7 +160,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { } // Delete document again - deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do() + deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -167,7 +169,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { } // Exists - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do() + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -182,7 +184,7 @@ func TestIndexValidate(t *testing.T) { tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} // No index name -> fail with error - res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do() + res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do(context.TODO()) if err == nil { t.Fatalf("expected Index to fail without index name") } @@ -191,7 +193,7 @@ func TestIndexValidate(t *testing.T) { } // No index name -> fail with error - res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do() + res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do(context.TODO()) if err == nil { t.Fatalf("expected Index to fail without type") } @@ -209,7 +211,7 @@ func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { client := setupTestClient(t) // Create index - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -221,7 +223,7 @@ func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { } // Exists - indexExists, err := client.IndexExists(testIndexName).Do() + indexExists, err := client.IndexExists(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -230,13 +232,13 @@ func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { } // Flush - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err 
!= nil { t.Fatal(err) } // Close index - closeIndex, err := client.CloseIndex(testIndexName).Do() + closeIndex, err := client.CloseIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -248,7 +250,7 @@ func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { } // Open index - openIndex, err := client.OpenIndex(testIndexName).Do() + openIndex, err := client.OpenIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -260,13 +262,13 @@ func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { } // Flush - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_close.go b/vendor/gopkg.in/olivere/elastic.v3/indices_close.go index 4a0a0b927..2123cc1cf 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_close.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_close.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesCloseService closes an index. @@ -122,12 +122,7 @@ func (s *IndicesCloseService) Validate() error { } // Do executes the operation. -func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesCloseService) DoC(ctx context.Context) (*IndicesCloseResponse, error) { +func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -140,7 +135,7 @@ func (s *IndicesCloseService) DoC(ctx context.Context) (*IndicesCloseResponse, e } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_close_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_close_test.go index 7293bb1c4..c80a104f8 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_close_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_close_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) // TODO(oe): Find out why this test fails on Travis CI. 
/* @@ -12,7 +16,7 @@ func TestIndicesOpenAndClose(t *testing.T) { client := setupTestClient(t) // Create index - createIndex, err := client.CreateIndex(testIndexName).Do() + createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -21,7 +25,7 @@ func TestIndicesOpenAndClose(t *testing.T) { } defer func() { // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -32,7 +36,7 @@ func TestIndicesOpenAndClose(t *testing.T) { waitForYellow := func() { // Wait for status yellow - res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do() + res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -45,7 +49,7 @@ func TestIndicesOpenAndClose(t *testing.T) { waitForYellow() // Close index - cresp, err := client.CloseIndex(testIndexName).Do() + cresp, err := client.CloseIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -57,7 +61,7 @@ func TestIndicesOpenAndClose(t *testing.T) { waitForYellow() // Open index again - oresp, err := client.OpenIndex(testIndexName).Do() + oresp, err := client.OpenIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -71,7 +75,7 @@ func TestIndicesCloseValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesCloseService(client).Do() + res, err := NewIndicesCloseService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesClose to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_create.go b/vendor/gopkg.in/olivere/elastic.v3/indices_create.go index 898a0b0ae..17f1dfc3e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_create.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_create.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesCreateService creates a new index. @@ -77,12 +77,7 @@ func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService { } // Do executes the operation. -func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) { - return b.DoC(nil) -} - -// DoC executes the operation. -func (b *IndicesCreateService) DoC(ctx context.Context) (*IndicesCreateResult, error) { +func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) { if b.index == "" { return nil, errors.New("missing index name") } @@ -115,7 +110,7 @@ func (b *IndicesCreateService) DoC(ctx context.Context) (*IndicesCreateResult, e } // Get response - res, err := b.client.PerformRequestC(ctx, "PUT", path, params, body) + res, err := b.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } @@ -131,5 +126,6 @@ func (b *IndicesCreateService) DoC(ctx context.Context) (*IndicesCreateResult, e // IndicesCreateResult is the outcome of creating a new index. 
type IndicesCreateResult struct { - Acknowledged bool `json:"acknowledged"` + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_create_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_create_test.go index b3723950a..96a3bce55 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_create_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_create_test.go @@ -1,16 +1,20 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestIndicesLifecycle(t *testing.T) { client := setupTestClient(t) // Create index - createIndex, err := client.CreateIndex(testIndexName).Do() + createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -19,7 +23,7 @@ func TestIndicesLifecycle(t *testing.T) { } // Check if index exists - indexExists, err := client.IndexExists(testIndexName).Do() + indexExists, err := client.IndexExists(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -28,7 +32,7 @@ func TestIndicesLifecycle(t *testing.T) { } // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -37,7 +41,7 @@ func TestIndicesLifecycle(t *testing.T) { } // Check if index exists - indexExists, err = client.IndexExists(testIndexName).Do() + indexExists, err = client.IndexExists(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -50,7 +54,7 @@ func TestIndicesCreateValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesCreateService(client).Body(testMapping).Do() + res, err := NewIndicesCreateService(client).Body(testMapping).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesCreate to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go index 8943199b9..8127f50d3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesDeleteService allows to delete existing indices. @@ -96,12 +96,7 @@ func (s *IndicesDeleteService) Validate() error { } // Do executes the operation. -func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *IndicesDeleteService) DoC(ctx context.Context) (*IndicesDeleteResponse, error) { +func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -114,7 +109,7 @@ func (s *IndicesDeleteService) DoC(ctx context.Context) (*IndicesDeleteResponse, } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, nil) + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go index dd2201be0..5e53b4145 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesDeleteTemplateService deletes index templates. @@ -91,12 +91,7 @@ func (s *IndicesDeleteTemplateService) Validate() error { } // Do executes the operation. -func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesDeleteTemplateService) DoC(ctx context.Context) (*IndicesDeleteTemplateResponse, error) { +func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -109,7 +104,7 @@ func (s *IndicesDeleteTemplateService) DoC(ctx context.Context) (*IndicesDeleteT } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, nil) + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_test.go index d84edb8de..2785e9051 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_test.go @@ -1,16 +1,20 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestIndicesDeleteValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesDeleteService(client).Do() + res, err := NewIndicesDeleteService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesDelete to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go deleted file mode 100644 index 9e84f2de8..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/net/context" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesDeleteWarmerService allows to delete a warmer. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. -type IndicesDeleteWarmerService struct { - client *Client - pretty bool - index []string - name []string - masterTimeout string -} - -// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService. -func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService { - return &IndicesDeleteWarmerService{ - client: client, - index: make([]string, 0), - name: make([]string, 0), - } -} - -// Index is a list of index names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all indices. -func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService { - s.index = append(s.index, indices...) - return s -} - -// Name is a list of warmer names to delete (supports wildcards); -// use `_all` to delete all warmers in the specified indices. -func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService { - s.name = append(s.name, name...) - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService { - s.masterTimeout = masterTimeout - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "name": strings.Join(s.name, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.name) > 0 { - params.Set("name", strings.Join(s.name, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesDeleteWarmerService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(s.name) == 0 { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *IndicesDeleteWarmerService) DoC(ctx context.Context) (*DeleteWarmerResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "DELETE", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(DeleteWarmerResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do. -type DeleteWarmerResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go deleted file mode 100644 index 3d811ea59..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestDeleteWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Names []string - Expected string - }{ - { - []string{"test"}, - []string{"warmer_1"}, - "/test/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{"warmer_1"}, - "/%2A/_warmer/warmer_1", - }, - { - []string{"_all"}, - []string{"warmer_1"}, - "/_all/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"warmer_1", "warmer_2"}, - "/index-1%2Cindex-2/_warmer/warmer_1%2Cwarmer_2", - }, - } - - for _, test := range tests { - path, _, err := client.DeleteWarmer().Index(test.Indices...).Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go index 0b043ecf9..6fbc4959c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesExistsService checks if an index or indices exist or not. @@ -121,12 +121,7 @@ func (s *IndicesExistsService) Validate() error { } // Do executes the operation. -func (s *IndicesExistsService) Do() (bool, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *IndicesExistsService) DoC(ctx context.Context) (bool, error) { +func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) { // Check pre-conditions if err := s.Validate(); err != nil { return false, err @@ -139,7 +134,7 @@ func (s *IndicesExistsService) DoC(ctx context.Context) (bool, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "HEAD", path, params, nil, 404) + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) if err != nil { return false, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go index 5db79278f..b01d80157 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesExistsTemplateService checks if a given template exists. @@ -84,12 +84,7 @@ func (s *IndicesExistsTemplateService) Validate() error { } // Do executes the operation. -func (s *IndicesExistsTemplateService) Do() (bool, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesExistsTemplateService) DoC(ctx context.Context) (bool, error) { +func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) { // Check pre-conditions if err := s.Validate(); err != nil { return false, err @@ -102,7 +97,7 @@ func (s *IndicesExistsTemplateService) DoC(ctx context.Context) (bool, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "HEAD", path, params, nil, 404) + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) if err != nil { return false, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go index 32fb82ad3..24ee9a2c2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
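The change above is the pattern for the whole package: the context-free Do() wrapper is dropped, DoC(ctx) is renamed to Do(ctx), and PerformRequestC becomes PerformRequest. Every caller now has to pass a context explicitly. A minimal sketch of a migrated call site, assuming a reachable node on the default endpoint and a hypothetical index name "twitter":

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// NewClient targets http://127.0.0.1:9200 by default.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Do now requires a context; context.TODO() is fine when no
	// cancellation or deadline is needed.
	exists, err := client.IndexExists("twitter").Do(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("index exists:", exists)
}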
@@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndexExistsTemplate(t *testing.T) { @@ -21,20 +23,19 @@ func TestIndexExistsTemplate(t *testing.T) { "tweet":{ "properties":{ "tags":{ - "type":"string" + "type":"keyword" }, "location":{ "type":"geo_point" }, "suggest_field":{ - "type":"completion", - "payloads":true + "type":"completion" } } } } }` - putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do() + putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } @@ -46,10 +47,10 @@ func TestIndexExistsTemplate(t *testing.T) { } // Always delete template - defer client.IndexDeleteTemplate("elastic-template").Do() + defer client.IndexDeleteTemplate("elastic-template").Do(context.TODO()) // Check if template exists - exists, err := client.IndexTemplateExists("elastic-template").Do() + exists, err := client.IndexTemplateExists("elastic-template").Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } @@ -58,7 +59,7 @@ func TestIndexExistsTemplate(t *testing.T) { } // Get template - getres, err := client.IndexGetTemplate("elastic-template").Do() + getres, err := client.IndexGetTemplate("elastic-template").Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_test.go index 8cb6f5fab..d5c0e9511 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_test.go @@ -1,16 +1,20 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestIndicesExistsWithoutIndex(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesExistsService(client).Do() + res, err := NewIndicesExistsService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesExists to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go index 6049c8c89..6766187d3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesExistsTypeService checks if one or more types exist in one or more indices. @@ -34,8 +34,6 @@ type IndicesExistsTypeService struct { func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { return &IndicesExistsTypeService{ client: client, - index: make([]string, 0), - typ: make([]string, 0), } } @@ -89,7 +87,7 @@ func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService // buildURL builds the URL for the operation. 
func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { // Build URL - path, err := uritemplates.Expand("/{index}/{type}", map[string]string{ + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ "index": strings.Join(s.index, ","), "type": strings.Join(s.typ, ","), }) @@ -133,12 +131,7 @@ func (s *IndicesExistsTypeService) Validate() error { } // Do executes the operation. -func (s *IndicesExistsTypeService) Do() (bool, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesExistsTypeService) DoC(ctx context.Context) (bool, error) { +func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) { // Check pre-conditions if err := s.Validate(); err != nil { return false, err @@ -151,7 +144,7 @@ func (s *IndicesExistsTypeService) DoC(ctx context.Context) (bool, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "HEAD", path, params, nil, 404) + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) if err != nil { return false, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go index 51721b125..c66d30d98 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndicesExistsTypeBuildURL(t *testing.T) { @@ -38,19 +40,19 @@ func TestIndicesExistsTypeBuildURL(t *testing.T) { { []string{"index1"}, []string{"type1"}, - "/index1/type1", + "/index1/_mapping/type1", false, }, { []string{"index1", "index2"}, []string{"type1"}, - "/index1%2Cindex2/type1", + "/index1%2Cindex2/_mapping/type1", false, }, { []string{"index1", "index2"}, []string{"type1", "type2"}, - "/index1%2Cindex2/type1%2Ctype2", + "/index1%2Cindex2/_mapping/type1%2Ctype2", false, }, } @@ -58,20 +60,20 @@ func TestIndicesExistsTypeBuildURL(t *testing.T) { for i, test := range tests { err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate() if err == nil && test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to fail", i+1) + t.Errorf("#%d: expected validate to fail", i+1) continue } if err != nil && !test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to succeed", i+1) + t.Errorf("#%d: expected validate to succeed", i+1) continue } if !test.ExpectValidateFailure { path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL() if err != nil { - t.Fatalf("case #%d: %v", i+1, err) + t.Fatalf("#%d: %v", i+1, err) } if path != test.Expected { - t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + t.Errorf("#%d: expected %q; got: %q", i+1, test.Expected, path) } } } @@ -81,7 +83,7 @@ func TestIndicesExistsType(t *testing.T) { client := setupTestClient(t) // Create index with tweet type - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -93,7 +95,7 @@ func TestIndicesExistsType(t *testing.T) { } // Check if type exists - exists, err := 
client.TypeExists().Index(testIndexName).Type("tweet").Do() + exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -102,7 +104,7 @@ func TestIndicesExistsType(t *testing.T) { } // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -111,7 +113,7 @@ func TestIndicesExistsType(t *testing.T) { } // Check if type exists - exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do() + exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -124,7 +126,7 @@ func TestIndicesExistsTypeValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesExistsTypeService(client).Do() + res, err := NewIndicesExistsTypeService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesExistsType to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go b/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go index 26dbadeb6..c780db10b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_flush.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // Flush allows to flush one or more indices. The flush process of an index @@ -137,12 +137,7 @@ func (s *IndicesFlushService) Validate() error { } // Do executes the service. -func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) { - return s.DoC(nil) -} - -// DoC executes the service. -func (s *IndicesFlushService) DoC(ctx context.Context) (*IndicesFlushResponse, error) { +func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -155,7 +150,7 @@ func (s *IndicesFlushService) DoC(ctx context.Context) (*IndicesFlushResponse, e } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_flush_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_flush_test.go index 4e30a000b..77a744ef7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_flush_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_flush_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
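Besides the signature change, the type-exists check now routes through the mapping endpoint (HEAD /{index}/_mapping/{type}), which matches the Elasticsearch 5.x API. A sketch of the updated call, reusing the client from the sketch above, with hypothetical index and type names:

// Issues HEAD /myindex/_mapping/tweet against the cluster.
ok, err := client.TypeExists().Index("myindex").Type("tweet").Do(context.TODO())
if err != nil {
	log.Fatal(err)
}
fmt.Println("type exists:", ok)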
@@ -6,13 +6,15 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestFlush(t *testing.T) { client := setupTestClientAndCreateIndex(t) // Flush all indices - res, err := client.Flush().Do() + res, err := client.Flush().Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go index c086899fb..7b550f554 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesForcemergeService allows to force merging of one or more indices. @@ -19,7 +19,7 @@ import ( // within each shard. The force merge operation allows to reduce the number // of segments by merging them. // -// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html +// See http://www.elastic.co/guide/en/elasticsearch/reference/2.4/indices-forcemerge.html // for more information. type IndicesForcemergeService struct { client *Client @@ -32,7 +32,6 @@ type IndicesForcemergeService struct { maxNumSegments interface{} onlyExpungeDeletes *bool operationThreading interface{} - waitForMerge *bool } // NewIndicesForcemergeService creates a new IndicesForcemergeService. @@ -101,13 +100,6 @@ func (s *IndicesForcemergeService) OperationThreading(operationThreading interfa return s } -// WaitForMerge specifies whether the request should block until the -// merge process is finished (default: true). -func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService { - s.waitForMerge = &waitForMerge - return s -} - // Pretty indicates that the JSON response be indented and human readable. func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { s.pretty = pretty @@ -157,9 +149,6 @@ func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { if s.operationThreading != nil { params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading)) } - if s.waitForMerge != nil { - params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) - } return path, params, nil } @@ -169,12 +158,7 @@ func (s *IndicesForcemergeService) Validate() error { } // Do executes the operation. -func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *IndicesForcemergeService) DoC(ctx context.Context) (*IndicesForcemergeResponse, error) { +func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -187,7 +171,7 @@ func (s *IndicesForcemergeService) DoC(ctx context.Context) (*IndicesForcemergeR } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go index c620654cc..f6b1fb753 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndicesForcemergeBuildURL(t *testing.T) { @@ -44,7 +46,7 @@ func TestIndicesForcemergeBuildURL(t *testing.T) { func TestIndicesForcemerge(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) - _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).WaitForMerge(true).Do() + _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get.go index b83b5c35d..589063c98 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesGetService retrieves information about one or more indices. @@ -168,12 +168,7 @@ func (s *IndicesGetService) Validate() error { } // Do executes the operation. -func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *IndicesGetService) DoC(ctx context.Context) (map[string]*IndicesGetResponse, error) { +func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -186,7 +181,7 @@ func (s *IndicesGetService) DoC(ctx context.Context) (map[string]*IndicesGetResp } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go index 1c2d753a1..24a0da928 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases.go @@ -11,30 +11,34 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) +// AliasesService returns the aliases associated with one or more indices. +// See http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. type AliasesService struct { - client *Client - indices []string - pretty bool + client *Client + index []string + pretty bool } +// NewAliasesService instantiates a new AliasesService. func NewAliasesService(client *Client) *AliasesService { builder := &AliasesService{ - client: client, - indices: make([]string, 0), + client: client, } return builder } +// Pretty asks Elasticsearch to indent the returned JSON. func (s *AliasesService) Pretty(pretty bool) *AliasesService { s.pretty = pretty return s } -func (s *AliasesService) Index(indices ...string) *AliasesService { - s.indices = append(s.indices, indices...) +// Index adds one or more indices. +func (s *AliasesService) Index(index ...string) *AliasesService { + s.index = append(s.index, index...) 
return s } @@ -43,9 +47,9 @@ func (s *AliasesService) buildURL() (string, url.Values, error) { var err error var path string - if len(s.indices) > 0 { + if len(s.index) > 0 { path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{ - "index": strings.Join(s.indices, ","), + "index": strings.Join(s.index, ","), }) } else { path = "/_aliases" @@ -62,18 +66,14 @@ func (s *AliasesService) buildURL() (string, url.Values, error) { return path, params, nil } -func (s *AliasesService) Do() (*AliasesResult, error) { - return s.DoC(nil) -} - -func (s *AliasesService) DoC(ctx context.Context) (*AliasesResult, error) { +func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) { path, params, err := s.buildURL() if err != nil { return nil, err } // Get response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } @@ -109,7 +109,7 @@ func (s *AliasesService) DoC(ctx context.Context) (*AliasesResult, error) { if ok { aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) if ok { - for aliasName := range aliasesData { + for aliasName, _ := range aliasesData { aliasRes := aliasResult{AliasName: aliasName} indexOut.Aliases = append(indexOut.Aliases, aliasRes) } @@ -138,7 +138,6 @@ type aliasResult struct { func (ar AliasesResult) IndicesByAlias(aliasName string) []string { var indices []string - for indexName, indexInfo := range ar.Indices { for _, aliasInfo := range indexInfo.Aliases { if aliasInfo.AliasName == aliasName { @@ -146,7 +145,6 @@ func (ar AliasesResult) IndicesByAlias(aliasName string) []string { } } } - return indices } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go index 891f5e1d8..1003ac79d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestAliasesBuildURL(t *testing.T) { @@ -52,26 +54,26 @@ func TestAliases(t *testing.T) { tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} // Add tweets to first index - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } // Add tweets to second index - _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } // Flush - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName2).Do() + _, err = client.Flush().Index(testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -80,7 +82,7 @@ func TestAliases(t *testing.T) { aliasesResult1, err := client.Aliases(). Index(testIndexName, testIndexName2). //Pretty(true). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -98,7 +100,7 @@ func TestAliases(t *testing.T) { Add(testIndexName, testAliasName). Add(testIndexName2, testAliasName). //Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -110,7 +112,7 @@ func TestAliases(t *testing.T) { aliasesResult2, err := client.Aliases(). Index(testIndexName, testIndexName2). //Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -147,7 +149,7 @@ func TestAliases(t *testing.T) { aliasRemove1, err := client.Alias(). Remove(testIndexName, testAliasName). //Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -156,7 +158,7 @@ func TestAliases(t *testing.T) { } // Alias should now exist only for index 2 - aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do() + aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go index 1e7f461bb..eaf8864fd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesGetMappingService retrieves the mapping definitions for an index or @@ -144,13 +144,7 @@ func (s *IndicesGetMappingService) Validate() error { // Do executes the operation. It returns mapping definitions for an index // or index/type. -func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) { - return s.DoC(nil) -} - -// DoC executes the operation. It returns mapping definitions for an index -// or index/type. -func (s *IndicesGetMappingService) DoC(ctx context.Context) (map[string]interface{}, error) { +func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -163,7 +157,7 @@ func (s *IndicesGetMappingService) DoC(ctx context.Context) (map[string]interfac } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go index ccfa27fed..5ec54e7fb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
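With AliasesService rewired through PerformRequest, reading aliases follows the same context-first shape, and the result can still be filtered with IndicesByAlias. A sketch reusing the client from the first example, with a hypothetical alias name:

// GET /index1,index2/_aliases, then pick the indices behind one alias.
aliases, err := client.Aliases().Index("index1", "index2").Do(context.TODO())
if err != nil {
	log.Fatal(err)
}
for _, indexName := range aliases.IndicesByAlias("my-alias") {
	fmt.Println("alias points at:", indexName)
}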
diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go index 542a9c94d..9c18dbc93 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesGetSettingsService allows to retrieve settings of one @@ -152,12 +152,7 @@ func (s *IndicesGetSettingsService) Validate() error { } // Do executes the operation. -func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesGetSettingsService) DoC(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) { +func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -170,7 +165,7 @@ func (s *IndicesGetSettingsService) DoC(ctx context.Context) (map[string]*Indice } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go index f53512d53..cc6cfe053 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndexGetSettingsURL(t *testing.T) { @@ -61,7 +63,7 @@ func TestIndexGetSettingsService(t *testing.T) { return } - res, err := client.IndexGetSettings().Index(testIndexName).Do() + res, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_template.go index 629a2ec30..1339e21c7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesGetTemplateService returns an index template. @@ -93,12 +93,7 @@ func (s *IndicesGetTemplateService) Validate() error { } // Do executes the operation. 
-func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesGetTemplateService) DoC(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) { +func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -111,7 +106,7 @@ func (s *IndicesGetTemplateService) DoC(ctx context.Context) (map[string]*Indice } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_template_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_template_test.go index 693cde5ea..c884ec1cb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_template_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_template_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_test.go index fcdee54db..a0c1c627e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_get_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,13 +6,15 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndicesGetValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesGetService(client).Index("").Do() + res, err := NewIndicesGetService(client).Index("").Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesGet to fail without index name") } @@ -74,7 +76,7 @@ func TestIndicesGetService(t *testing.T) { return } - res, err := client.IndexGet().Index(testIndexName).Do() + res, err := client.IndexGet().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go deleted file mode 100644 index c9f0ffe4c..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/net/context" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetWarmerService allows to get the definition of a warmer for a -// specific index (or alias, or several indices) based on its name. -// The provided name can be a simple wildcard expression or omitted to get -// all warmers. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html -// for more information. 
-type IndicesGetWarmerService struct { - client *Client - pretty bool - index []string - name []string - typ []string - allowNoIndices *bool - expandWildcards string - ignoreUnavailable *bool - local *bool -} - -// NewIndicesGetWarmerService creates a new IndicesGetWarmerService. -func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService { - return &IndicesGetWarmerService{ - client: client, - typ: make([]string, 0), - index: make([]string, 0), - name: make([]string, 0), - } -} - -// Index is a list of index names to restrict the operation; use `_all` to perform the operation on all indices. -func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService { - s.index = append(s.index, indices...) - return s -} - -// Name is the name of the warmer (supports wildcards); leave empty to get all warmers. -func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService { - s.name = append(s.name, name...) - return s -} - -// Type is a list of type names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all types. -func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService { - s.typ = append(s.typ, typ...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// This includes `_all` string or when no indices have been specified. -func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Local indicates wether or not to return local information, -// do not retrieve the state from master node (default: false). -func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 { - path = "/_warmer" - } else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ - "type": strings.Join(s.typ, ","), - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - "name": strings.Join(s.name, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetWarmerService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesGetWarmerService) Do() (map[string]interface{}, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesGetWarmerService) DoC(ctx context.Context) (map[string]interface{}, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]interface{} - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go deleted file mode 100644 index ea01a628e..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestGetWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Names []string - Expected string - }{ - { - []string{}, - []string{}, - []string{}, - "/_warmer", - }, - { - []string{}, - []string{}, - []string{"warmer_1"}, - "/_warmer/warmer_1", - }, - { - []string{}, - []string{"tweet"}, - []string{}, - "/_all/tweet/_warmer", - }, - { - []string{}, - []string{"tweet"}, - []string{"warmer_1"}, - "/_all/tweet/_warmer/warmer_1", - }, - { - []string{"test"}, - []string{}, - []string{}, - "/test/_warmer", - }, - { - []string{"test"}, - []string{}, - []string{"warmer_1"}, - "/test/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{}, - []string{"warmer_1"}, - "/%2A/_warmer/warmer_1", - }, - { - []string{"test"}, - []string{"tweet"}, - []string{"warmer_1"}, - "/test/tweet/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"type-1", "type-2"}, - []string{"warmer_1", "warmer_2"}, - "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1%2Cwarmer_2", - }, - } - - for _, test := range tests { - path, _, err := client.GetWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_open.go b/vendor/gopkg.in/olivere/elastic.v3/indices_open.go index 004fa228c..1f1221101 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_open.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_open.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesOpenService opens an index. @@ -126,12 +126,7 @@ func (s *IndicesOpenService) Validate() error { } // Do executes the operation. -func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesOpenService) DoC(ctx context.Context) (*IndicesOpenResponse, error) { +func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -144,7 +139,7 @@ func (s *IndicesOpenService) DoC(ctx context.Context) (*IndicesOpenResponse, err } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_open_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_open_test.go index 352bb479b..39b848502 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_open_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_open_test.go @@ -1,16 +1,20 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestIndicesOpenValidate(t *testing.T) { client := setupTestClient(t) // No index name -> fail with error - res, err := NewIndicesOpenService(client).Do() + res, err := NewIndicesOpenService(client).Do(context.TODO()) if err == nil { t.Fatalf("expected IndicesOpen to fail without index name") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go index 336193c47..347b8fa54 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias.go @@ -256,12 +256,7 @@ func (s *AliasService) buildURL() (string, url.Values, error) { } // Do executes the command. -func (s *AliasService) Do() (*AliasResult, error) { - return s.DoC(nil) -} - -// DoC executes the command. -func (s *AliasService) DoC(ctx context.Context) (*AliasResult, error) { +func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) { path, params, err := s.buildURL() if err != nil { return nil, err @@ -280,7 +275,7 @@ func (s *AliasService) DoC(ctx context.Context) (*AliasResult, error) { body["actions"] = actions // Get response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go index 1e3d3974b..ce2d75ca9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go @@ -7,6 +7,8 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) const ( @@ -24,28 +26,28 @@ func TestAliasLifecycle(t *testing.T) { tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} // Add tweets to first index - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } // Add tweets to second index - _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } // Flush - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName2).Do() + _, err = client.Flush().Index(testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -55,7 +57,7 @@ func TestAliasLifecycle(t *testing.T) { Add(testIndexName, testAliasName). Action(NewAliasAddAction(testAliasName).Index(testIndexName2)). //Pretty(true). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -65,7 +67,7 @@ func TestAliasLifecycle(t *testing.T) { // Search should return all 3 tweets matchAll := NewMatchAllQuery() - searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do() + searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -80,7 +82,7 @@ func TestAliasLifecycle(t *testing.T) { aliasRemove1, err := client.Alias(). Remove(testIndexName, testAliasName). //Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -88,7 +90,7 @@ func TestAliasLifecycle(t *testing.T) { t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) } - searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do() + searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping.go index 9b8d1448c..f2cb25879 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesPutMappingService allows to register specific mapping definition @@ -182,12 +182,7 @@ func (s *IndicesPutMappingService) Validate() error { } // Do executes the operation. -func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesPutMappingService) DoC(ctx context.Context) (*PutMappingResponse, error) { +func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -208,7 +203,7 @@ func (s *IndicesPutMappingService) DoC(ctx context.Context) (*PutMappingResponse } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "PUT", path, params, body) + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go index 356aa2728..f95e53c39 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestPutMappingURL(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -48,14 +52,14 @@ func TestMappingLifecycle(t *testing.T) { mapping := `{ "tweetdoc":{ "properties":{ - "message":{ + "field":{ "type":"string" } } } }` - putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do() + putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do(context.TODO()) if err != nil { t.Fatalf("expected put mapping to succeed; got: %v", err) } @@ -66,7 +70,7 @@ func TestMappingLifecycle(t *testing.T) { t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged) } - getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do() + getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do(context.TODO()) if err != nil { t.Fatalf("expected get mapping to succeed; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings.go index 8ceedeadc..ab7231e58 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesPutSettingsService changes specific index level settings in @@ -145,12 +145,7 @@ func (s *IndicesPutSettingsService) Validate() error { } // Do executes the operation. -func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesPutSettingsService) DoC(ctx context.Context) (*IndicesPutSettingsResponse, error) { +func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -171,7 +166,7 @@ func (s *IndicesPutSettingsService) DoC(ctx context.Context) (*IndicesPutSetting } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "PUT", path, params, body) + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go index 4bc86e18e..d0a961794 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestIndicesPutSettingsBuildURL(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -48,7 +52,7 @@ func TestIndicesSettingsLifecycle(t *testing.T) { }` // Put settings - putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do() + putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do(context.TODO()) if err != nil { t.Fatalf("expected put settings to succeed; got: %v", err) } @@ -60,7 +64,7 @@ func TestIndicesSettingsLifecycle(t *testing.T) { } // Read settings - getres, err := client.IndexGetSettings().Index(testIndexName).Do() + getres, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatalf("expected get mapping to succeed; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_template.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_template.go index 3361bc494..3222539a2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_put_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesPutTemplateService creates or updates index mappings. @@ -140,12 +140,7 @@ func (s *IndicesPutTemplateService) Validate() error { } // Do executes the operation. -func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesPutTemplateService) DoC(ctx context.Context) (*IndicesPutTemplateResponse, error) { +func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -166,7 +161,7 @@ func (s *IndicesPutTemplateService) DoC(ctx context.Context) (*IndicesPutTemplat } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "PUT", path, params, body) + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go deleted file mode 100644 index 25a1f3ecb..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import "testing" - -func TestPutWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Name string - Expected string - }{ - { - []string{}, - []string{}, - "warmer_1", - "/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{}, - "warmer_1", - "/%2A/_warmer/warmer_1", - }, - { - []string{}, - []string{"*"}, - "warmer_1", - "/_all/%2A/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"type-1", "type-2"}, - "warmer_1", - "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1", - }, - } - - for _, test := range tests { - path, _, err := client.PutWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Name).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestWarmerLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - mapping := `{ - "query": { - "match_all": {} - } - }` - - // Ensure well prepared test index - client.Flush(testIndexName2).Do() - - putresp, err := client.PutWarmer().Index(testIndexName2).Type("tweet").Name("warmer_1").BodyString(mapping).Do() - if err != nil { - t.Fatalf("expected put warmer to succeed; got: %v", err) - } - if putresp == nil { - t.Fatalf("expected put warmer response; got: %v", putresp) - } - if !putresp.Acknowledged { - t.Fatalf("expected put warmer ack; got: %v", putresp.Acknowledged) - } - - getresp, err := client.GetWarmer().Index(testIndexName2).Name("warmer_1").Do() - if err != nil { - t.Fatalf("expected get warmer to succeed; got: %v", err) - } - if getresp == nil { - t.Fatalf("expected get warmer response; got: %v", getresp) - } - props, ok := getresp[testIndexName2] - if !ok { - t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) - } - - delresp, err := client.DeleteWarmer().Index(testIndexName2).Name("warmer_1").Do() - if err != nil { - t.Fatalf("expected del warmer to succeed; got: %v", err) - } - if delresp == nil { - t.Fatalf("expected del warmer response; got: %v", getresp) - } - if !delresp.Acknowledged { - t.Fatalf("expected del warmer ack; got: %v", delresp.Acknowledged) - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go index 5c7229ed0..a221481f6 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,75 +11,80 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) +// RefreshService explicitly refreshes one or more indices. +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html. type RefreshService struct { - client *Client - indices []string - force *bool - pretty bool + client *Client + index []string + force *bool + pretty bool } +// NewRefreshService creates a new instance of RefreshService. 
func NewRefreshService(client *Client) *RefreshService { builder := &RefreshService{ - client: client, - indices: make([]string, 0), + client: client, } return builder } -func (s *RefreshService) Index(indices ...string) *RefreshService { - s.indices = append(s.indices, indices...) +// Index specifies the indices to refresh. +func (s *RefreshService) Index(index ...string) *RefreshService { + s.index = append(s.index, index...) return s } +// Force forces a refresh. func (s *RefreshService) Force(force bool) *RefreshService { s.force = &force return s } +// Pretty asks Elasticsearch to return indented JSON. func (s *RefreshService) Pretty(pretty bool) *RefreshService { s.pretty = pretty return s } -func (s *RefreshService) Do() (*RefreshResult, error) { - return s.DoC(nil) -} - -func (s *RefreshService) DoC(ctx context.Context) (*RefreshResult, error) { - // Build url - path := "/" +// buildURL builds the URL for the operation. +func (s *RefreshService) buildURL() (string, url.Values, error) { + var err error + var path string - // Indices part - var indexPart []string - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{ + "index": strings.Join(s.index, ","), }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) + } else { + path = "/_refresh" } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") + if err != nil { + return "", url.Values{}, err } - path += "/_refresh" - - // Parameters - params := make(url.Values) + // Add query string parameters + params := url.Values{} if s.force != nil { params.Set("force", fmt.Sprintf("%v", *s.force)) } if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } + return path, params, nil +} + +// Do executes the request. +func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } // Get response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } @@ -94,6 +99,7 @@ func (s *RefreshService) DoC(ctx context.Context) (*RefreshResult, error) { // -- Result of a refresh request. +// RefreshResult is the outcome of RefreshService.Do. type RefreshResult struct { Shards shardsInfo `json:"_shards,omitempty"` } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_refresh_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh_test.go index 885e63365..6d486c0ab 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_refresh_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_refresh_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
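// Every hunk in this patch follows the same v3-to-v5 migration: each service's
// Do() gains a context.Context argument and client.PerformRequestC becomes
// client.PerformRequest. A minimal caller-side sketch of the reworked
// RefreshService above, assuming an already-configured *elastic.Client named
// client (index names are illustrative):
//
//	res, err := client.Refresh("logs-2016.11", "logs-2016.12").Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("refresh touched %d shards\n", res.Shards.Total)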
@@ -6,8 +6,43 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) +func TestRefreshBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_refresh", + }, + { + []string{"index1"}, + "/index1/_refresh", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_refresh", + }, + } + + for i, test := range tests { + path, _, err := client.Refresh().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + func TestRefresh(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -16,28 +51,28 @@ func TestRefresh(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add some documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Refresh indices - res, err := client.Refresh(testIndexName, testIndexName2).Do() + res, err := client.Refresh(testIndexName, testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_rollover.go b/vendor/gopkg.in/olivere/elastic.v3/indices_rollover.go new file mode 100644 index 000000000..133fd2f06 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_rollover.go @@ -0,0 +1,268 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesRolloverService rolls an alias over to a new index when the +// existing index is considered to be too large or too old. +// +// It is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/indices-rollover-index.html. +type IndicesRolloverService struct { + client *Client + pretty bool + dryRun bool + newIndex string + alias string + masterTimeout string + timeout string + waitForActiveShards string + conditions map[string]interface{} + settings map[string]interface{} + mappings map[string]interface{} + bodyJson interface{} + bodyString string +} + +// NewIndicesRolloverService creates a new IndicesRolloverService. +func NewIndicesRolloverService(client *Client) *IndicesRolloverService { + return &IndicesRolloverService{ + client: client, + conditions: make(map[string]interface{}), + settings: make(map[string]interface{}), + mappings: make(map[string]interface{}), + } +} + +// Alias is the name of the alias to rollover. 
+func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
+	s.alias = alias
+	return s
+}
+
+// NewIndex is the name of the rollover index.
+func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
+	s.newIndex = newIndex
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Timeout sets an explicit operation timeout.
+func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards sets the number of active shards to wait for on the
+// newly created rollover index before the operation returns.
+func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
+	s.waitForActiveShards = waitForActiveShards
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
+	s.pretty = pretty
+	return s
+}
+
+// DryRun, when set, specifies that only conditions are checked without
+// performing the actual rollover.
+func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
+	s.dryRun = dryRun
+	return s
+}
+
+// Conditions allows specifying all conditions as a dictionary.
+func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
+	s.conditions = conditions
+	return s
+}
+
+// AddCondition adds a condition to the rollover decision.
+func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
+	s.conditions[name] = value
+	return s
+}
+
+// AddMaxIndexAgeCondition adds a condition to set the max index age.
+func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
+	s.conditions["max_age"] = time
+	return s
+}
+
+// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
+func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
+	s.conditions["max_docs"] = docs
+	return s
+}
+
+// Settings adds the index settings.
+func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
+	s.settings = settings
+	return s
+}
+
+// AddSetting adds an index setting.
+func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
+	s.settings[name] = value
+	return s
+}
+
+// Mappings adds the index mappings.
+func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
+	s.mappings = mappings
+	return s
+}
+
+// AddMapping adds a mapping for the given type.
+func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
+	s.mappings[typ] = mapping
+	return s
+}
+
+// BodyJson sets the conditions that need to be met for executing the rollover,
+// specified as a serializable JSON instance which is sent as the body of
+// the request.
+func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString sets the conditions that need to be met for executing the rollover,
+// specified as a string which is sent as the body of the request.
+func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService { + s.bodyString = body + return s +} + +// getBody returns the body of the request, if not explicitly set via +// BodyJson or BodyString. +func (s *IndicesRolloverService) getBody() interface{} { + body := make(map[string]interface{}) + if len(s.conditions) > 0 { + body["conditions"] = s.conditions + } + if len(s.settings) > 0 { + body["settings"] = s.settings + } + if len(s.mappings) > 0 { + body["mappings"] = s.mappings + } + return body +} + +// buildURL builds the URL for the operation. +func (s *IndicesRolloverService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.newIndex != "" { + path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{ + "alias": s.alias, + "new_index": s.newIndex, + }) + } else { + path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{ + "alias": s.alias, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.dryRun { + params.Set("dry_run", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesRolloverService) Validate() error { + var invalid []string + if s.alias == "" { + invalid = append(invalid, "Alias") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + body = s.getBody() + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesRolloverResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesRolloverResponse is the response of IndicesRolloverService.Do. +type IndicesRolloverResponse struct { + OldIndex string `json:"old_index"` + NewIndex string `json:"new_index"` + RolledOver bool `json:"rolled_over"` + DryRun bool `json:"dry_run"` + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` + Conditions map[string]bool `json:"conditions"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_rollover_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_rollover_test.go new file mode 100644 index 000000000..77ac1e851 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_rollover_test.go @@ -0,0 +1,116 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
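// A usage sketch for the new rollover API above, assuming the alias
// "logs_write" already points at a concrete index (all names and condition
// values are illustrative):
//
//	res, err := client.RolloverIndex("logs_write").
//		AddMaxIndexAgeCondition("7d").
//		AddMaxIndexDocsCondition(1000000).
//		Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	if res.RolledOver {
//		fmt.Printf("rolled %s over to %s\n", res.OldIndex, res.NewIndex)
//	}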
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestIndicesRolloverBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Alias string + NewIndex string + Expected string + }{ + { + "logs_write", + "", + "/logs_write/_rollover", + }, + { + "logs_write", + "my_new_index_name", + "/logs_write/_rollover/my_new_index_name", + }, + } + + for i, test := range tests { + path, _, err := client.RolloverIndex(test.Alias).NewIndex(test.NewIndex).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndicesRolloverBodyConditions(t *testing.T) { + client := setupTestClient(t) + svc := NewIndicesRolloverService(client). + Conditions(map[string]interface{}{ + "max_age": "7d", + "max_docs": 1000, + }) + data, err := json.Marshal(svc.getBody()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"conditions":{"max_age":"7d","max_docs":1000}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestIndicesRolloverBodyAddCondition(t *testing.T) { + client := setupTestClient(t) + svc := NewIndicesRolloverService(client). + AddCondition("max_age", "7d"). + AddCondition("max_docs", 1000) + data, err := json.Marshal(svc.getBody()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"conditions":{"max_age":"7d","max_docs":1000}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestIndicesRolloverBodyAddPredefinedConditions(t *testing.T) { + client := setupTestClient(t) + svc := NewIndicesRolloverService(client). + AddMaxIndexAgeCondition("2d"). + AddMaxIndexDocsCondition(1000000) + data, err := json.Marshal(svc.getBody()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"conditions":{"max_age":"2d","max_docs":1000000}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestIndicesRolloverBodyComplex(t *testing.T) { + client := setupTestClient(t) + svc := NewIndicesRolloverService(client). + AddMaxIndexAgeCondition("2d"). + AddMaxIndexDocsCondition(1000000). + AddSetting("index.number_of_shards", 2). + AddMapping("tweet", map[string]interface{}{ + "properties": map[string]interface{}{ + "user": map[string]interface{}{ + "type": "keyword", + }, + }, + }) + data, err := json.Marshal(svc.getBody()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"tweet":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_shrink.go b/vendor/gopkg.in/olivere/elastic.v3/indices_shrink.go new file mode 100644 index 000000000..162bd3986 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_shrink.go @@ -0,0 +1,174 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesShrinkService allows you to shrink an existing index into a +// new index with fewer primary shards. +// +// For further details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/indices-shrink-index.html. +type IndicesShrinkService struct { + client *Client + pretty bool + source string + target string + masterTimeout string + timeout string + waitForActiveShards string + bodyJson interface{} + bodyString string +} + +// NewIndicesShrinkService creates a new IndicesShrinkService. +func NewIndicesShrinkService(client *Client) *IndicesShrinkService { + return &IndicesShrinkService{ + client: client, + } +} + +// Source is the name of the source index to shrink. +func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService { + s.source = source + return s +} + +// Target is the name of the target index to shrink into. +func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService { + s.target = target + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService { + s.timeout = timeout + return s +} + +// WaitForActiveShards sets the number of active shards to wait for on +// the shrunken index before the operation returns. +func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService { + s.pretty = pretty + return s +} + +// BodyJson is the configuration for the target index (`settings` and `aliases`) +// defined as a JSON-serializable instance to be sent as the request body. +func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService { + s.bodyJson = body + return s +} + +// BodyString is the configuration for the target index (`settings` and `aliases`) +// defined as a string to send as the request body. +func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesShrinkService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{ + "source": s.source, + "target": s.target, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesShrinkService) Validate() error { + var invalid []string + if s.source == "" { + invalid = append(invalid, "Source") + } + if s.target == "" { + invalid = append(invalid, "Target") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesShrinkResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesShrinkResponse is the response of IndicesShrinkService.Do. +type IndicesShrinkResponse struct { + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_shrink_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_shrink_test.go new file mode 100644 index 000000000..06ab7d923 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_shrink_test.go @@ -0,0 +1,34 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesShrinkBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Source string + Target string + Expected string + }{ + { + "my_source_index", + "my_target_index", + "/my_source_index/_shrink/my_target_index", + }, + } + + for i, test := range tests { + path, _, err := client.ShrinkIndex(test.Source, test.Target).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go b/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go index 6329ee523..7d7e94a9d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_stats.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // IndicesStatsService provides stats on various metrics of one or more @@ -168,12 +168,7 @@ func (s *IndicesStatsService) Validate() error { } // Do executes the operation. -func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
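// A caller-side sketch for the shrink API above; it assumes the source index
// was already made read-only and fully relocated to a single node, as the
// Elasticsearch shrink documentation requires (index names and settings are
// illustrative):
//
//	res, err := client.ShrinkIndex("logs-big", "logs-small").
//		BodyJson(map[string]interface{}{
//			"settings": map[string]interface{}{
//				"index.number_of_shards": 1,
//			},
//		}).
//		Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	fmt.Println("acknowledged:", res.Acknowledged)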
-func (s *IndicesStatsService) DoC(ctx context.Context) (*IndicesStatsResponse, error) { +func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -186,7 +181,7 @@ func (s *IndicesStatsService) DoC(ctx context.Context) (*IndicesStatsResponse, e } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_stats_test.go b/vendor/gopkg.in/olivere/elastic.v3/indices_stats_test.go index 2a72858d7..367d63ba9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_stats_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/indices_stats_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestIndexStatsBuildURL(t *testing.T) { @@ -62,7 +64,7 @@ func TestIndexStatsBuildURL(t *testing.T) { func TestIndexStats(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) - stats, err := client.IndexStats(testIndexName).Do() + stats, err := client.IndexStats(testIndexName).Do(context.TODO()) if err != nil { t.Fatalf("expected no error; got: %v", err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline.go new file mode 100644 index 000000000..641c1eb26 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline.go @@ -0,0 +1,124 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestDeletePipelineService deletes pipelines by ID. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/delete-pipeline-api.html. +type IngestDeletePipelineService struct { + client *Client + pretty bool + id string + masterTimeout string + timeout string +} + +// NewIngestDeletePipelineService creates a new IngestDeletePipelineService. +func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService { + return &IngestDeletePipelineService{ + client: client, + } +} + +// Id is documented as: Pipeline ID. +func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService { + s.id = id + return s +} + +// MasterTimeout is documented as: Explicit operation timeout for connection to master node. +func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is documented as: Explicit operation timeout. +func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService { + s.timeout = timeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestDeletePipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestDeletePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do. +type IngestDeletePipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline_test.go new file mode 100644 index 000000000..1163e0f17 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_delete_pipeline_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIngestDeletePipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id", + }, + } + + for _, test := range tests { + path, _, err := client.IngestDeletePipeline(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline.go new file mode 100644 index 000000000..ecff1a862 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline.go @@ -0,0 +1,118 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestGetPipelineService returns pipelines based on ID. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/get-pipeline-api.html +// for documentation. 
+type IngestGetPipelineService struct { + client *Client + pretty bool + id []string + masterTimeout string +} + +// NewIngestGetPipelineService creates a new IngestGetPipelineService. +func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService { + return &IngestGetPipelineService{ + client: client, + } +} + +// Id is a list of pipeline ids. Wildcards supported. +func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService { + s.id = append(s.id, id...) + return s +} + +// MasterTimeout is an explicit operation timeout for connection to master node. +func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.id) > 0 { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": strings.Join(s.id, ","), + }) + } else { + path = "/_ingest/pipeline" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestGetPipelineService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret IngestGetPipelineResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do. +type IngestGetPipelineResponse map[string]*IngestGetPipeline + +type IngestGetPipeline struct { + ID string `json:"id"` + Config map[string]interface{} `json:"config"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline_test.go new file mode 100644 index 000000000..ddafe9fce --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_get_pipeline_test.go @@ -0,0 +1,118 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
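// A short sketch of fetching pipelines with the service above, assuming a
// running 5.x cluster with at least one matching pipeline (the wildcard ID is
// illustrative):
//
//	pipelines, err := client.IngestGetPipeline("my-*").Do(context.TODO())
//	if err != nil {
//		// handle error (a 404 is reported as an error when nothing matches)
//	}
//	for id, pipeline := range pipelines {
//		fmt.Println(id, pipeline.Config["description"])
//	}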
+
+package elastic
+
+import (
+	"context"
+	"testing"
+)
+
+func TestIngestGetPipelineURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Id       []string
+		Expected string
+	}{
+		{
+			nil,
+			"/_ingest/pipeline",
+		},
+		{
+			[]string{"my-pipeline-id"},
+			"/_ingest/pipeline/my-pipeline-id",
+		},
+		{
+			[]string{"*"},
+			"/_ingest/pipeline/%2A",
+		},
+		{
+			[]string{"pipeline-1", "pipeline-2"},
+			"/_ingest/pipeline/pipeline-1%2Cpipeline-2",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IngestGetPipeline(test.Id...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIngestLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+	// Get all pipelines (none exist yet, so Elasticsearch returns a 404, which surfaces as an error)
+	getres, err := client.IngestGetPipeline().Do(context.TODO())
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if getres != nil {
+		t.Fatalf("expected no response, got %v", getres)
+	}
+
+	// Add a pipeline
+	pipelineDef := `{
+  "description" : "reset retweets",
+  "processors" : [
+    {
+      "set" : {
+        "field": "retweets",
+        "value": 0
+      }
+    }
+  ]
+}`
+	putres, err := client.IngestPutPipeline("my-pipeline").BodyString(pipelineDef).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if putres == nil {
+		t.Fatal("expected response, got nil")
+	}
+	if want, have := true, putres.Acknowledged; want != have {
+		t.Fatalf("expected ack = %v, got %v", want, have)
+	}
+
+	// Get all pipelines again
+	getres, err = client.IngestGetPipeline().Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := 1, len(getres); want != have {
+		t.Fatalf("expected %d pipelines, got %d", want, have)
+	}
+	if _, found := getres["my-pipeline"]; !found {
+		t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+	}
+
+	// Get pipeline by ID
+	getres, err = client.IngestGetPipeline("my-pipeline").Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := 1, len(getres); want != have {
+		t.Fatalf("expected %d pipelines, got %d", want, have)
+	}
+	if _, found := getres["my-pipeline"]; !found {
+		t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+	}
+
+	// Delete pipeline
+	delres, err := client.IngestDeletePipeline("my-pipeline").Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if delres == nil {
+		t.Fatal("expected response, got nil")
+	}
+	if want, have := true, delres.Acknowledged; want != have {
+		t.Fatalf("expected ack = %v, got %v", want, have)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline.go
new file mode 100644
index 000000000..723a8ad78
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IngestPutPipelineService adds pipelines and updates existing pipelines in
+// the cluster.
+//
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/put-pipeline-api.html.
+type IngestPutPipelineService struct { + client *Client + pretty bool + id string + masterTimeout string + timeout string + bodyJson interface{} + bodyString string +} + +// NewIngestPutPipelineService creates a new IngestPutPipelineService. +func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService { + return &IngestPutPipelineService{ + client: client, + } +} + +// Id is the pipeline ID. +func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService { + s.id = id + return s +} + +// MasterTimeout is an explicit operation timeout for connection to master node. +func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService { + s.timeout = timeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService { + s.pretty = pretty + return s +} + +// BodyJson is the ingest definition, defined as a JSON-serializable document. +// Use e.g. a map[string]interface{} here. +func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService { + s.bodyJson = body + return s +} + +// BodyString is the ingest definition, specified as a string. +func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestPutPipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestPutPipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do. 
+type IngestPutPipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline_test.go new file mode 100644 index 000000000..9609f2f53 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_put_pipeline_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIngestPutPipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id", + }, + } + + for _, test := range tests { + path, _, err := client.IngestPutPipeline(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline.go new file mode 100644 index 000000000..212327dfb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline.go @@ -0,0 +1,157 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestSimulatePipelineService executes a specific pipeline against the set of +// documents provided in the body of the request. +// +// The API is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/simulate-pipeline-api.html. +type IngestSimulatePipelineService struct { + client *Client + pretty bool + id string + verbose *bool + bodyJson interface{} + bodyString string +} + +// NewIngestSimulatePipelineService creates a new IngestSimulatePipeline. +func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService { + return &IngestSimulatePipelineService{ + client: client, + } +} + +// Id specifies the pipeline ID. +func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService { + s.id = id + return s +} + +// Verbose mode. Display data output for each processor in executed pipeline. +func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService { + s.verbose = &verbose + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService { + s.pretty = pretty + return s +} + +// BodyJson is the ingest definition, defined as a JSON-serializable simulate +// definition. Use e.g. a map[string]interface{} here. +func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService { + s.bodyJson = body + return s +} + +// BodyString is the simulate definition, defined as a string. +func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
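// A caller-side sketch of IngestSimulatePipelineService (its buildURL and Do
// follow below), using the ad-hoc form where the pipeline definition travels
// in the request body; the JSON and field names here are illustrative:
//
//	body := `{
//	  "pipeline": {
//	    "processors": [ { "set": { "field": "foo", "value": "bar" } } ]
//	  },
//	  "docs": [ { "_source": { "foo": "old" } } ]
//	}`
//	res, err := client.IngestSimulatePipeline().BodyString(body).Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	for _, doc := range res.Docs {
//		fmt.Println(doc.Doc)
//	}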
+func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if s.id != "" { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{ + "id": s.id, + }) + } else { + path = "/_ingest/pipeline/_simulate" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.verbose != nil { + params.Set("verbose", fmt.Sprintf("%v", *s.verbose)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestSimulatePipelineService) Validate() error { + var invalid []string + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestSimulatePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do. +type IngestSimulatePipelineResponse struct { + Docs []*IngestSimulateDocumentResult `json:"docs"` +} + +type IngestSimulateDocumentResult struct { + Doc map[string]interface{} `json:"doc"` + ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"` +} + +type IngestSimulateProcessorResult struct { + ProcessorTag string `json:"tag"` + Doc map[string]interface{} `json:"doc"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline_test.go new file mode 100644 index 000000000..a254f85ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/ingest_simulate_pipeline_test.go @@ -0,0 +1,35 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIngestSimulatePipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "", + "/_ingest/pipeline/_simulate", + }, + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id/_simulate", + }, + } + + for _, test := range tests { + path, _, err := client.IngestSimulatePipeline().Id(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go b/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go index 1330df1ee..2200bcd00 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go +++ b/vendor/gopkg.in/olivere/elastic.v3/inner_hit.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -66,18 +66,18 @@ func (hit *InnerHit) Version(version bool) *InnerHit { return hit } -func (hit *InnerHit) Field(fieldName string) *InnerHit { - hit.source.Field(fieldName) +func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit { + hit.source.StoredField(storedFieldName) return hit } -func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit { - hit.source.Fields(fieldNames...) +func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit { + hit.source.StoredFields(storedFieldNames...) return hit } -func (hit *InnerHit) NoFields() *InnerHit { - hit.source.NoFields() +func (hit *InnerHit) NoStoredFields() *InnerHit { + hit.source.NoStoredFields() return hit } @@ -91,13 +91,13 @@ func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) return hit } -func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit { - hit.source.FieldDataFields(fieldDataFields...) +func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit { + hit.source.DocvalueFields(docvalueFields...) return hit } -func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit { - hit.source.FieldDataField(fieldDataField) +func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit { + hit.source.DocvalueField(docvalueField) return hit } diff --git a/vendor/gopkg.in/olivere/elastic.v3/inner_hit_test.go b/vendor/gopkg.in/olivere/elastic.v3/inner_hit_test.go index c4a74dafa..fd9bd2e8a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/inner_hit_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/inner_hit_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/logger.go b/vendor/gopkg.in/olivere/elastic.v3/logger.go index 0fb16b19f..095eb4cd4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/logger.go +++ b/vendor/gopkg.in/olivere/elastic.v3/logger.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/mget.go b/vendor/gopkg.in/olivere/elastic.v3/mget.go index 9b4725937..dcf72c624 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/mget.go +++ b/vendor/gopkg.in/olivere/elastic.v3/mget.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,6 +7,7 @@ package elastic import ( "fmt" "net/url" + "strings" "golang.org/x/net/context" ) @@ -16,38 +17,56 @@ import ( // a docs array with all the fetched documents, each element similar // in structure to a document provided by the Get API. // -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-multi-get.html // for details. 
type MgetService struct { - client *Client - pretty bool - preference string - realtime *bool - refresh *bool - items []*MultiGetItem + client *Client + pretty bool + preference string + realtime *bool + refresh string + routing string + storedFields []string + items []*MultiGetItem } +// NewMgetService initializes a new Multi GET API request call. func NewMgetService(client *Client) *MgetService { builder := &MgetService{ client: client, - items: make([]*MultiGetItem, 0), } return builder } -func (b *MgetService) Preference(preference string) *MgetService { - b.preference = preference - return b +// Preference specifies the node or shard the operation should be performed +// on (default: random). +func (s *MgetService) Preference(preference string) *MgetService { + s.preference = preference + return s } -func (b *MgetService) Refresh(refresh bool) *MgetService { - b.refresh = &refresh - return b +// Refresh the shard containing the document before performing the operation. +func (s *MgetService) Refresh(refresh string) *MgetService { + s.refresh = refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *MgetService) Realtime(realtime bool) *MgetService { + s.realtime = &realtime + return s } -func (b *MgetService) Realtime(realtime bool) *MgetService { - b.realtime = &realtime - return b +// Routing is the specific routing value. +func (s *MgetService) Routing(routing string) *MgetService { + s.routing = routing + return s +} + +// StoredFields is a list of fields to return in the response. +func (s *MgetService) StoredFields(storedFields ...string) *MgetService { + s.storedFields = append(s.storedFields, storedFields...) + return s } // Pretty indicates that the JSON response be indented and human readable. @@ -56,15 +75,17 @@ func (s *MgetService) Pretty(pretty bool) *MgetService { return s } -func (b *MgetService) Add(items ...*MultiGetItem) *MgetService { - b.items = append(b.items, items...) - return b +// Add an item to the request. +func (s *MgetService) Add(items ...*MultiGetItem) *MgetService { + s.items = append(s.items, items...) + return s } -func (b *MgetService) Source() (interface{}, error) { +// Source returns the request body, which will be serialized into JSON. +func (s *MgetService) Source() (interface{}, error) { source := make(map[string]interface{}) - items := make([]interface{}, len(b.items)) - for i, item := range b.items { + items := make([]interface{}, len(s.items)) + for i, item := range s.items { src, err := item.Source() if err != nil { return nil, err @@ -75,40 +96,43 @@ func (b *MgetService) Source() (interface{}, error) { return source, nil } -func (b *MgetService) Do() (*MgetResponse, error) { - return b.DoC(nil) -} - -func (b *MgetService) DoC(ctx context.Context) (*MgetResponse, error) { +// Do executes the request. 
+func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) {
 	// Build url
 	path := "/_mget"
 	params := make(url.Values)
-	if b.realtime != nil {
-		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	if s.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *s.realtime))
+	}
+	if s.preference != "" {
+		params.Add("preference", s.preference)
 	}
-	if b.preference != "" {
-		params.Add("preference", b.preference)
+	if s.refresh != "" {
+		params.Add("refresh", s.refresh)
 	}
-	if b.refresh != nil {
-		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if len(s.storedFields) > 0 {
+		params.Set("stored_fields", strings.Join(s.storedFields, ","))
 	}
 
 	// Set body
-	body, err := b.Source()
+	body, err := s.Source()
 	if err != nil {
 		return nil, err
 	}
 
 	// Get response
-	res, err := b.client.PerformRequestC(ctx, "GET", path, params, body)
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
 	if err != nil {
 		return nil, err
 	}
 
 	// Return result
 	ret := new(MgetResponse)
-	if err := b.client.decoder.Decode(res.Body, ret); err != nil {
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
 		return nil, err
 	}
 	return ret, nil
@@ -118,45 +142,48 @@ func (b *MgetService) DoC(ctx context.Context) (*MgetResponse, error) {
 
 // MultiGetItem is a single document to retrieve via the MgetService.
 type MultiGetItem struct {
-	index       string
-	typ         string
-	id          string
-	routing     string
-	fields      []string
-	version     *int64 // see org.elasticsearch.common.lucene.uid.Versions
-	versionType string // see org.elasticsearch.index.VersionType
-	fsc         *FetchSourceContext
+	index        string
+	typ          string
+	id           string
+	routing      string
+	storedFields []string
+	version      *int64 // see org.elasticsearch.common.lucene.uid.Versions
+	versionType  string // see org.elasticsearch.index.VersionType
+	fsc          *FetchSourceContext
 }
 
+// NewMultiGetItem initializes a new, single item for a Multi GET request.
 func NewMultiGetItem() *MultiGetItem {
 	return &MultiGetItem{}
 }
 
+// Index specifies the index name.
 func (item *MultiGetItem) Index(index string) *MultiGetItem {
 	item.index = index
 	return item
 }
 
+// Type specifies the type name.
 func (item *MultiGetItem) Type(typ string) *MultiGetItem {
 	item.typ = typ
 	return item
 }
 
+// Id specifies the identifier of the document.
 func (item *MultiGetItem) Id(id string) *MultiGetItem {
 	item.id = id
 	return item
 }
 
+// Routing is the specific routing value.
 func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
 	item.routing = routing
 	return item
}
 
-func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
-	if item.fields == nil {
-		item.fields = make([]string, 0)
-	}
-	item.fields = append(item.fields, fields...)
+// StoredFields is a list of fields to return in the response.
+func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem {
+	item.storedFields = append(item.storedFields, storedFields...)
 	return item
 }
 
@@ -176,6 +203,7 @@ func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
 	return item
 }
 
+// FetchSource allows specifying source filtering.
func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { item.fsc = fetchSourceContext return item @@ -201,12 +229,12 @@ func (item *MultiGetItem) Source() (interface{}, error) { } source["_source"] = src } - if item.fields != nil { - source["fields"] = item.fields - } if item.routing != "" { source["_routing"] = item.routing } + if len(item.storedFields) > 0 { + source["stored_fields"] = strings.Join(item.storedFields, ",") + } if item.version != nil { source["version"] = fmt.Sprintf("%d", *item.version) } @@ -219,6 +247,7 @@ func (item *MultiGetItem) Source() (interface{}, error) { // -- Result of a Multi Get request. +// MgetResponse is the outcome of a Multi GET API request. type MgetResponse struct { Docs []*GetResult `json:"docs,omitempty"` } diff --git a/vendor/gopkg.in/olivere/elastic.v3/mget_test.go b/vendor/gopkg.in/olivere/elastic.v3/mget_test.go index da78e3122..30391dfed 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/mget_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/mget_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,6 +7,8 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestMultiGet(t *testing.T) { @@ -17,28 +19,28 @@ func TestMultiGet(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add some documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Count documents - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -50,7 +52,7 @@ func TestMultiGet(t *testing.T) { res, err := client.MultiGet(). Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")). Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/msearch.go b/vendor/gopkg.in/olivere/elastic.v3/msearch.go index 3fc4d1219..a568acd92 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/msearch.go +++ b/vendor/gopkg.in/olivere/elastic.v3/msearch.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
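// The mget hunks above replace Fields with StoredFields to follow the 5.0
// naming. A caller-side sketch of the renamed API, assuming a configured
// client (index, type, and ids are illustrative):
//
//	res, err := client.MultiGet().
//		Add(elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("1").StoredFields("message")).
//		Add(elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("2")).
//		Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	for _, doc := range res.Docs {
//		fmt.Println(doc.Id, doc.Found)
//	}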
@@ -48,11 +48,7 @@ func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { return s } -func (s *MultiSearchService) Do() (*MultiSearchResult, error) { - return s.DoC(nil) -} - -func (s *MultiSearchService) DoC(ctx context.Context) (*MultiSearchResult, error) { +func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { // Build url path := "/_msearch" @@ -84,7 +80,7 @@ func (s *MultiSearchService) DoC(ctx context.Context) (*MultiSearchResult, error body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n // Get response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, body) + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/msearch_test.go b/vendor/gopkg.in/olivere/elastic.v3/msearch_test.go index 332ade2c6..d2d1e1896 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/msearch_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/msearch_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -8,6 +8,8 @@ import ( "encoding/json" _ "net/http" "testing" + + "golang.org/x/net/context" ) func TestMultiSearch(t *testing.T) { @@ -30,22 +32,22 @@ func TestMultiSearch(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -61,7 +63,7 @@ func TestMultiSearch(t *testing.T) { searchResult, err := client.MultiSearch(). Add(sreq1, sreq2). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -135,22 +137,22 @@ func TestMultiSearchWithOneRequest(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -163,7 +165,7 @@ func TestMultiSearchWithOneRequest(t *testing.T) { searchResult, err := client.MultiSearch(). 
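// Illustrative sketch, not part of the patch: two searches in a single
// _msearch round trip using the context-aware Do above. The index name and
// queries are hypothetical; each sub-request yields its own SearchResult,
// in request order.
package example

import (
	"fmt"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func multiSearch(ctx context.Context, client *elastic.Client) error {
	sreq1 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()))
	sreq2 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("user", "olivere")))

	res, err := client.MultiSearch().Add(sreq1, sreq2).Do(ctx)
	if err != nil {
		return err
	}
	for i, r := range res.Responses {
		fmt.Printf("request %d: %d hits\n", i, r.Hits.TotalHits)
	}
	return nil
}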
Index(testIndexName). Add(sreq). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go index 26074062b..a313cd92c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go +++ b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // MultiTermvectorService returns information and statistics on terms in the @@ -256,12 +256,7 @@ func (s *MultiTermvectorService) Validate() error { } // Do executes the operation. -func (s *MultiTermvectorService) Do() (*MultiTermvectorResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *MultiTermvectorService) DoC(ctx context.Context) (*MultiTermvectorResponse, error) { +func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -284,7 +279,7 @@ func (s *MultiTermvectorService) DoC(ctx context.Context) (*MultiTermvectorRespo } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, body) + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/mtermvectors_test.go b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors_test.go index fc4c36be6..c22fcd43d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/mtermvectors_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/mtermvectors_test.go @@ -6,6 +6,8 @@ package elastic import ( "testing" + + "golang.org/x/net/context" ) func TestMultiTermVectorsValidateAndBuildURL(t *testing.T) { @@ -81,28 +83,28 @@ func TestMultiTermVectorsWithIds(t *testing.T) { tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Count documents - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -117,7 +119,7 @@ func TestMultiTermVectorsWithIds(t *testing.T) { Type("tweet"). Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("1").Fields(field)). Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("3").Fields(field)). 
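// Illustrative sketch, not part of the patch: fetching term vectors for two
// documents in one call, as the migrated test above does. The index, type,
// ids, and field name are hypothetical.
package example

import (
	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func multiTermVectors(ctx context.Context, client *elastic.Client) (*elastic.MultiTermvectorResponse, error) {
	return client.MultiTermVectors().
		Index("twitter").
		Type("tweet").
		Add(elastic.NewMultiTermvectorItem().Index("twitter").Type("tweet").Id("1").Fields("message")).
		Add(elastic.NewMultiTermvectorItem().Index("twitter").Type("tweet").Id("3").Fields("message")).
		Do(ctx)
}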
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go b/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go index 092e70edf..c956a5eac 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go +++ b/vendor/gopkg.in/olivere/elastic.v3/nodes_info.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // NodesInfoService allows to retrieve one or more or all of the @@ -101,12 +101,7 @@ func (s *NodesInfoService) Validate() error { } // Do executes the operation. -func (s *NodesInfoService) Do() (*NodesInfoResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *NodesInfoService) DoC(ctx context.Context) (*NodesInfoResponse, error) { +func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -119,7 +114,7 @@ func (s *NodesInfoService) DoC(ctx context.Context) (*NodesInfoResponse, error) } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/nodes_info_test.go b/vendor/gopkg.in/olivere/elastic.v3/nodes_info_test.go index 0402b2706..626f6bfd4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/nodes_info_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/nodes_info_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestNodesInfo(t *testing.T) { client, err := NewClient() @@ -12,7 +16,7 @@ func TestNodesInfo(t *testing.T) { t.Fatal(err) } - info, err := client.NodesInfo().Do() + info, err := client.NodesInfo().Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/nodes_stats.go b/vendor/gopkg.in/olivere/elastic.v3/nodes_stats.go index 4c0d1b8c4..9af56d56e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/nodes_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/nodes_stats.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // NodesStatsService returns node statistics. @@ -201,12 +201,7 @@ func (s *NodesStatsService) Validate() error { } // Do executes the operation. -func (s *NodesStatsService) Do() (*NodesStatsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
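// Illustrative sketch, not part of the patch: reading cluster topology with
// the context-aware NodesInfo. The response shape (ClusterName plus a Nodes
// map) is assumed from the elastic sources, not shown in this hunk.
package example

import (
	"fmt"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func printNodes(ctx context.Context, client *elastic.Client) error {
	info, err := client.NodesInfo().Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("cluster:", info.ClusterName)
	for id, node := range info.Nodes {
		fmt.Printf("%s: %s (%s)\n", id, node.Name, node.TransportAddress)
	}
	return nil
}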
-func (s *NodesStatsService) DoC(ctx context.Context) (*NodesStatsResponse, error) { +func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -219,7 +214,7 @@ func (s *NodesStatsService) DoC(ctx context.Context) (*NodesStatsResponse, error } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } @@ -247,8 +242,10 @@ type NodesStatsNode struct { TransportAddress string `json:"transport_address"` // Host is the host name, e.g. "macbookair" Host string `json:"host"` - // IP is the list of IP addresses, e.g. ["192.168.1.2"] - IP []string `json:"ip"` + // IP is an IP address, e.g. "192.168.1.2" + IP string `json:"ip"` + // Roles is a list of the roles of the node, e.g. master, data, ingest. + Roles []string `json:"roles"` // Attributes of the node. Attributes map[string]interface{} `json:"attributes"` @@ -282,6 +279,12 @@ type NodesStatsNode struct { // ScriptStats information. ScriptStats *NodesStatsScriptStats `json:"script"` + + // Discovery information. + Discovery *NodesStatsDiscovery `json:"discovery"` + + // Ingest information + Ingest *NodesStatsIngest `json:"ingest"` } type NodesStatsIndex struct { @@ -500,11 +503,15 @@ type NodesStatsRecoveryStats struct { } type NodesStatsNodeOS struct { - Timestamp int64 `json:"timestamp"` - CPUPercent int `json:"cpu_percent"` - LoadAverage float64 `json:"load_average"` - Mem *NodesStatsNodeOSMem `json:"mem"` - Swap *NodesStatsNodeOSSwap `json:"swap"` + Timestamp int64 `json:"timestamp"` + CPU *NodesStatsNodeOSCPU `json:"cpu"` + Mem *NodesStatsNodeOSMem `json:"mem"` + Swap *NodesStatsNodeOSSwap `json:"swap"` +} + +type NodesStatsNodeOSCPU struct { + Percent int `json:"percent"` + LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m } type NodesStatsNodeOSMem struct { @@ -617,6 +624,7 @@ type NodesStatsNodeFS struct { Timestamp int64 `json:"timestamp"` Total *NodesStatsNodeFSEntry `json:"total"` Data []*NodesStatsNodeFSEntry `json:"data"` + IOStats *NodesStatsNodeFSIOStats `json:"io_stats"` } type NodesStatsNodeFSEntry struct { @@ -632,6 +640,20 @@ type NodesStatsNodeFSEntry struct { Spins string `json:"spins"` } +type NodesStatsNodeFSIOStats struct { + Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"` + Total *NodesStatsNodeFSIOStatsEntry `json:"total"` +} + +type NodesStatsNodeFSIOStatsEntry struct { + DeviceName string `json:"device_name"` + Operations int64 `json:"operations"` + ReadOperations int64 `json:"read_operations"` + WriteOperations int64 `json:"write_operations"` + ReadKilobytes int64 `json:"read_kilobytes"` + WriteKilobytes int64 `json:"write_kilobytes"` +} + type NodesStatsNodeTransport struct { ServerOpen int `json:"server_open"` RxCount int64 `json:"rx_count"` @@ -660,3 +682,26 @@ type NodesStatsScriptStats struct { Compilations int64 `json:"compilations"` CacheEvictions int64 `json:"cache_evictions"` } + +type NodesStatsDiscovery struct { + ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"` +} + +type NodesStatsDiscoveryStats struct { + Total int64 `json:"total"` + Pending int64 `json:"pending"` + Committed int64 `json:"committed"` +} + +type NodesStatsIngest struct { + Total *NodesStatsIngestStats `json:"total"` + Pipelines interface{} `json:"pipelines"` +} + +type NodesStatsIngestStats struct { + Count int64 `json:"count"` + Time 
string `json:"time"` + TimeInMillis int64 `json:"time_in_millis"` + Current int64 `json:"current"` + Failed int64 `json:"failed"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/nodes_stats_test.go b/vendor/gopkg.in/olivere/elastic.v3/nodes_stats_test.go index 9b5fd1671..d74134243 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/nodes_stats_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/nodes_stats_test.go @@ -4,7 +4,11 @@ package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestNodesStats(t *testing.T) { client, err := NewClient() @@ -12,7 +16,7 @@ func TestNodesStats(t *testing.T) { t.Fatal(err) } - info, err := client.NodesStats().Human(true).Do() + info, err := client.NodesStats().Human(true).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/optimize.go b/vendor/gopkg.in/olivere/elastic.v3/optimize.go deleted file mode 100644 index 5c762c9a9..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/optimize.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/net/context" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -type OptimizeService struct { - client *Client - indices []string - maxNumSegments *int - onlyExpungeDeletes *bool - flush *bool - waitForMerge *bool - force *bool - pretty bool -} - -func NewOptimizeService(client *Client) *OptimizeService { - builder := &OptimizeService{ - client: client, - indices: make([]string, 0), - } - return builder -} - -func (s *OptimizeService) Index(indices ...string) *OptimizeService { - s.indices = append(s.indices, indices...) 
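// Illustrative sketch, not part of the patch: consuming the reshaped node
// stats above. IP is now a scalar, Roles is new, and the OS CPU load
// averages moved under os.cpu.load_average, keyed by "1m", "5m", and "15m".
// The Nodes map and the OS field name are assumed from the elastic sources.
package example

import (
	"fmt"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func printNodeLoad(ctx context.Context, client *elastic.Client) error {
	res, err := client.NodesStats().Do(ctx)
	if err != nil {
		return err
	}
	for id, node := range res.Nodes {
		fmt.Printf("%s ip=%s roles=%v\n", id, node.IP, node.Roles)
		if node.OS != nil && node.OS.CPU != nil {
			fmt.Printf("  cpu=%d%% load1m=%.2f\n", node.OS.CPU.Percent, node.OS.CPU.LoadAverage["1m"])
		}
	}
	return nil
}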
- return s -} - -func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService { - s.maxNumSegments = &maxNumSegments - return s -} - -func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService { - s.onlyExpungeDeletes = &onlyExpungeDeletes - return s -} - -func (s *OptimizeService) Flush(flush bool) *OptimizeService { - s.flush = &flush - return s -} - -func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService { - s.waitForMerge = &waitForMerge - return s -} - -func (s *OptimizeService) Force(force bool) *OptimizeService { - s.force = &force - return s -} - -func (s *OptimizeService) Pretty(pretty bool) *OptimizeService { - s.pretty = pretty - return s -} - -func (s *OptimizeService) Do() (*OptimizeResult, error) { - return s.DoC(nil) -} - -func (s *OptimizeService) DoC(ctx context.Context) (*OptimizeResult, error) { - // Build url - path := "/" - - // Indices part - var indexPart []string - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - path += "/_optimize" - - // Parameters - params := make(url.Values) - if s.maxNumSegments != nil { - params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments)) - } - if s.onlyExpungeDeletes != nil { - params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) - } - if s.flush != nil { - params.Set("flush", fmt.Sprintf("%v", *s.flush)) - } - if s.waitForMerge != nil { - params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) - } - if s.force != nil { - params.Set("force", fmt.Sprintf("%v", *s.force)) - } - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - - // Get response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return result - ret := new(OptimizeResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of an optimize request. - -type OptimizeResult struct { - Shards shardsInfo `json:"_shards,omitempty"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/optimize_test.go b/vendor/gopkg.in/olivere/elastic.v3/optimize_test.go deleted file mode 100644 index c47de3a94..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/optimize_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "testing" -) - -func TestOptimize(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add some documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Optimize documents - res, err := client.Optimize(testIndexName, testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected result; got nil") - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/percolate.go b/vendor/gopkg.in/olivere/elastic.v3/percolate.go deleted file mode 100644 index 2f8221252..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/percolate.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/net/context" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html. -type PercolateService struct { - client *Client - pretty bool - index string - typ string - id string - version interface{} - versionType string - routing []string - preference string - ignoreUnavailable *bool - percolateIndex string - percolatePreference string - percolateRouting string - source string - allowNoIndices *bool - expandWildcards string - percolateFormat string - percolateType string - bodyJson interface{} - bodyString string -} - -// NewPercolateService creates a new PercolateService. -func NewPercolateService(client *Client) *PercolateService { - return &PercolateService{ - client: client, - routing: make([]string, 0), - } -} - -// Index is the name of the index of the document being percolated. -func (s *PercolateService) Index(index string) *PercolateService { - s.index = index - return s -} - -// Type is the type of the document being percolated. -func (s *PercolateService) Type(typ string) *PercolateService { - s.typ = typ - return s -} - -// Id is to substitute the document in the request body with a -// document that is known by the specified id. On top of the id, -// the index and type parameter will be used to retrieve -// the document from within the cluster. -func (s *PercolateService) Id(id string) *PercolateService { - s.id = id - return s -} - -// ExpandWildcards indicates whether to expand wildcard expressions -// to concrete indices that are open, closed or both. -func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService { - s.expandWildcards = expandWildcards - return s -} - -// PercolateFormat indicates whether to return an array of matching -// query IDs instead of objects. 
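// Illustrative sketch, not part of the patch: the OptimizeService deleted
// above maps to the _forcemerge endpoint in Elasticsearch 5.x. This assumes
// the client's Forcemerge service is available in this version; the index
// name is hypothetical.
package example

import (
	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func forceMerge(ctx context.Context, client *elastic.Client) error {
	// Merge the index down to one segment, as Optimize().MaxNumSegments(1)
	// used to do.
	_, err := client.Forcemerge("twitter").MaxNumSegments(1).Do(ctx)
	return err
}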
-func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService { - s.percolateFormat = percolateFormat - return s -} - -// PercolateType is the type to percolate document into. Defaults to type. -func (s *PercolateService) PercolateType(percolateType string) *PercolateService { - s.percolateType = percolateType - return s -} - -// PercolateRouting is the routing value to use when percolating -// the existing document. -func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService { - s.percolateRouting = percolateRouting - return s -} - -// Source is the URL-encoded request definition. -func (s *PercolateService) Source(source string) *PercolateService { - s.source = source - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService { - s.allowNoIndices = &allowNoIndices - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should -// be ignored when unavailable (missing or closed). -func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// PercolateIndex is the index to percolate the document into. Defaults to index. -func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService { - s.percolateIndex = percolateIndex - return s -} - -// PercolatePreference defines which shard to prefer when executing -// the percolate request. -func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService { - s.percolatePreference = percolatePreference - return s -} - -// Version is an explicit version number for concurrency control. -func (s *PercolateService) Version(version interface{}) *PercolateService { - s.version = version - return s -} - -// VersionType is the specific version type. -func (s *PercolateService) VersionType(versionType string) *PercolateService { - s.versionType = versionType - return s -} - -// Routing is a list of specific routing values. -func (s *PercolateService) Routing(routing []string) *PercolateService { - s.routing = routing - return s -} - -// Preference specifies the node or shard the operation should be -// performed on (default: random). -func (s *PercolateService) Preference(preference string) *PercolateService { - s.preference = preference - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *PercolateService) Pretty(pretty bool) *PercolateService { - s.pretty = pretty - return s -} - -// Doc wraps the given document into the "doc" key of the body. -func (s *PercolateService) Doc(doc interface{}) *PercolateService { - return s.BodyJson(map[string]interface{}{"doc": doc}) -} - -// BodyJson is the percolator request definition using the percolate DSL. -func (s *PercolateService) BodyJson(body interface{}) *PercolateService { - s.bodyJson = body - return s -} - -// BodyString is the percolator request definition using the percolate DSL. -func (s *PercolateService) BodyString(body string) *PercolateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *PercolateService) buildURL() (string, url.Values, error) { - // Build URL - var path string - var err error - if s.id == "" { - path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{ - "index": s.index, - "type": s.typ, - }) - } else { - path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{ - "index": s.index, - "type": s.typ, - "id": s.id, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if len(s.routing) > 0 { - params.Set("routing", strings.Join(s.routing, ",")) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.percolateIndex != "" { - params.Set("percolate_index", s.percolateIndex) - } - if s.percolatePreference != "" { - params.Set("percolate_preference", s.percolatePreference) - } - if s.percolateRouting != "" { - params.Set("percolate_routing", s.percolateRouting) - } - if s.source != "" { - params.Set("source", s.source) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.percolateFormat != "" { - params.Set("percolate_format", s.percolateFormat) - } - if s.percolateType != "" { - params.Set("percolate_type", s.percolateType) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *PercolateService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *PercolateService) Do() (*PercolateResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *PercolateService) DoC(ctx context.Context) (*PercolateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(PercolateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// PercolateResponse is the response of PercolateService.Do. -type PercolateResponse struct { - TookInMillis int64 `json:"took"` // search time in milliseconds - Total int64 `json:"total"` // total matches - Matches []*PercolateMatch `json:"matches,omitempty"` - Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations -} - -// PercolateMatch returns a single match in a PercolateResponse. 
-type PercolateMatch struct { - Index string `json:"_index,omitempty"` - Id string `json:"_id"` - Score float64 `json:"_score,omitempty"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/percolate_test.go b/vendor/gopkg.in/olivere/elastic.v3/percolate_test.go index 07b36fef7..b847f02a8 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/percolate_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/percolate_test.go @@ -1,92 +1,58 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestPercolate(t *testing.T) { client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - // Add a document - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - // Register a query in the ".percolator" type. - search := NewSearchSource().Query(NewMatchQuery("message", "Golang")) - searchSrc, err := search.Source() - if err != nil { - t.Fatal(err) - } - _, err = client.Index(). - Index(testIndexName).Type(".percolator").Id("1"). - BodyJson(searchSrc). - Do() + _, err := client.Index(). + Index(testIndexName). + Type("queries"). + Id("1"). + BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`). + Refresh("wait_for"). + Do(context.TODO()) if err != nil { t.Fatal(err) } // Percolate should return our registered query - newTweet := tweet{User: "olivere", Message: "Golang is fun."} - res, err := client.Percolate(). - Index(testIndexName).Type("tweet"). - Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}). - Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Errorf("expected results != nil; got nil") - } - if res.Total != 1 { - t.Fatalf("expected 1 result; got: %d", res.Total) - } - if res.Matches == nil { - t.Fatalf("expected Matches; got: %v", res.Matches) - } - matches := res.Matches - if matches == nil { - t.Fatalf("expected matches as map; got: %v", matches) - } - if len(matches) != 1 { - t.Fatalf("expected %d registered matches; got: %d", 1, len(matches)) - } - if matches[0].Id != "1" { - t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id) - } - - // Percolating an existsing document should return our registered query - res, err = client.Percolate(). - Index(testIndexName).Type("tweet"). - Id("1"). - Pretty(true). - Do() + pq := NewPercolatorQuery(). + Field("query"). + DocumentType("doctype"). 
+ Document(doctype{Message: "A new bonsai tree in the office"}) + res, err := client.Search(testIndexName).Query(pq).Do(context.TODO()) if err != nil { t.Fatal(err) } if res == nil { - t.Errorf("expected results != nil; got nil") + t.Fatal("expected results != nil; got nil") } - if res.Total != 1 { - t.Fatalf("expected 1 result; got: %d", res.Total) + if res.Hits == nil { + t.Fatal("expected SearchResult.Hits != nil; got nil") } - if res.Matches == nil { - t.Fatalf("expected Matches; got: %v", res.Matches) + if got, want := res.Hits.TotalHits, int64(1); got != want { + t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) } - matches = res.Matches - if matches == nil { - t.Fatalf("expected matches as map; got: %v", matches) + if got, want := len(res.Hits.Hits), 1; got != want { + t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) } - if len(matches) != 1 { - t.Fatalf("expected %d registered matches; got: %d", 1, len(matches)) + hit := res.Hits.Hits[0] + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) } - if matches[0].Id != "1" { - t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id) + got := string(*hit.Source) + expected := `{"query":{"match":{"message":"bonsai tree"}}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/ping.go b/vendor/gopkg.in/olivere/elastic.v3/ping.go index e77617761..5ec7f0f9a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/ping.go +++ b/vendor/gopkg.in/olivere/elastic.v3/ping.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -74,13 +74,7 @@ func (s *PingService) Pretty(pretty bool) *PingService { // Do returns the PingResult, the HTTP status code of the Elasticsearch // server, and an error. -func (s *PingService) Do() (*PingResult, int, error) { - return s.DoC(nil) -} - -// DoC returns the PingResult, the HTTP status code of the Elasticsearch -// server, and an error. -func (s *PingService) DoC(ctx context.Context) (*PingResult, int, error) { +func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) { s.client.mu.RLock() basicAuth := s.client.basicAuth basicAuthUsername := s.client.basicAuthUsername @@ -117,12 +111,7 @@ func (s *PingService) DoC(ctx context.Context) (*PingResult, int, error) { req.SetBasicAuth(basicAuthUsername, basicAuthPassword) } - var res *http.Response - if ctx == nil { - res, err = s.client.c.Do((*http.Request)(req)) - } else { - res, err = ctxhttp.Do(ctx, s.client.c, (*http.Request)(req)) - } + res, err := ctxhttp.Do(ctx, s.client.c, (*http.Request)(req)) if err != nil { return nil, 0, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/ping_test.go b/vendor/gopkg.in/olivere/elastic.v3/ping_test.go index 9891c2025..1462b3585 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/ping_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/ping_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
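// Illustrative sketch, not part of the patch: the 5.x percolator flow used
// by the rewritten test above. A query is indexed as an ordinary document
// into a field mapped as type "percolator", then matched with a
// PercolatorQuery. The index name, type names, and mapping are hypothetical.
package example

import (
	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

type doc struct {
	Message string `json:"message"`
}

func percolate(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	// Register a query under the "query" percolator field.
	_, err := client.Index().
		Index("queries-index").
		Type("queries").
		Id("1").
		BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`).
		Refresh("wait_for").
		Do(ctx)
	if err != nil {
		return nil, err
	}
	// Ask which registered queries match this document.
	pq := elastic.NewPercolatorQuery().
		Field("query").
		DocumentType("doctype").
		Document(doc{Message: "A new bonsai tree in the office"})
	return client.Search("queries-index").Query(pq).Do(ctx)
}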
@@ -7,12 +7,14 @@ package elastic import ( "net/http" "testing" + + "golang.org/x/net/context" ) func TestPingGet(t *testing.T) { client := setupTestClientAndCreateIndex(t) - res, code, err := client.Ping(DefaultURL).Do() + res, code, err := client.Ping(DefaultURL).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -33,7 +35,7 @@ func TestPingGet(t *testing.T) { func TestPingHead(t *testing.T) { client := setupTestClientAndCreateIndex(t) - res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do() + res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -51,7 +53,7 @@ func TestPingHeadFailure(t *testing.T) { res, code, err := client. Ping("http://127.0.0.1:9299"). HttpHeadOnly(true). - Do() + Do(context.TODO()) if err == nil { t.Error("expected error, got nil") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/plugins.go b/vendor/gopkg.in/olivere/elastic.v3/plugins.go index 3906d74d7..a46ac3748 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/plugins.go +++ b/vendor/gopkg.in/olivere/elastic.v3/plugins.go @@ -1,9 +1,11 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic +import "golang.org/x/net/context" + // HasPlugin indicates whether the cluster has the named plugin. func (c *Client) HasPlugin(name string) (bool, error) { plugins, err := c.Plugins() @@ -20,7 +22,7 @@ func (c *Client) HasPlugin(name string) (bool, error) { // Plugins returns the list of all registered plugins. func (c *Client) Plugins() ([]string, error) { - stats, err := c.ClusterStats().Do() + stats, err := c.ClusterStats().Do(context.Background()) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/plugins_test.go b/vendor/gopkg.in/olivere/elastic.v3/plugins_test.go index 112b80943..969f0b0e5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/plugins_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/plugins_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_template.go b/vendor/gopkg.in/olivere/elastic.v3/put_template.go similarity index 87% rename from vendor/gopkg.in/olivere/elastic.v3/search_template.go rename to vendor/gopkg.in/olivere/elastic.v3/put_template.go index ee8809005..13635a052 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_template.go +++ b/vendor/gopkg.in/olivere/elastic.v3/put_template.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -8,7 +8,9 @@ import ( "fmt" "net/url" - "gopkg.in/olivere/elastic.v3/uritemplates" + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" ) // PutTemplateService creates or updates a search template. @@ -109,7 +111,7 @@ func (s *PutTemplateService) Validate() error { } // Do executes the operation. 
-func (s *PutTemplateService) Do() (*PutTemplateResponse, error) { +func (s *PutTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -130,22 +132,15 @@ func (s *PutTemplateService) Do() (*PutTemplateResponse, error) { } // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } // Return operation response - ret := new(PutTemplateResponse) + ret := new(AcknowledgedResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } - -// PutTemplateResponse is the response of PutTemplateService.Do. -type PutTemplateResponse struct { - Id string `json:"_id"` - Version int `json:"_version"` - Created bool `json:"created"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/put_template_test.go b/vendor/gopkg.in/olivere/elastic.v3/put_template_test.go new file mode 100644 index 000000000..ea7971958 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/put_template_test.go @@ -0,0 +1,54 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestSearchTemplatesLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Template + tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` + + // Create template + cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if cresp == nil { + t.Fatalf("expected response != nil; got: %v", cresp) + } + if !cresp.Acknowledged { + t.Errorf("expected acknowledged = %v; got: %v", true, cresp.Acknowledged) + } + + // Get template + resp, err := client.GetTemplate().Id("elastic-test").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Template == "" { + t.Errorf("expected template != %q; got: %q", "", resp.Template) + } + + // Delete template + dresp, err := client.DeleteTemplate().Id("elastic-test").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if dresp == nil { + t.Fatalf("expected response != nil; got: %v", dresp) + } + if !dresp.Acknowledged { + t.Fatalf("expected acknowledged = %v; got: %v", true, dresp.Acknowledged) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/query.go b/vendor/gopkg.in/olivere/elastic.v3/query.go index 0869eaecc..ad01354a0 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/query.go +++ b/vendor/gopkg.in/olivere/elastic.v3/query.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/recipes/sliced_scroll.go b/vendor/gopkg.in/olivere/elastic.v3/recipes/sliced_scroll.go new file mode 100644 index 000000000..f706dd424 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/recipes/sliced_scroll.go @@ -0,0 +1,161 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +// SlicedScroll illustrates scrolling through a set of documents +// in parallel. It uses the sliced scrolling feature introduced +// in Elasticsearch 5.0 to create a number of Goroutines, each +// scrolling through a slice of the total results. A second goroutine +// receives the hits from the set of goroutines scrolling through +// the slices and simply counts the total number and the number of +// documents received per slice. +// +// The speedup of sliced scrolling can be significant but is very +// dependent on the specific use case. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/search-request-scroll.html#sliced-scroll +// for details on sliced scrolling in Elasticsearch. +// +// Example +// +// Scroll with 4 parallel slices through an index called "products". +// Use "_uid" as the default field: +// +// sliced_scroll -index=products -n=4 +// +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "sync" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" + "gopkg.in/olivere/elastic.v5" +) + +func main() { + var ( + url = flag.String("url", "http://localhost:9200", "Elasticsearch URL") + index = flag.String("index", "", "Elasticsearch index name") + typ = flag.String("type", "", "Elasticsearch type name") + field = flag.String("field", "", "Slice field (must be numeric)") + numSlices = flag.Int("n", 2, "Number of slices to use in parallel") + sniff = flag.Bool("sniff", true, "Enable or disable sniffing") + ) + flag.Parse() + log.SetFlags(0) + + if *url == "" { + log.Fatal("missing url parameter") + } + if *index == "" { + log.Fatal("missing index parameter") + } + if *numSlices <= 0 { + log.Fatal("n must be greater than zero") + } + + // Create an Elasticsearch client + client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff)) + if err != nil { + log.Fatal(err) + } + + // Setup a group of goroutines from the excellent errgroup package + g, ctx := errgroup.WithContext(context.TODO()) + + // Hits channel will be sent to from the first set of goroutines and consumed by the second + type hit struct { + Slice int + Hit elastic.SearchHit + } + hitsc := make(chan hit) + + begin := time.Now() + + // Start a number of goroutines to parallelize scrolling + var wg sync.WaitGroup + for i := 0; i < *numSlices; i++ { + wg.Add(1) + + slice := i + + // Prepare the query + var query elastic.Query + if *typ == "" { + query = elastic.NewMatchAllQuery() + } else { + query = elastic.NewTypeQuery(*typ) + } + + // Prepare the slice + sliceQuery := elastic.NewSliceQuery().Id(i).Max(*numSlices) + if *field != "" { + sliceQuery = sliceQuery.Field(*field) + } + + // Start goroutine for this sliced scroll + g.Go(func() error { + defer wg.Done() + svc := client.Scroll(*index).Query(query).Slice(sliceQuery) + for { + res, err := svc.Do(ctx) + if err == io.EOF { + break + } + if err != nil { + return err + } + for _, searchHit := range res.Hits.Hits { + // Pass the hit to the hits channel, which will be consumed below + select { + case hitsc <- hit{Slice: slice, Hit: *searchHit}: + case <-ctx.Done(): + return ctx.Err() + } + } + } + return nil + }) + } + go func() { + // Wait until all scrolling is done + wg.Wait() + close(hitsc) + }() + + // Second goroutine will consume the hits sent from the workers in first set of goroutines + var total uint64 + totals := make([]uint64, *numSlices) + g.Go(func() error { + for hit := range hitsc { + // We simply count the hits here. 
+ atomic.AddUint64(&totals[hit.Slice], 1) + current := atomic.AddUint64(&total, 1) + sec := int(time.Since(begin).Seconds()) + fmt.Printf("%8d | %02d:%02d\r", current, sec/60, sec%60) + select { + default: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + + // Wait until all goroutines are finished + if err := g.Wait(); err != nil { + log.Fatal(err) + } + + fmt.Printf("Scrolled through a total of %d documents in %v\n", total, time.Since(begin)) + for i := 0; i < *numSlices; i++ { + fmt.Printf("Slice %2d received %d documents\n", i, totals[i]) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindex.go b/vendor/gopkg.in/olivere/elastic.v3/reindex.go index 9f6cf0c92..4fce544e7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/reindex.go +++ b/vendor/gopkg.in/olivere/elastic.v3/reindex.go @@ -12,26 +12,21 @@ import ( ) // ReindexService is a method to copy documents from one index to another. -// It was introduced in Elasticsearch 2.3.0. -// -// Notice that Elastic already had a Reindexer service that pre-dated -// the Reindex API. Use that if you're on an earlier version of Elasticsearch. -// -// It is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-reindex.html. type ReindexService struct { - client *Client - pretty bool - consistency string - refresh *bool - timeout string - waitForCompletion *bool - bodyJson interface{} - bodyString string - source *ReindexSource - destination *ReindexDestination - conflicts string - size *int - script *Script + client *Client + pretty bool + refresh string + timeout string + waitForActiveShards string + waitForCompletion *bool + requestsPerSecond *int + body interface{} + source *ReindexSource + destination *ReindexDestination + conflicts string + size *int + script *Script } // NewReindexService creates a new ReindexService. @@ -41,16 +36,26 @@ func NewReindexService(client *Client) *ReindexService { } } -// Consistency specifies an explicit write consistency setting for the operation. -func (s *ReindexService) Consistency(consistency string) *ReindexService { - s.consistency = consistency +// WaitForActiveShards sets the number of shard copies that must be active before +// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or +// equal to the total number of copies for the shard (number of replicas + 1). +func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// RequestsPerSecond specifies the throttle to set on this request in sub-requests per second. +// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. +func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService { + s.requestsPerSecond = &requestsPerSecond return s } // Refresh indicates whether Elasticsearch should refresh the effected indexes // immediately. -func (s *ReindexService) Refresh(refresh bool) *ReindexService { - s.refresh = &refresh +func (s *ReindexService) Refresh(refresh string) *ReindexService { + s.refresh = refresh return s } @@ -149,18 +154,10 @@ func (s *ReindexService) Script(script *Script) *ReindexService { return s } -// BodyJson specifies e.g. the query to restrict the results specified with the -// Query DSL (optional). 
The interface{} will be serialized to a JSON document, -// so use a map[string]interface{}. -func (s *ReindexService) BodyJson(body interface{}) *ReindexService { - s.bodyJson = body - return s -} - -// Body specifies e.g. a query to restrict the results specified with -// the Query DSL (optional). -func (s *ReindexService) BodyString(body string) *ReindexService { - s.bodyString = body +// Body specifies the body of the request to send to Elasticsearch. +// It overrides settings specified with other setters, e.g. Query. +func (s *ReindexService) Body(body interface{}) *ReindexService { + s.body = body return s } @@ -174,15 +171,18 @@ func (s *ReindexService) buildURL() (string, url.Values, error) { if s.pretty { params.Set("pretty", "1") } - if s.consistency != "" { - params.Set("consistency", s.consistency) - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.timeout != "" { params.Set("timeout", s.timeout) } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } if s.waitForCompletion != nil { params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) } @@ -192,6 +192,9 @@ func (s *ReindexService) buildURL() (string, url.Values, error) { // Validate checks if the operation is valid. func (s *ReindexService) Validate() error { var invalid []string + if s.body != nil { + return nil + } if s.source == nil { invalid = append(invalid, "Source") } else { @@ -208,13 +211,10 @@ func (s *ReindexService) Validate() error { return nil } -// body returns the body part of the document request. -func (s *ReindexService) body() (interface{}, error) { - if s.bodyJson != nil { - return s.bodyJson, nil - } - if s.bodyString != "" { - return s.bodyString, nil +// getBody returns the body part of the document request. +func (s *ReindexService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil } body := make(map[string]interface{}) @@ -249,12 +249,7 @@ func (s *ReindexService) body() (interface{}, error) { } // Do executes the operation. -func (s *ReindexService) Do() (*ReindexResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *ReindexService) DoC(ctx context.Context) (*ReindexResponse, error) { +func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -267,41 +262,25 @@ func (s *ReindexService) DoC(ctx context.Context) (*ReindexResponse, error) { } // Setup HTTP request body - body, err := s.body() + body, err := s.getBody() if err != nil { return nil, err } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } // Return operation response - ret := new(ReindexResponse) + ret := new(BulkIndexByScrollResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } -// ReindexResponse is the response of ReindexService.Do. 
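// Illustrative sketch, not part of the patch: driving the rewritten Reindex
// service with the parameters introduced above (string-valued Refresh,
// RequestsPerSecond throttling, wait_for_active_shards). Index names are
// hypothetical.
package example

import (
	"fmt"

	"golang.org/x/net/context"

	"gopkg.in/olivere/elastic.v5"
)

func reindexThrottled(ctx context.Context, client *elastic.Client) error {
	src := elastic.NewReindexSource().Index("twitter")
	dst := elastic.NewReindexDestination().Index("new_twitter")
	res, err := client.Reindex().
		Source(src).
		Destination(dst).
		WaitForActiveShards("all").
		RequestsPerSecond(500). // throttle sub-requests; -1 disables throttling
		Refresh("true").
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("created=%d updated=%d\n", res.Created, res.Updated)
	return nil
}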
-type ReindexResponse struct { - Took interface{} `json:"took"` // 2.3.0 returns "37.7ms" while 2.2 returns 38 for took - TimedOut bool `json:"timed_out"` - Total int64 `json:"total"` - Updated int64 `json:"updated"` - Created int64 `json:"created"` - Deleted int64 `json:"deleted"` - Batches int64 `json:"batches"` - VersionConflicts int64 `json:"version_conflicts"` - Noops int64 `json:"noops"` - Retries int64 `json:"retries"` - Canceled string `json:"canceled"` - Failures []shardOperationFailure `json:"failures"` -} - // -- Source of Reindex -- // ReindexSource specifies the source of a Reindex process. @@ -321,12 +300,7 @@ type ReindexSource struct { // NewReindexSource creates a new ReindexSource. func NewReindexSource() *ReindexSource { - return &ReindexSource{ - indices: make([]string, 0), - types: make([]string, 0), - sorts: make([]SortInfo, 0), - sorters: make([]Sorter, 0), - } + return &ReindexSource{} } // SearchType is the search operation type. Possible values are diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindex_test.go b/vendor/gopkg.in/olivere/elastic.v3/reindex_test.go index 18866271a..5e7b8fe40 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/reindex_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/reindex_test.go @@ -7,11 +7,49 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) +func TestReindexSourceWithBodyMap(t *testing.T) { + client := setupTestClient(t) + out, err := client.Reindex().Body(map[string]interface{}{ + "source": map[string]interface{}{ + "index": "twitter", + }, + "dest": map[string]interface{}{ + "index": "new_twitter", + }, + }).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithBodyString(t *testing.T) { + client := setupTestClient(t) + got, err := client.Reindex().Body(`{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`).getBody() + if err != nil { + t.Fatal(err) + } + want := `{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + func TestReindexSourceWithSourceIndexAndDestinationIndex(t *testing.T) { client := setupTestClient(t) - out, err := client.ReindexTask().SourceIndex("twitter").DestinationIndex("new_twitter").body() + out, err := client.Reindex().SourceIndex("twitter").DestinationIndex("new_twitter").getBody() if err != nil { t.Fatal(err) } @@ -30,7 +68,7 @@ func TestReindexSourceWithSourceAndDestinationAndVersionType(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter") dst := NewReindexDestination().Index("new_twitter").VersionType("external") - out, err := client.ReindexTask().Source(src).Destination(dst).body() + out, err := client.Reindex().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -49,7 +87,7 @@ func TestReindexSourceWithSourceAndDestinationAndOpType(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter") dst := NewReindexDestination().Index("new_twitter").OpType("create") - out, err := client.ReindexTask().Source(src).Destination(dst).body() + out, err := client.Reindex().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -68,7 +106,7 @@ func TestReindexSourceWithConflictsProceed(t *testing.T) { client := setupTestClient(t) src := 
NewReindexSource().Index("twitter") dst := NewReindexDestination().Index("new_twitter").OpType("create") - out, err := client.ReindexTask().Conflicts("proceed").Source(src).Destination(dst).body() + out, err := client.Reindex().Conflicts("proceed").Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -87,7 +125,7 @@ func TestReindexSourceWithProceedOnVersionConflict(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter") dst := NewReindexDestination().Index("new_twitter").OpType("create") - out, err := client.ReindexTask().ProceedOnVersionConflict().Source(src).Destination(dst).body() + out, err := client.Reindex().ProceedOnVersionConflict().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -106,7 +144,7 @@ func TestReindexSourceWithQuery(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter").Type("tweet").Query(NewTermQuery("user", "olivere")) dst := NewReindexDestination().Index("new_twitter") - out, err := client.ReindexTask().Source(src).Destination(dst).body() + out, err := client.Reindex().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -125,7 +163,7 @@ func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter", "blog").Type("tweet", "post") dst := NewReindexDestination().Index("all_together") - out, err := client.ReindexTask().Source(src).Destination(dst).body() + out, err := client.Reindex().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -144,7 +182,7 @@ func TestReindexSourceWithSourceAndSize(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("twitter").Sort("date", false) dst := NewReindexDestination().Index("new_twitter") - out, err := client.ReindexTask().Size(10000).Source(src).Destination(dst).body() + out, err := client.Reindex().Size(10000).Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -164,7 +202,7 @@ func TestReindexSourceWithScript(t *testing.T) { src := NewReindexSource().Index("twitter") dst := NewReindexDestination().Index("new_twitter").VersionType("external") scr := NewScriptInline("if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}") - out, err := client.ReindexTask().Source(src).Destination(dst).Script(scr).body() + out, err := client.Reindex().Source(src).Destination(dst).Script(scr).getBody() if err != nil { t.Fatal(err) } @@ -183,7 +221,7 @@ func TestReindexSourceWithRouting(t *testing.T) { client := setupTestClient(t) src := NewReindexSource().Index("source").Query(NewMatchQuery("company", "cat")) dst := NewReindexDestination().Index("dest").Routing("=cat") - out, err := client.ReindexTask().Source(src).Destination(dst).body() + out, err := client.Reindex().Source(src).Destination(dst).getBody() if err != nil { t.Fatal(err) } @@ -208,7 +246,7 @@ func TestReindex(t *testing.T) { t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion) } - sourceCount, err := client.Count(testIndexName).Do() + sourceCount, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -216,7 +254,7 @@ func TestReindex(t *testing.T) { t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) } - targetCount, err := client.Count(testIndexName2).Do() + targetCount, err := client.Count(testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -227,7 +265,7 @@ func TestReindex(t *testing.T) { // Simple copying 
src := NewReindexSource().Index(testIndexName) dst := NewReindexDestination().Index(testIndexName2) - res, err := client.ReindexTask().Source(src).Destination(dst).Refresh(true).Do() + res, err := client.Reindex().Source(src).Destination(dst).Refresh("true").Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -244,7 +282,7 @@ func TestReindex(t *testing.T) { t.Errorf("expected %d, got %d", sourceCount, res.Created) } - targetCount, err = client.Count(testIndexName2).Do() + targetCount, err = client.Count(testIndexName2).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindexer.go b/vendor/gopkg.in/olivere/elastic.v3/reindexer.go deleted file mode 100644 index 7193a1337..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/reindexer.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "errors" -) - -// Reindexer simplifies the process of reindexing an index. You typically -// reindex a source index to a target index. However, you can also specify -// a query that filters out documents from the source index before bulk -// indexing them into the target index. The caller may also specify a -// different client for the target, e.g. when copying indices from one -// Elasticsearch cluster to another. -// -// Internally, the Reindex users a scan and scroll operation on the source -// index and bulk indexing to push data into the target index. -// -// By default the reindexer fetches the _source, _parent, and _routing -// attributes from the source index, using the provided CopyToTargetIndex -// will copy those attributes into the destinationIndex. -// This behaviour can be overridden by setting the ScanFields and providing a -// custom ReindexerFunc. -// -// The caller is responsible for setting up and/or clearing the target index -// before starting the reindex process. -// -// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html -// for more information about reindexing. -type Reindexer struct { - sourceClient, targetClient *Client - sourceIndex string - query Query - scanFields []string - bulkSize int - size int - scroll string - reindexerFunc ReindexerFunc - progress ReindexerProgressFunc - statsOnly bool -} - -// A ReindexerFunc receives each hit from the sourceIndex. -// It can choose to add any number of BulkableRequests to the bulkService. -type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error - -// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's -// _source, _parent, and _routing attributes into the targetIndex -func CopyToTargetIndex(targetIndex string) ReindexerFunc { - return func(hit *SearchHit, bulkService *BulkService) error { - // TODO(oe) Do we need to deserialize here? - source := make(map[string]interface{}) - if err := json.Unmarshal(*hit.Source, &source); err != nil { - return err - } - req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source) - if hit.Parent != "" { - req = req.Parent(hit.Parent) - } - if hit.Routing != "" { - req = req.Routing(hit.Routing) - } - bulkService.Add(req) - return nil - } -} - -// ReindexerProgressFunc is a callback that can be used with Reindexer -// to report progress while reindexing data. 
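// NOTE: this client-side Reindexer (scan/scroll plus bulk indexing) is being
// removed in favor of Elasticsearch's server-side Reindex API, which the
// migrated tests above exercise. A minimal sketch of the replacement call,
// matching those tests (index names are illustrative):
//
//	src := elastic.NewReindexSource().Index("twitter")
//	dst := elastic.NewReindexDestination().Index("new_twitter")
//	res, err := client.Reindex().Source(src).Destination(dst).Refresh("true").Do(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("created %d documents\n", res.Created)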
-type ReindexerProgressFunc func(current, total int64) - -// ReindexerResponse is returned from the Do func in a Reindexer. -// By default, it returns the number of succeeded and failed bulk operations. -// To return details about all failed items, set StatsOnly to false in -// Reindexer. -type ReindexerResponse struct { - Success int64 - Failed int64 - Errors []*BulkResponseItem -} - -// NewReindexer returns a new Reindexer. -func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer { - return &Reindexer{ - sourceClient: client, - sourceIndex: source, - reindexerFunc: reindexerFunc, - statsOnly: true, - } -} - -// TargetClient specifies a different client for the target. This is -// necessary when the target index is in a different Elasticsearch cluster. -// By default, the source and target clients are the same. -func (ix *Reindexer) TargetClient(c *Client) *Reindexer { - ix.targetClient = c - return ix -} - -// Query specifies the query to apply to the source. It filters out those -// documents to be indexed into target. A nil query does not filter out any -// documents. -func (ix *Reindexer) Query(q Query) *Reindexer { - ix.query = q - return ix -} - -// ScanFields specifies the fields the scan query should load. -// The default fields are _source, _parent, _routing. -func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer { - ix.scanFields = scanFields - return ix -} - -// BulkSize returns the number of documents to send to Elasticsearch per chunk. -// The default is 500. -func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer { - ix.bulkSize = bulkSize - return ix -} - -// Size is the number of results to return per shard, not per request. -// So a size of 10 which hits 5 shards will return a maximum of 50 results -// per scan request. -func (ix *Reindexer) Size(size int) *Reindexer { - ix.size = size - return ix -} - -// Scroll specifies for how long the scroll operation on the source index -// should be maintained. The default is 5m. -func (ix *Reindexer) Scroll(timeout string) *Reindexer { - ix.scroll = timeout - return ix -} - -// Progress indicates a callback that will be called while indexing. -func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer { - ix.progress = f - return ix -} - -// StatsOnly indicates whether the Do method should return details e.g. about -// the documents that failed while indexing. It is true by default, i.e. only -// the number of documents that succeeded/failed are returned. Set to false -// if you want all the details. -func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer { - ix.statsOnly = statsOnly - return ix -} - -// Do starts the reindexing process. -func (ix *Reindexer) Do() (*ReindexerResponse, error) { - if ix.sourceClient == nil { - return nil, errors.New("no source client") - } - if ix.sourceIndex == "" { - return nil, errors.New("no source index") - } - if ix.targetClient == nil { - ix.targetClient = ix.sourceClient - } - if ix.scanFields == nil { - ix.scanFields = []string{"_source", "_parent", "_routing"} - } - if ix.bulkSize <= 0 { - ix.bulkSize = 500 - } - if ix.scroll == "" { - ix.scroll = "5m" - } - - // Count total to report progress (if necessary) - var err error - var current, total int64 - if ix.progress != nil { - total, err = ix.count() - if err != nil { - return nil, err - } - } - - // Prepare scan and scroll to iterate through the source index - scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...) 
- if ix.query != nil { - scanner = scanner.Query(ix.query) - } - if ix.size > 0 { - scanner = scanner.Size(ix.size) - } - cursor, err := scanner.Do() - - bulk := ix.targetClient.Bulk() - - ret := &ReindexerResponse{ - Errors: make([]*BulkResponseItem, 0), - } - - // Main loop iterates through the source index and bulk indexes into target. - for { - docs, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - return ret, err - } - - if docs.TotalHits() > 0 { - for _, hit := range docs.Hits.Hits { - if ix.progress != nil { - current++ - ix.progress(current, total) - } - - err := ix.reindexerFunc(hit, bulk) - if err != nil { - return ret, err - } - - if bulk.NumberOfActions() >= ix.bulkSize { - bulk, err = ix.commit(bulk, ret) - if err != nil { - return ret, err - } - } - } - } - } - - // Final flush - if bulk.NumberOfActions() > 0 { - bulk, err = ix.commit(bulk, ret) - if err != nil { - return ret, err - } - bulk = nil - } - - return ret, nil -} - -// count returns the number of documents in the source index. -// The query is taken into account, if specified. -func (ix *Reindexer) count() (int64, error) { - service := ix.sourceClient.Count(ix.sourceIndex) - if ix.query != nil { - service = service.Query(ix.query) - } - return service.Do() -} - -// commit commits a bulk, updates the stats, and returns a fresh bulk service. -func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) { - bres, err := bulk.Do() - if err != nil { - return nil, err - } - ret.Success += int64(len(bres.Succeeded())) - failed := bres.Failed() - ret.Failed += int64(len(failed)) - if !ix.statsOnly { - ret.Errors = append(ret.Errors, failed...) - } - bulk = ix.targetClient.Bulk() - return bulk, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/reindexer_test.go b/vendor/gopkg.in/olivere/elastic.v3/reindexer_test.go deleted file mode 100644 index 6ee781e07..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/reindexer_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestReindexer(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -func TestReindexerWithQuery(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - q := NewTermQuery("user", "olivere") - - sourceCount, err := 
client.Count(testIndexName).Query(q).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.Query(q) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -func TestReindexerProgress(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - var calls int64 - totalsOk := true - progress := func(current, total int64) { - calls++ - totalsOk = totalsOk && total == sourceCount - } - - r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.Progress(progress) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if calls != sourceCount { - t.Errorf("expected progress to be called %d times; got: %d", sourceCount, calls) - } - if !totalsOk { - t.Errorf("expected totals in progress to be %d", sourceCount) - } -} - -func TestReindexerWithTargetClient(t *testing.T) { - sourceClient := setupTestClientAndCreateIndexAndAddDocs(t) - targetClient, err := NewClient() - if err != nil { - t.Fatal(err) - } - - sourceCount, err := sourceClient.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := targetClient.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(sourceClient, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.TargetClient(targetClient) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := 
targetClient.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = targetClient.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -// TestReindexerPreservingTTL shows how a caller can take control of the -// copying process by providing ScanFields and a custom ReindexerFunc. -func TestReindexerPreservingTTL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").TTL("999999s").Version(10).VersionType("external").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - // Carries over the source item's ttl to the reindexed item - copyWithTTL := func(hit *SearchHit, bulkService *BulkService) error { - source := make(map[string]interface{}) - if err := json.Unmarshal(*hit.Source, &source); err != nil { - return err - } - req := NewBulkIndexRequest().Index(testIndexName2).Type(hit.Type).Id(hit.Id).Doc(source) - if hit.TTL > 0 { - req = req.Ttl(hit.TTL) - } - bulkService.Add(req) - return nil - } - - r := NewReindexer(client, testIndexName, copyWithTTL).ScanFields("_source", "_ttl") - - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - getResult, err := client.Get().Index(testIndexName2).Id("1").Fields("_source", "_ttl").Do() - if err != nil { - t.Fatal(err) - } - - if getResult.TTL <= 0 { - t.Errorf("expected TTL field in reindexed document") - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/request.go b/vendor/gopkg.in/olivere/elastic.v3/request.go index 6e9b108fb..e0e71b5e3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/request.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -40,13 +40,15 @@ func (r *Request) SetBody(body interface{}, gzipCompress bool) error { case string: if gzipCompress { return r.setBodyGzip(b) + } else { + return r.setBodyString(b) } - return r.setBodyString(b) default: if gzipCompress { return r.setBodyGzip(body) + } else { + return r.setBodyJson(body) } - return r.setBodyJson(body) } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/rescore.go b/vendor/gopkg.in/olivere/elastic.v3/rescore.go index 0cbc06710..9b7eaee1d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/rescore.go +++ b/vendor/gopkg.in/olivere/elastic.v3/rescore.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/rescorer.go b/vendor/gopkg.in/olivere/elastic.v3/rescorer.go index 28ad59cbb..ccd4bb854 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/rescorer.go +++ b/vendor/gopkg.in/olivere/elastic.v3/rescorer.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/response.go b/vendor/gopkg.in/olivere/elastic.v3/response.go index 9426c23af..e7380d98a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/response.go +++ b/vendor/gopkg.in/olivere/elastic.v3/response.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
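For context on the SetBody hunk above: the restructure is behavior-preserving. String bodies are still sent verbatim and every other value is JSON-encoded, with optional gzip compression on either path. A minimal standalone mirror of that dispatch rule, as a sketch only (encodeBody and this program are illustrative, not part of the library):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
)

// encodeBody mirrors SetBody's dispatch: strings pass through unchanged,
// any other value is JSON-encoded, and either result may be gzip-compressed.
func encodeBody(body interface{}, gzipCompress bool) ([]byte, error) {
	var raw []byte
	switch b := body.(type) {
	case string:
		raw = []byte(b)
	default:
		j, err := json.Marshal(body)
		if err != nil {
			return nil, err
		}
		raw = j
	}
	if !gzipCompress {
		return raw, nil
	}
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := encodeBody(map[string]int{"retweets": 3}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"retweets":3}
}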
diff --git a/vendor/gopkg.in/olivere/elastic.v3/run-es.sh b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-beta1.sh similarity index 64% rename from vendor/gopkg.in/olivere/elastic.v3/run-es.sh rename to vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-beta1.sh index affc3cf67..08c67ea17 100755 --- a/vendor/gopkg.in/olivere/elastic.v3/run-es.sh +++ b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-beta1.sh @@ -1,2 +1 @@ -mkdir -p config/scripts -docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:2.4.1 elasticsearch +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-beta1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-rc1.sh b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-rc1.sh new file mode 100755 index 000000000..d4586acca --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0-rc1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-rc1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0.sh b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0.sh new file mode 100755 index 000000000..e7a98c8fc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.0.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.1.sh b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.1.sh new file mode 100755 index 000000000..528670211 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/run-es-5.0.1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v3/scan.go b/vendor/gopkg.in/olivere/elastic.v3/scan.go deleted file mode 100644 index 5fad5f94d..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/scan.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "errors" - "fmt" - "io" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -const ( - defaultKeepAlive = "5m" -) - -var ( - // End of stream (or scan) - EOS = io.EOF - - // No ScrollId - ErrNoScrollId = errors.New("no scrollId") -) - -// ScanService manages a cursor through documents in Elasticsearch. -type ScanService struct { - client *Client - indices []string - types []string - keepAlive string - body interface{} - searchSource *SearchSource - pretty bool - routing string - preference string - size *int -} - -// NewScanService creates a new service to iterate through the results -// of a query. -func NewScanService(client *Client) *ScanService { - builder := &ScanService{ - client: client, - searchSource: NewSearchSource().Query(NewMatchAllQuery()), - } - return builder -} - -// Index sets the name(s) of the index to use for scan. 
-func (s *ScanService) Index(indices ...string) *ScanService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -// Types allows to restrict the scan to a list of types. -func (s *ScanService) Type(types ...string) *ScanService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) - return s -} - -// Scroll is an alias for KeepAlive, the time to keep -// the cursor alive (e.g. "5m" for 5 minutes). -func (s *ScanService) Scroll(keepAlive string) *ScanService { - s.keepAlive = keepAlive - return s -} - -// KeepAlive sets the maximum time the cursor will be -// available before expiration (e.g. "5m" for 5 minutes). -func (s *ScanService) KeepAlive(keepAlive string) *ScanService { - s.keepAlive = keepAlive - return s -} - -// Fields tells Elasticsearch to only load specific fields from a search hit. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html. -func (s *ScanService) Fields(fields ...string) *ScanService { - s.searchSource = s.searchSource.Fields(fields...) - return s -} - -// Body sets the raw body to send to Elasticsearch. This can be e.g. a string, -// a map[string]interface{} or anything that can be serialized into JSON. -// Notice that setting the body disables the use of SearchSource and many -// other properties of the ScanService. -func (s *ScanService) Body(body interface{}) *ScanService { - s.body = body - return s -} - -// SearchSource sets the search source builder to use with this service. -func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService { - s.searchSource = searchSource - if s.searchSource == nil { - s.searchSource = NewSearchSource().Query(NewMatchAllQuery()) - } - return s -} - -// Routing allows for (a comma-separated) list of specific routing values. -func (s *ScanService) Routing(routings ...string) *ScanService { - s.routing = strings.Join(routings, ",") - return s -} - -// Preference specifies the node or shard the operation should be -// performed on (default: "random"). -func (s *ScanService) Preference(preference string) *ScanService { - s.preference = preference - return s -} - -// Query sets the query to perform, e.g. MatchAllQuery. -func (s *ScanService) Query(query Query) *ScanService { - s.searchSource = s.searchSource.Query(query) - return s -} - -// PostFilter is executed as the last filter. It only affects the -// search hits but not facets. See -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html -// for details. -func (s *ScanService) PostFilter(postFilter Query) *ScanService { - s.searchSource = s.searchSource.PostFilter(postFilter) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *ScanService) FetchSource(fetchSource bool) *ScanService { - s.searchSource = s.searchSource.FetchSource(fetchSource) - return s -} - -// FetchSourceContext indicates how the _source should be fetched. -func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService { - s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) - return s -} - -// Version can be set to true to return a version for each search hit. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html. 
-func (s *ScanService) Version(version bool) *ScanService { - s.searchSource = s.searchSource.Version(version) - return s -} - -// Sort the results by the given field, in the given order. -// Use the alternative SortWithInfo to use a struct to define the sorting. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) Sort(field string, ascending bool) *ScanService { - s.searchSource = s.searchSource.Sort(field, ascending) - return s -} - -// SortWithInfo defines how to sort results. -// Use the Sort func for a shortcut. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) SortWithInfo(info SortInfo) *ScanService { - s.searchSource = s.searchSource.SortWithInfo(info) - return s -} - -// SortBy defines how to sort results. -// Use the Sort func for a shortcut. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) SortBy(sorter ...Sorter) *ScanService { - s.searchSource = s.searchSource.SortBy(sorter...) - return s -} - -// Pretty enables the caller to indent the JSON output. -func (s *ScanService) Pretty(pretty bool) *ScanService { - s.pretty = pretty - return s -} - -// Size is the number of results to return per shard, not per request. -// So a size of 10 which hits 5 shards will return a maximum of 50 results -// per scan request. -func (s *ScanService) Size(size int) *ScanService { - s.size = &size - return s -} - -// Do executes the query and returns a "server-side cursor". -func (s *ScanService) Do() (*ScanCursor, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - // Types - typesPart := make([]string, 0) - for _, typ := range s.types { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) - } - if len(typesPart) > 0 { - path += "/" + strings.Join(typesPart, ",") - } - - // Search - path += "/_search" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.keepAlive != "" { - params.Set("scroll", s.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - if s.size != nil && *s.size > 0 { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - - // Get response - var err error - var body interface{} - if s.body != nil { - body = s.body - } else { - if !s.searchSource.hasSort() { - // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. 
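// As the deprecation note above says, the replacement for search_type=scan
// in ES 2.1+ is sorting by _doc; the deleted TestScanWithSortByDoc further
// below exercised exactly that:
//
//	cursor, err := client.Scan(testIndexName).Sort("_doc", true).Size(1).Do()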
- params.Set("search_type", "scan") - } - body, err = s.searchSource.Source() - if err != nil { - return nil, err - } - } - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - searchResult := new(SearchResult) - if err := s.client.decoder.Decode(res.Body, searchResult); err != nil { - return nil, err - } - - cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult) - - return cursor, nil -} - -// scanCursor represents a single page of results from -// an Elasticsearch Scan operation. -type ScanCursor struct { - Results *SearchResult - - client *Client - keepAlive string - pretty bool - currentPage int -} - -// newScanCursor returns a new initialized instance -// of scanCursor. -func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor { - return &ScanCursor{ - client: client, - keepAlive: keepAlive, - pretty: pretty, - Results: searchResult, - } -} - -// TotalHits is a convenience method that returns the number -// of hits the cursor will iterate through. -func (c *ScanCursor) TotalHits() int64 { - if c.Results.Hits == nil { - return 0 - } - return c.Results.Hits.TotalHits -} - -// Next returns the next search result or nil when all -// documents have been scanned. -// -// Usage: -// -// for { -// res, err := cursor.Next() -// if err == elastic.EOS { -// // End of stream (or scan) -// break -// } -// if err != nil { -// // Handle error -// } -// // Work with res -// } -// -func (c *ScanCursor) Next() (*SearchResult, error) { - if c.currentPage > 0 { - if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 { - return nil, EOS - } - } - if c.Results.ScrollId == "" { - return nil, EOS - } - - // Build url - path := "/_search/scroll" - - // Parameters - params := make(url.Values) - if c.pretty { - params.Set("pretty", fmt.Sprintf("%v", c.pretty)) - } - if c.keepAlive != "" { - params.Set("scroll", c.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - - // Set body - body := c.Results.ScrollId - - // Get response - res, err := c.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - c.Results = &SearchResult{ScrollId: body} - if err := c.client.decoder.Decode(res.Body, c.Results); err != nil { - return nil, err - } - - c.currentPage += 1 - - return c.Results, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/scan_test.go b/vendor/gopkg.in/olivere/elastic.v3/scan_test.go deleted file mode 100644 index 2885cff0b..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/scan_test.go +++ /dev/null @@ -1,658 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
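The ScanService deleted above, and its tests below, are superseded by the context-aware ScrollService updated later in this patch. A minimal sketch of the equivalent iteration against a v5-style client; the import path follows the v5 package this patch vendors, and the index name, page size, and scanAll helper are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"io"

	elastic "gopkg.in/olivere/elastic.v5"
)

// scanAll pages through all documents in an index. io.EOF replaces the
// old EOS sentinel that ScanService returned at end of stream.
func scanAll(client *elastic.Client, index string) error {
	svc := client.Scroll(index).Size(100)
	defer svc.Clear(context.TODO()) // best-effort cleanup of the scroll context
	for {
		res, err := svc.Do(context.TODO())
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		for _, hit := range res.Hits.Hits {
			fmt.Println(hit.Id)
		}
	}
}

func main() {
	client, err := elastic.NewClient() // assumes Elasticsearch at localhost:9200
	if err != nil {
		panic(err)
	}
	if err := scanAll(client, "twitter"); err != nil {
		panic(err)
	}
}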
- -package elastic - -import ( - "encoding/json" - "io" - _ "net/http" - "testing" -) - -func TestScan(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Fatalf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Fatalf("expected results.Hits != nil; got nil") - } - if want, have := int64(3), cursor.Results.Hits.TotalHits; want != have { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) - } - if want, have := 0, len(cursor.Results.Hits.Hits); want != have { - t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have) - } - - pages := 0 - docs := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages++ - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - docs++ - } - } - - if pages != 4 { - t.Fatalf("expected to retrieve %d pages; got %d", 4, pages) - } - if docs != 3 { - t.Errorf("expected to retrieve %d hits; got %d", 3, docs) - } -} - -func TestScanWithSort(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // We sort on a numerical field, because sorting on the 'message' string field would - // raise the whole question of tokenizing and analyzing. 
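// (An aside, not something this test relies on: with ES 5.x default dynamic
// mappings, the equivalent sort on a string field would target its keyword
// sub-field, e.g. Sort("message.keyword", true).)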
- cursor, err := client.Scan(testIndexName).Sort("retweets", true).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Fatal("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Fatal("expected results.Hits != nil; got nil") - } - if want, have := int64(3), cursor.Results.Hits.TotalHits; want != have { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) - } - if want, have := 1, len(cursor.Results.Hits.Hits); want != have { - t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have) - } - if want, have := "3", cursor.Results.Hits.Hits[0].Id; want != have { - t.Fatalf("expected hitID = %v; got %v", want, have) - } - - docs := 1 // The cursor already gave us a result - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - docs += 1 - } - } - - if pages != 3 { - t.Fatalf("expected to retrieve %d pages; got %d", 3, pages) - } - if docs != 3 { - t.Fatalf("expected to retrieve %d hits; got %d", 3, docs) - } -} - -func TestScanWithSortByDoc(t *testing.T) { - // Sorting by doc is introduced in Elasticsearch 2.1, - // and replaces the deprecated search_type=scan. - // See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "2.1" { - t.Skipf(`Elasticsearch %s does not have {"sort":["_doc"]}`, esversion) - return - } - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - comment1 := comment{User: "nico", Comment: "You bet."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Sort("_doc", true).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - docs := 0 - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for range searchResult.Hits.Hits { - docs += 1 - } - } - - if pages != 3 { - t.Fatalf("expected to retrieve %d pages; got %d", 3, pages) - } - if docs != 2 { - t.Fatalf("expected to retrieve %d hits; got %d", 2, docs) - } -} - -func TestScanWithSearchSource(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t) - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} - tweet3 := tweet{User: 
"sandrae", Message: "Cycling is fun.", Retweets: 3} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - src := NewSearchSource(). - Query(NewTermQuery("user", "olivere")). - FetchSourceContext(NewFetchSourceContext(true).Include("retweets")) - cursor, err := client.Scan(testIndexName).SearchSource(src).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Fatalf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Fatalf("expected results.Hits != nil; got nil") - } - if want, have := int64(2), cursor.Results.Hits.TotalHits; want != have { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) - } - - docs := 0 - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - if _, found := item["message"]; found { - t.Fatalf("expected to not see field %q; got: %#v", "message", item) - } - docs += 1 - } - } - - if pages != 3 { - t.Fatalf("expected to retrieve %d pages; got %d", 3, pages) - } - if docs != 2 { - t.Fatalf("expected to retrieve %d hits; got %d", 2, docs) - } -} - -func TestScanWithBody(t *testing.T) { - // client := setupTestClientAndCreateIndexAndLog(t) - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Test with simple strings and a map - var tests = []struct { - Body interface{} - ExpectedTotalHits int64 - ExpectedDocs int - ExpectedPages int - }{ - { - Body: `{"query":{"match_all":{}}}`, - ExpectedTotalHits: 3, - ExpectedDocs: 3, - ExpectedPages: 3, - }, - /* - { - Body: `{"query":{"term":{"user":"olivere"}},"sort":["_doc"]}`, - ExpectedTotalHits: 2, - ExpectedDocs: 2, - ExpectedPages: 2, - }, - { - Body: `{"query":{"term":{"user":"olivere"}},"sort":[{"retweets":"desc"}]}`, - ExpectedTotalHits: 2, - ExpectedDocs: 2, - ExpectedPages: 2, - }, - { - Body: map[string]interface{}{ - "query": map[string]interface{}{ - "term": map[string]interface{}{ - "user": "olivere", - }, - }, - "sort": []interface{}{"_doc"}, - }, - 
ExpectedTotalHits: 2, - ExpectedDocs: 2, - ExpectedPages: 2, - }, - */ - } - - for i, tt := range tests { - cursor, err := client.Scan(testIndexName).Body(tt.Body).Size(1).Do() - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - if cursor.Results == nil { - t.Fatalf("#%d: expected search results, got nil", i) - } - if want, have := tt.ExpectedTotalHits, cursor.Results.Hits.TotalHits; want != have { - t.Fatalf("#%d: expected results.Hits.TotalHits = %d; got %d", i, want, have) - } - docs := len(cursor.Results.Hits.Hits) - for { - _, err = cursor.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - if cursor.Results == nil { - t.Fatalf("#%d: expected search results, got nil", i) - } - docs += len(cursor.Results.Hits.Hits) - } - if want, have := tt.ExpectedDocs, docs; want != have { - t.Fatalf("#%d: expected to retrieve %d documents; got %d", i, want, have) - } - } -} - -func TestScanWithQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Return tweets from olivere only - termQuery := NewTermQuery("user", "olivere") - cursor, err := client.Scan(testIndexName). - Size(1). - Query(termQuery). 
- Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Fatal("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Fatal("expected results.Hits != nil; got nil") - } - if want, have := int64(2), cursor.Results.Hits.TotalHits; want != have { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) - } - if want, have := 0, len(cursor.Results.Hits.Hits); want != have { - t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have) - } - - pages := 0 - docs := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - docs += 1 - } - } - - if pages != 3 { - t.Fatalf("expected to retrieve at %d pages; got %d", 3, pages) - } - if docs != 2 { - t.Fatalf("expected to retrieve %d hits; got %d", 2, docs) - } -} - -func TestScanAndScrollWithMissingIndex(t *testing.T) { - client := setupTestClient(t) // does not create testIndexName - - cursor, err := client.Scan(testIndexName).Scroll("30s").Do() - if err == nil { - t.Fatalf("expected error != nil; got: %v", err) - } - if cursor != nil { - t.Fatalf("expected cursor == nil; got: %v", cursor) - } -} - -func TestScanAndScrollWithEmptyIndex(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - if isTravis() { - t.Skip("test on Travis failes regularly with " + - "Error 503 (Service Unavailable): SearchPhaseExecutionException[Failed to execute phase [init_scan], all shards failed]") - } - - _, err := client.Flush().Index(testIndexName).WaitIfOngoing(true).Do() - if err != nil { - t.Fatal(err) - } - - cursor, err := client.Scan(testIndexName).Scroll("30s").Do() - if err != nil { - t.Fatal(err) - } - if cursor == nil { - t.Fatalf("expected cursor; got: %v", cursor) - } - - // First request returns no error, but no hits - res, err := cursor.Next() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected results != nil; got: nil") - } - if res.ScrollId == "" { - t.Fatalf("expected scrollId in results; got: %q", res.ScrollId) - } - if want, have := int64(0), res.TotalHits(); want != have { - t.Fatalf("expected TotalHits() = %d; got %d", want, have) - } - if res.Hits == nil { - t.Fatal("expected results.Hits != nil; got: nil") - } - if want, have := int64(0), res.Hits.TotalHits; want != have { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) - } - if res.Hits.Hits == nil { - t.Fatalf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits) - } - if want, have := 0, len(res.Hits.Hits); want != have { - t.Fatalf("expected len(results.Hits.Hits) == %d; got: %d", want, have) - } - - // Subsequent requests return EOS - res, err = cursor.Next() - if err != EOS { - t.Fatal(err) - } - if res != nil { - t.Fatalf("expected results == %v; got: %v", nil, res) - } - - res, err = cursor.Next() - if err != EOS { - t.Fatal(err) - } - if res != nil { - t.Fatalf("expected results == %v; got: %v", nil, res) - } -} - -func TestScanIssue119(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - comment1 := comment{User: "nico", Comment: "You bet."} - tweet2 := tweet{User: "olivere", Message: "Another 
unrelated topic."} - - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do() - if err != nil { - t.Fatal(err) - } - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Type == "tweet" { - if _, ok := hit.Fields["_parent"].(string); ok { - t.Errorf("Type `tweet` cannot have any parent...") - - toPrint, _ := json.MarshalIndent(hit, "", " ") - t.Fatal(string(toPrint)) - } - } - - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/script.go b/vendor/gopkg.in/olivere/elastic.v3/script.go index a5c9e45e2..57b4d74fd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/script.go +++ b/vendor/gopkg.in/olivere/elastic.v3/script.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/script_test.go b/vendor/gopkg.in/olivere/elastic.v3/script_test.go index 552d92a02..355e13a06 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/script_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/script_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/scroll.go b/vendor/gopkg.in/olivere/elastic.v3/scroll.go index b669bd606..01e04c65f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/scroll.go +++ b/vendor/gopkg.in/olivere/elastic.v3/scroll.go @@ -13,7 +13,12 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +const ( + // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive. + DefaultScrollKeepAlive = "5m" ) // ScrollService iterates over pages of search results from Elasticsearch. @@ -41,7 +46,7 @@ func NewScrollService(client *Client) *ScrollService { builder := &ScrollService{ client: client, ss: NewSearchSource(), - keepAlive: defaultKeepAlive, + keepAlive: DefaultScrollKeepAlive, } return builder } @@ -120,6 +125,15 @@ func (s *ScrollService) PostFilter(postFilter Query) *ScrollService { return s } +// Slice allows slicing the scroll request into several batches. +// This is supported in Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/search-request-scroll.html#sliced-scroll +// for details. 
+func (s *ScrollService) Slice(sliceQuery Query) *ScrollService { + s.ss = s.ss.Slice(sliceQuery) + return s +} + // FetchSource indicates whether the response should contain the stored // _source for every hit. func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService { @@ -216,13 +230,7 @@ func (s *ScrollService) ScrollId(scrollId string) *ScrollService { // Do returns the next search result. It will return io.EOF as error if there // are no more search results. -func (s *ScrollService) Do() (*SearchResult, error) { - return s.DoC(nil) -} - -// DoC returns the next search result. It will return io.EOF as error if there -// are no more search results. -func (s *ScrollService) DoC(ctx context.Context) (*SearchResult, error) { +func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) { s.mu.RLock() nextScrollId := s.scrollId s.mu.RUnlock() @@ -251,7 +259,7 @@ func (s *ScrollService) Clear(ctx context.Context) error { ScrollId: []string{scrollId}, } - _, err := s.client.PerformRequestC(ctx, "DELETE", path, params, body) + _, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) if err != nil { return err } @@ -276,7 +284,7 @@ func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } @@ -369,17 +377,6 @@ func (s *ScrollService) bodyFirst() (interface{}, error) { if err != nil { return nil, err } - - // Slicing (in ES 5.x+) - /* - if s.slice != nil { - src, err := s.slice.Source() - if err != nil { - return nil, err - } - body["slice"] = src - } - */ } return body, nil @@ -401,7 +398,7 @@ func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) { } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/scroll_test.go b/vendor/gopkg.in/olivere/elastic.v3/scroll_test.go index 89136b5eb..6857e2f52 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/scroll_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/scroll_test.go @@ -21,22 +21,22 @@ func TestScroll(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -48,8 +48,8 @@ func TestScroll(t *testing.T) { docs := 0 for { - res, err := svc.Do() - if err == EOS { // or err == io.EOF + res, err := svc.Do(context.TODO()) + if err == io.EOF { break } if err != nil { @@ -99,9 +99,9 @@ func TestScroll(t *testing.T) { t.Fatal(err) } - _, 
err = svc.Do() + _, err = svc.Do(context.TODO()) if err == nil { - t.Fatal(err) + t.Fatal("expected to fail") } } @@ -114,22 +114,22 @@ func TestScrollWithQueryAndSort(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -146,7 +146,7 @@ func TestScrollWithQueryAndSort(t *testing.T) { docs := 0 pages := 0 for { - res, err := svc.Do() + res, err := svc.Do(context.TODO()) if err == io.EOF { break } @@ -201,22 +201,22 @@ func TestScrollWithBody(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -269,7 +269,7 @@ func TestScrollWithBody(t *testing.T) { docs := 0 for { - res, err := svc.Do() + res, err := svc.Do(context.TODO()) if err == io.EOF { break } @@ -320,9 +320,75 @@ func TestScrollWithBody(t *testing.T) { t.Fatalf("#%d: failed to clear scroll context: %v", i, err) } - _, err = svc.Do() + _, err = svc.Do(context.TODO()) if err == nil { - t.Fatalf("#%d: failed to clear scroll context: %v", i, err) + t.Fatalf("#%d: expected to fail", i) + } + } +} + +func TestScrollWithSlice(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + // Should return all documents. Just don't call Do yet! 
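// In practice, sliced scrolling exists so each slice can be drained
// concurrently, one scroll per slice. A hedged sketch of that pattern;
// the slice count of two, the WaitGroup, and the silent error returns
// are illustrative only, not part of this test:
//
//	var wg sync.WaitGroup
//	for id := 0; id < 2; id++ {
//		wg.Add(1)
//		go func(id int) {
//			defer wg.Done()
//			q := NewSliceQuery().Id(id).Max(2)
//			svc := client.Scroll(testIndexName).Slice(q).Size(100)
//			for {
//				res, err := svc.Do(context.TODO())
//				if err == io.EOF {
//					return
//				}
//				if err != nil {
//					return // report the error in real code
//				}
//				for _, hit := range res.Hits.Hits {
//					_ = hit // process
//				}
//			}
//		}(id)
//	}
//	wg.Wait()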
+ sliceQuery := NewSliceQuery().Id(0).Max(2) + svc := client.Scroll(testIndexName).Type("order").Slice(sliceQuery).Size(1) + + pages := 0 + docs := 0 + + for { + res, err := svc.Do(context.TODO()) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected results != nil; got nil") + } + if res.Hits == nil { + t.Fatal("expected results.Hits != nil; got nil") + } + if want, have := int64(6), res.Hits.TotalHits; want != have { + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) + } + if want, have := 1, len(res.Hits.Hits); want != have { + t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have) + } + + pages++ + + for _, hit := range res.Hits.Hits { + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + docs++ } + + if len(res.ScrollId) == 0 { + t.Fatalf("expected scrollId in results; got %q", res.ScrollId) + } + } + + if want, have := 6, pages; want != have { + t.Fatalf("expected to retrieve %d pages; got %d", want, have) + } + if want, have := 6, docs; want != have { + t.Fatalf("expected to retrieve %d hits; got %d", want, have) + } + + if err := svc.Clear(context.TODO()); err != nil { + t.Fatal(err) + } + + if _, err := svc.Do(context.TODO()); err == nil { + t.Fatal("expected to fail") } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search.go b/vendor/gopkg.in/olivere/elastic.v3/search.go index 57236c829..3017cd090 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -13,7 +13,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // Search for documents in Elasticsearch. @@ -230,25 +230,25 @@ func (s *SearchService) SortBy(sorter ...Sorter) *SearchService { return s } -// NoFields indicates that no fields should be loaded, resulting in only +// NoStoredFields indicates that no stored fields should be loaded, resulting in only // id and type to be returned per field. -func (s *SearchService) NoFields() *SearchService { - s.searchSource = s.searchSource.NoFields() +func (s *SearchService) NoStoredFields() *SearchService { + s.searchSource = s.searchSource.NoStoredFields() return s } -// Field adds a single field to load and return (note, must be stored) as +// StoredField adds a single field to load and return (note, must be stored) as // part of the search request. If none are specified, the source of the // document will be returned. -func (s *SearchService) Field(fieldName string) *SearchService { - s.searchSource = s.searchSource.Field(fieldName) +func (s *SearchService) StoredField(fieldName string) *SearchService { + s.searchSource = s.searchSource.StoredField(fieldName) return s } -// Fields sets the fields to load and return as part of the search request. +// StoredFields sets the fields to load and return as part of the search request. // If none are specified, the source of the document will be returned. 
-func (s *SearchService) Fields(fields ...string) *SearchService { - s.searchSource = s.searchSource.Fields(fields...) +func (s *SearchService) StoredFields(fields ...string) *SearchService { + s.searchSource = s.searchSource.StoredFields(fields...) return s } @@ -334,12 +334,7 @@ func (s *SearchService) Validate() error { } // Do executes the search and returns a SearchResult. -func (s *SearchService) Do() (*SearchResult, error) { - return s.DoC(nil) -} - -// DoC executes the search and returns a SearchResult. -func (s *SearchService) DoC(ctx context.Context) (*SearchResult, error) { +func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -362,7 +357,7 @@ func (s *SearchService) DoC(ctx context.Context) (*SearchResult, error) { } body = src } - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } @@ -377,17 +372,15 @@ func (s *SearchService) DoC(ctx context.Context) (*SearchResult, error) { // SearchResult is the result of a search in Elasticsearch. type SearchResult struct { - TookInMillis int64 `json:"took"` // search time in milliseconds - ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations - Hits *SearchHits `json:"hits"` // the actual search hits - Suggest SearchSuggest `json:"suggest"` // results from suggesters - Aggregations Aggregations `json:"aggregations"` // results from aggregations - TimedOut bool `json:"timed_out"` // true if the search timed out - TerminatedEarly bool `json:"terminated_early"` // true if the operation has terminated before e.g. an expiration was reached + TookInMillis int64 `json:"took"` // search time in milliseconds + ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations + Hits *SearchHits `json:"hits"` // the actual search hits + Suggest SearchSuggest `json:"suggest"` // results from suggesters + Aggregations Aggregations `json:"aggregations"` // results from aggregations + TimedOut bool `json:"timed_out"` // true if the search timed out //Error string `json:"error,omitempty"` // used in MultiSearch only // TODO double-check that MultiGet now returns details error information - Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet - Shards *shardsInfo `json:"_shards,omitempty"` // shard information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet } // TotalHits is a convenience function to return the number of hits for @@ -430,15 +423,13 @@ type SearchHit struct { Type string `json:"_type"` // type meta field Id string `json:"_id"` // external or internal Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) - Timestamp int64 `json:"_timestamp"` // timestamp meta field - TTL int64 `json:"_ttl"` // ttl meta field Routing string `json:"_routing"` // routing meta field Parent string `json:"_parent"` // parent meta field Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService Sort []interface{} `json:"sort"` // sort information Highlight SearchHitHighlight `json:"highlight"` // highlighter information Source *json.RawMessage `json:"_source"` // stored document source - Fields map[string]interface{} `json:"fields"` // returned fields + Fields map[string]interface{} `json:"fields"` // returned (stored) fields Explanation *SearchExplanation `json:"_explanation"` // explains how 
the score was computed MatchedQueries []string `json:"matched_queries"` // matched queries InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 @@ -479,12 +470,12 @@ type SearchSuggestion struct { // SearchSuggestionOption is an option of a SearchSuggestion. // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. type SearchSuggestionOption struct { - Text string `json:"text"` - Highlighted string `json:"highlighted"` - Score float64 `json:"score"` - CollateMatch bool `json:"collate_match"` - Freq int `json:"freq"` // deprecated in 2.x - Payload interface{} `json:"payload"` + Text string `json:"text"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Score float64 `json:"_score"` + Source *json.RawMessage `json:"_source"` } // Aggregations (see search_aggs.go) diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go index ff427d0ce..73dc6a268 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -1144,7 +1144,7 @@ func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { type AggregationBucketHistogramItem struct { Aggregations - Key int64 //`json:"key"` + Key float64 //`json:"key"` KeyAsString *string //`json:"key_as_string"` DocCount int64 //`json:"doc_count"` } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go index 903e5461f..d3521388a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go index a305073f3..0486079a9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go index 231c51ef8..029f0cd8c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
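The scroll.go and search.go hunks above carry the substance of this vendor bump from elastic.v3 to elastic.v5: Do always takes a context.Context now (the separate DoC and PerformRequestC variants fold into Do and PerformRequest), a finished scroll is signaled with io.EOF, and field loading moves to the stored-fields naming (StoredField, StoredFields, NoStoredFields). A minimal sketch of the resulting call pattern; the client setup and the "tweets" index are hypothetical, not taken from the vendored code:

package main

import (
	"io"
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Assumes an Elasticsearch 5.x node on http://127.0.0.1:9200.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Every Do call now threads a context through to PerformRequest.
	svc := client.Scroll("tweets").Type("tweet").Size(100)
	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			break // scroll exhausted
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("page with %d of %d hits", len(res.Hits.Hits), res.Hits.TotalHits)
	}

	// Release the server-side scroll context when done.
	if err := svc.Clear(ctx); err != nil {
		log.Fatal(err)
	}
}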
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go index 3c826ce9e..ddf790834 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go index 40b7fb8db..4f29b14dc 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go index 42c525121..c9ceaec8e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go index 101399882..2e04dea5a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go index 5c6262a26..6aa4fbb7c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go index 9acceb247..2fcb17998 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. 
// See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go index f074425e0..95cc8d7c3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go index 276f02877..00afbab09 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go index 4cb0cd9f8..0466dca21 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go index 49e24d60f..c96e3c82f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go index 8b55010c7..5f1e5e6cb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go index 7821adbc0..ac42fe98e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
// Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go index 6a5d5fb92..aeb7eec54 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go index ca610c953..82f6de707 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go index b52a96511..179c3084f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go index f65da8048..3da1b99bf 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go index c55612f07..219943e3d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go index 9cfb14792..2a8fd138a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
// Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go index f0fd5f5fd..361840ae1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go index 9a6df15ec..8fb61b771 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,9 +10,6 @@ package elastic // that share a common value such as an "author". // See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html type SamplerAggregation struct { - field string - script *Script - missing interface{} subAggregations map[string]Aggregation meta map[string]interface{} @@ -29,22 +26,6 @@ func NewSamplerAggregation() *SamplerAggregation { } } -func (a *SamplerAggregation) Field(field string) *SamplerAggregation { - a.field = field - return a -} - -func (a *SamplerAggregation) Script(script *Script) *SamplerAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *SamplerAggregation) Missing(missing interface{}) *SamplerAggregation { - a.missing = missing - return a -} - func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { a.subAggregations[name] = subAggregation return a @@ -78,7 +59,6 @@ func (a *SamplerAggregation) Source() (interface{}, error) { // "aggs" : { // "sample" : { // "sampler" : { - // "field" : "user.id", // "shard_size" : 200 // }, // "aggs": { @@ -98,21 +78,6 @@ func (a *SamplerAggregation) Source() (interface{}, error) { opts := make(map[string]interface{}) source["sampler"] = opts - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - if a.shardSize >= 0 { opts["shard_size"] = a.shardSize } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go index da4ca5534..c4dc1c7cc 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -12,7 +12,6 @@ import ( func TestSamplerAggregation(t *testing.T) { keywordsAgg := NewSignificantTermsAggregation().Field("text") agg := NewSamplerAggregation(). - Field("user.id"). ShardSize(200). SubAggregation("keywords", keywordsAgg) src, err := agg.Source() @@ -24,28 +23,7 @@ func TestSamplerAggregation(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","shard_size":200}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSamplerAggregationWithMissing(t *testing.T) { - keywordsAgg := NewSignificantTermsAggregation().Field("text") - agg := NewSamplerAggregation(). - Field("user.id"). - Missing("n/a"). - SubAggregation("keywords", keywordsAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","missing":"n/a"}}` + expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go index 041bdb43d..c6b24f929 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go index d5d27b6ea..2f87373d7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go index 2d3c0d1ad..7c72d1ab0 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
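The sampler hunks above remove the aggregation's field, script and missing options: in Elasticsearch 5.x the plain sampler aggregation only caps the number of documents sampled per shard, and field-based diversification moved to the separate diversified_sampler aggregation. A sketch of the surviving API, mirroring the updated test (standalone main wrapper is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Sample at most 200 documents per shard, then look for significant
	// keywords inside that sample.
	keywords := elastic.NewSignificantTermsAggregation().Field("text")
	agg := elastic.NewSamplerAggregation().
		ShardSize(200).
		SubAggregation("keywords", keywords)

	src, err := agg.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	// {"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}
	fmt.Println(string(data))
}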
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go index e5f979333..e84f51a15 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go index 37ec2b7ad..e09ba347a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go index c8539d12d..784ff45dd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go index ebf247c79..c21d6c8b1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go index bccfa7aae..b5f8490b5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go index 69447409c..b2147bd9f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. 
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go
index 4a80693cf..76489630d 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go
index 647ba5139..f675cbdb4 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go
index 3096b8ee5..ea713c604 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go
index 334cff020..7d7de53d1 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go
index b5da00c19..773cc2e4b 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
 // Use of this source code is governed by a MIT-license.
 // See http://olivere.mit-license.org/license.txt for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go
index f9e21f7a8..3a2578d7f 100644
--- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go
+++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go
@@ -1,4 +1,4 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go index 170650667..fcde3817c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go index c0b3aa663..41623a9c3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go index df4b7c4a3..a4bac02b5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go index b1695ebb3..0f7f77db9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go index da2d2055e..93df1dd29 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go index 42da9c854..0a27f2e65 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go index 0ea0b175d..5cff372d4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go index 6f783e7e1..9eb74d61a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go index 737808931..ff0e42545 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go index c017abb98..9d84790b2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -49,8 +49,8 @@ func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation { return a } -func (a *TopHitsAggregation) NoFields() *TopHitsAggregation { - a.searchSource = a.searchSource.NoFields() +func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation { + a.searchSource = a.searchSource.NoStoredFields() return a } @@ -64,13 +64,13 @@ func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceC return a } -func (a *TopHitsAggregation) FieldDataFields(fieldDataFields ...string) *TopHitsAggregation { - a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...) 
+func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation { + a.searchSource = a.searchSource.DocvalueFields(docvalueFields...) return a } -func (a *TopHitsAggregation) FieldDataField(fieldDataField string) *TopHitsAggregation { - a.searchSource = a.searchSource.FieldDataField(fieldDataField) +func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation { + a.searchSource = a.searchSource.DocvalueField(docvalueField) return a } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go index 2634a22b6..ff238ee62 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go index b2e3e8241..772555523 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go index eee189b51..18d2ba119 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go index 5cd93d5cc..56c5aab5b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go index 0e6509ecb..019b8f1ad 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
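The top_hits hunks above track the same ES 5.x renaming on the aggregation side: NoFields becomes NoStoredFields, and FieldDataField(s) becomes DocvalueField(s), matching the docvalue_fields search option. A sketch of a top-hits aggregation under the new names (the index, field names, and client setup are hypothetical):

package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// One bucket per user; each bucket keeps its newest tweet and reads
	// "retweets" from doc values rather than from stored fields.
	topHits := elastic.NewTopHitsAggregation().
		Size(1).
		Sort("created", false).
		DocvalueField("retweets")
	byUser := elastic.NewTermsAggregation().
		Field("user").
		SubAggregation("latest", topHits)

	res, err := client.Search("tweets").
		Aggregation("by_user", byUser).
		Size(0).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("search took %dms", res.TookInMillis)
}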
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go index 44d6bc624..ddce02ebf 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go index 7f4d966d0..b4e6bf1c0 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go index ce17ec1f6..3e074b600 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go index d4e0206de..dd276a867 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go index 018eb918f..4a3d4b6ff 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go index a4023d84e..69a215d43 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go index 66611f46e..7f6f7327a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go index 1d2ec2d38..7e7b26749 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go index da6f9ef36..6eb13aa10 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go index 8bdde8fcd..aa9bf2f6d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go index 325f00f03..c70f1bc78 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. 
All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go index 86fc9cd7f..ff4abf2b2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go index 021144ddc..017e8b1e0 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go index e17c1c0a0..af2fc7c27 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go index db81d3cf4..590375ebb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go index 17e512c5d..6d336a2ee 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go index 16ef64986..4cd204369 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go index a1c84026d..be8275c81 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go index be6bbfc87..bf8049a2c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go @@ -1,10 +1,14 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
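// Note (hypothetical sketch, not part of the vendored file): the test updates
// below switch inline scripts to Painless, the Elasticsearch 5.x default,
// where buckets_path variables must be read through params:
//
//	agg := NewBucketScriptAggregation().
//		AddBucketsPath("appleSales", "apple_sales>sales").
//		AddBucketsPath("totalSales", "total_sales").
//		Script(NewScript("params.appleSales / params.totalSales * 100"))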
package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestAggsIntegrationAvgBucket(t *testing.T) { //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) @@ -31,7 +35,7 @@ func TestAggsIntegrationAvgBucket(t *testing.T) { builder = builder.Aggregation("sales_per_month", h) builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -84,7 +88,7 @@ func TestAggsIntegrationDerivative(t *testing.T) { h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales")) builder = builder.Aggregation("sales_per_month", h) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -225,7 +229,7 @@ func TestAggsIntegrationMaxBucket(t *testing.T) { builder = builder.Aggregation("sales_per_month", h) builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -284,7 +288,7 @@ func TestAggsIntegrationMinBucket(t *testing.T) { builder = builder.Aggregation("sales_per_month", h) builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales")) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -343,7 +347,7 @@ func TestAggsIntegrationSumBucket(t *testing.T) { builder = builder.Aggregation("sales_per_month", h) builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales")) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -396,7 +400,7 @@ func TestAggsIntegrationMovAvg(t *testing.T) { h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum")) builder = builder.Aggregation("my_date_histo", h) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -518,7 +522,7 @@ func TestAggsIntegrationCumulativeSum(t *testing.T) { h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales")) builder = builder.Aggregation("sales_per_month", h) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -647,8 +651,7 @@ func TestAggsIntegrationCumulativeSum(t *testing.T) { } func TestAggsIntegrationBucketScript(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) esversion, err := client.ElasticsearchVersion(DefaultURL) if err != nil { @@ -676,10 +679,10 @@ func TestAggsIntegrationBucketScript(t *testing.T) { GapPolicy("insert_zeros"). AddBucketsPath("appleSales", "apple_sales>sales"). AddBucketsPath("totalSales", "total_sales"). 
- Script(NewScript("appleSales / totalSales * 100"))) + Script(NewScript("params.appleSales / params.totalSales * 100"))) builder = builder.Aggregation("sales_per_month", h) - res, err := builder.Do() + res, err := builder.Pretty(true).Do(context.TODO()) if err != nil { t.Fatalf("%v (maybe scripting is disabled?)", err) } @@ -829,10 +832,10 @@ func TestAggsIntegrationBucketSelector(t *testing.T) { h = h.SubAggregation("sales_bucket_filter", NewBucketSelectorAggregation(). AddBucketsPath("totalSales", "total_sales"). - Script(NewScript("totalSales <= 100"))) + Script(NewScript("params.totalSales <= 100"))) builder = builder.Aggregation("sales_per_month", h) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatalf("%v (maybe scripting is disabled?)", err) } @@ -889,7 +892,7 @@ func TestAggsIntegrationSerialDiff(t *testing.T) { h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1)) builder = builder.Aggregation("sales_per_month", h) - res, err := builder.Do() + res, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_test.go index 4d66163d8..6b6a54018 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_aggs_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_aggs_test.go @@ -9,6 +9,8 @@ import ( "strings" "testing" "time" + + "golang.org/x/net/context" ) func TestAggs(t *testing.T) { @@ -47,22 +49,22 @@ func TestAggs(t *testing.T) { } // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -86,7 +88,7 @@ func TestAggs(t *testing.T) { percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75) cardinalityAgg := NewCardinalityAggregation().Field("user") significantTermsAgg := NewSignificantTermsAggregation().Field("message") - samplerAgg := NewSamplerAggregation().Field("user").SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) + samplerAgg := NewSamplerAggregation().SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100) retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100) dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01") @@ -171,7 +173,7 @@ func TestAggs(t *testing.T) { dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) builder = builder.Aggregation("movingAvgDateHisto", dateHisto) } - searchResult, err := builder.Do() + searchResult, err := 
builder.Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -607,8 +609,8 @@ func TestAggs(t *testing.T) { if samplerAggRes == nil { t.Fatalf("expected != nil; got: nil") } - if samplerAggRes.DocCount != 2 { - t.Errorf("expected %v; got: %v", 2, samplerAggRes.DocCount) + if samplerAggRes.DocCount != 3 { + t.Errorf("expected %v; got: %v", 3, samplerAggRes.DocCount) } sub, found := samplerAggRes.Aggregations["tagged_with"] if !found { @@ -769,7 +771,7 @@ func TestAggs(t *testing.T) { t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount) } if histoRes.Buckets[1].Key != 100.0 { - t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key) + t.Errorf("expected %v; got: %+v", 100.0, histoRes.Buckets[1].Key) } // dateHisto @@ -999,11 +1001,11 @@ func TestAggsMarshal(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -1015,7 +1017,7 @@ func TestAggsMarshal(t *testing.T) { // Run query builder := client.Search().Index(testIndexName).Query(all) builder = builder.Aggregation("dhagg", dhagg) - searchResult, err := builder.Do() + searchResult, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go index 685ce0219..5e15a3b94 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go index 327d3f635..1eb2038fd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go index 7f7a53b8b..9f9a5366b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
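Note on the hunks above: two mechanical API migrations dominate these test changes for the Elasticsearch 5 line of this client. Every builder's Do() now takes a context.Context (imported from golang.org/x/net/context, since this vendored copy predates relying on the stdlib context package), and inline scripts must address their variables as params.* because Painless is the new default script language. A minimal sketch of the resulting call pattern, assuming a reachable cluster on the default URL; the index and field names are illustrative, not part of this patch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // connects to http://127.0.0.1:9200 by default
	if err != nil {
		log.Fatal(err)
	}

	// Monthly buckets with a sum sub-aggregation, post-processed by a
	// bucket_script that now reads its inputs through params.* (Painless).
	h := elastic.NewDateHistogramAggregation().Field("created").Interval("month").
		SubAggregation("sales", elastic.NewSumAggregation().Field("retweets")).
		SubAggregation("pct", elastic.NewBucketScriptAggregation().
			AddBucketsPath("s", "sales").
			Script(elastic.NewScript("params.s * 100")))

	// Do() now requires a context; the tests above pass context.TODO().
	res, err := client.Search().
		Index("twitter").
		Query(elastic.NewMatchAllQuery()).
		Aggregation("per_month", h).
		Do(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("search took %d ms\n", res.TookInMillis)
}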
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go index 0ef03dfef..6c7f263f4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go index d45825067..e99f44303 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go index 02c1c2b60..cade9247f 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -8,6 +8,8 @@ import ( "encoding/json" _ "net/http" "testing" + + "golang.org/x/net/context" ) func TestCommonTermsQuery(t *testing.T) { @@ -35,29 +37,29 @@ func TestSearchQueriesCommonTermsQuery(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Common terms query q := NewCommonTermsQuery("message", "Golang") - searchResult, err := client.Search().Index(testIndexName).Query(q).Do() + searchResult, err := client.Search().Index(testIndexName).Query(q).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go index c754d279d..0fc500cac 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go index bdcce659c..6508a91fb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go index f97b24a1d..52eaa31fb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go index 8b005a61e..76ddfb079 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go index e117673bd..b88555fc5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go index a1112085c..f2d047087 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go index e978c5c1b..be15b6211 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -13,6 +13,7 @@ package elastic // https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html type FunctionScoreQuery struct { query Query + filter Query boost *float64 maxBoost *float64 scoreMode string @@ -34,6 +35,14 @@ func NewFunctionScoreQuery() *FunctionScoreQuery { // Query sets the query for the function score query. func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. +func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter return q } @@ -98,6 +107,12 @@ func (q *FunctionScoreQuery) Source() (interface{}, error) { return nil, err } query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src } if len(q.filters) == 1 && q.filters[0] == nil { diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go index fbce3577d..5c60018ff 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
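The FunctionScoreQuery hunk above adds a Filter setter that is mutually exclusive with Query (setting one clears the other), and Source() now emits whichever of "query" or "filter" is present. A short sketch of the new setter, assuming the score-function helpers already shipped in this package:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	q := elastic.NewFunctionScoreQuery().
		Filter(elastic.NewTermQuery("user", "olivere")). // serialized under "filter", not "query"
		AddScoreFunc(elastic.NewWeightFactorFunction(2.5))

	src, err := q.Source()
	if err != nil {
		log.Fatal(err)
	}
	body, _ := json.Marshal(src)
	fmt.Println(string(body)) // inspect the generated function_score body
}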
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go index 59f1cd191..a8e7430ce 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go index da79dc7e6..152cbb0e6 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go index fbbfe2f94..89140ca23 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go index 808ce82df..4b4e95501 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go index 6b15885ca..59cd437d5 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go index c1eed8521..f84e73b23 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go index f0b8ca654..7b91d94e8 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go index df6e36a65..dbd46a1ef 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go index efe89a8d4..932c57d7b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go index a8907546b..ab0abb7f0 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -13,7 +13,7 @@ type HasChildQuery struct { query Query childType string boost *float64 - scoreType string + scoreMode string minChildren *int maxChildren *int shortCircuitCutoff *int @@ -35,10 +35,11 @@ func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { return q } -// ScoreType defines how the scores from the matching child documents -// are mapped into the parent document. -func (q *HasChildQuery) ScoreType(scoreType string) *HasChildQuery { - q.scoreType = scoreType +// ScoreMode defines how the scores from the matching child documents +// are mapped into the parent document. Allowed values are: min, max, +// avg, or none. 
+func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery { + q.scoreMode = scoreMode return q } @@ -83,6 +84,7 @@ func (q *HasChildQuery) Source() (interface{}, error) { // { // "has_child" : { // "type" : "blog_tag", + // "score_mode" : "min", // "query" : { // "term" : { // "tag" : "something" @@ -103,8 +105,8 @@ func (q *HasChildQuery) Source() (interface{}, error) { if q.boost != nil { query["boost"] = *q.boost } - if q.scoreType != "" { - query["score_type"] = q.scoreType + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode } if q.minChildren != nil { query["min_children"] = *q.minChildren diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go index 887b2e263..745c263f9 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( ) func TestHasChildQuery(t *testing.T) { - q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) + q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).ScoreMode("min") src, err := q.Source() if err != nil { t.Fatal(err) @@ -20,7 +20,7 @@ func TestHasChildQuery(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` + expected := `{"has_child":{"query":{"term":{"tag":"something"}},"score_mode":"min","type":"blog_tag"}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go index 4db1dde7e..ee77d5cb4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -16,7 +16,7 @@ type HasParentQuery struct { query Query parentType string boost *float64 - scoreType string + score *bool queryName string innerHit *InnerHit } @@ -35,9 +35,9 @@ func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { return q } -// ScoreType defines how the parent score is mapped into the child documents. -func (q *HasParentQuery) ScoreType(scoreType string) *HasParentQuery { - q.scoreType = scoreType +// Score defines if the parent score is mapped into the child documents. 
+func (q *HasParentQuery) Score(score bool) *HasParentQuery { + q.score = &score return q } @@ -80,8 +80,8 @@ func (q *HasParentQuery) Source() (interface{}, error) { if q.boost != nil { query["boost"] = *q.boost } - if q.scoreType != "" { - query["score_type"] = q.scoreType + if q.score != nil { + query["score"] = *q.score } if q.queryName != "" { query["_name"] = q.queryName diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go index b5daefda8..0fec395e3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -10,7 +10,7 @@ import ( ) func TestHasParentQueryTest(t *testing.T) { - q := NewHasParentQuery("blog", NewTermQuery("tag", "something")) + q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).Score(true) src, err := q.Source() if err != nil { t.Fatal(err) @@ -20,7 +20,7 @@ func TestHasParentQueryTest(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}` + expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go index 96f463dc6..be70a65b7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go index d1ff9a6b1..b36605b4d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go index 56efab3dd..60c76a7c4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
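Taken together, the two parent/child hunks above track the new wire format: has_child renames score_type to score_mode (per the new doc comment: min, max, avg, or none), while has_parent collapses the old score_type into a boolean score flag. A sketch mirroring the updated tests:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	child := elastic.NewHasChildQuery("blog_tag", elastic.NewTermQuery("tag", "something")).
		ScoreMode("min") // formerly ScoreType, emitted as "score_type"
	parent := elastic.NewHasParentQuery("blog", elastic.NewTermQuery("tag", "something")).
		Score(true) // formerly ScoreType, now a plain boolean "score"

	for _, q := range []elastic.Query{child, parent} {
		src, err := q.Source()
		if err != nil {
			log.Fatal(err)
		}
		body, _ := json.Marshal(src)
		fmt.Println(string(body))
	}
}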
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go index f011b9ac7..0c04499d1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go index b740b0f0d..e4fe6a3b8 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go index 5b5ca590e..2b3d8a71a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go index 0dcebb1f6..11cf5c5f7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_test.go index ade59351f..af3fe688a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_match_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go deleted file mode 100644 index 0fff3f55c..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MissingQuery returns documents that have only null values or no value -// in the original field. 
-// -// For details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html -type MissingQuery struct { - name string - queryName string - nullValue *bool - existence *bool -} - -// NewMissingQuery creates and initializes a new MissingQuery. -func NewMissingQuery(name string) *MissingQuery { - return &MissingQuery{name: name} -} - -// QueryName sets the query name for the query that can be used when -// searching for matched filters hit. -func (q *MissingQuery) QueryName(queryName string) *MissingQuery { - q.queryName = queryName - return q -} - -// NullValue indicates whether the missing filter automatically includes -// fields with null value configured in the mappings. Defaults to false. -func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery { - q.nullValue = &nullValue - return q -} - -// Existence indicates whether the missing filter includes documents where -// the field doesn't exist in the docs. -func (q *MissingQuery) Existence(existence bool) *MissingQuery { - q.existence = &existence - return q -} - -// Source returns JSON for the query. -func (q *MissingQuery) Source() (interface{}, error) { - // { - // "missing" : { - // "field" : "..." - // } - // } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["missing"] = params - params["field"] = q.name - if q.nullValue != nil { - params["null_value"] = *q.nullValue - } - if q.existence != nil { - params["existence"] = *q.existence - } - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go index f89379796..97f0730a1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go index 64bfe4305..76691a33b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
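The deletion of search_queries_missing.go above reflects Elasticsearch dropping the standalone missing query; the documented replacement, and presumably the migration path for callers of this vendored copy, is an exists query negated inside a bool query. The same pattern also covers the not query deleted further down in this patch. A hedged sketch:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Matches documents where "user" is absent or null, i.e. what
	// NewMissingQuery("user") used to express.
	q := elastic.NewBoolQuery().MustNot(elastic.NewExistsQuery("user"))

	src, err := q.Source()
	if err != nil {
		log.Fatal(err)
	}
	body, _ := json.Marshal(src)
	fmt.Println(string(body)) // {"bool":{"must_not":{"exists":{"field":"user"}}}}
}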
@@ -7,6 +7,8 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) { @@ -56,22 +58,22 @@ func TestMoreLikeThisQuery(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -81,7 +83,7 @@ func TestMoreLikeThisQuery(t *testing.T) { res, err := client.Search(). Index(testIndexName). Query(mltq). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go index e367c3a70..3337ce59c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go index 508726bed..d897f7e72 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go index 0a598f8bf..3b5655da4 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go index b068c59b1..af9740553 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
// Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go deleted file mode 100644 index 7a1ee8e08..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_not.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NotQuery filters out matched documents using a query. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html -type NotQuery struct { - filter Query - queryName string -} - -// NewNotQuery creates and initializes a new NotQuery. -func NewNotQuery(filter Query) *NotQuery { - return &NotQuery{ - filter: filter, - } -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *NotQuery) QueryName(queryName string) *NotQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. -func (q *NotQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["not"] = params - - src, err := q.filter.Source() - if err != nil { - return nil, err - } - params["query"] = src - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator.go new file mode 100644 index 000000000..16f7611ed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator.go @@ -0,0 +1,115 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// PercolatorQuery can be used to match queries stored in an index. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html +type PercolatorQuery struct { + field string + documentType string + document interface{} + indexedDocumentIndex string + indexedDocumentType string + indexedDocumentId string + indexedDocumentRouting string + indexedDocumentPreference string + indexedDocumentVersion *int64 +} + +// NewPercolatorQuery creates and initializes a new Percolator query. 
+func NewPercolatorQuery() *PercolatorQuery { + return &PercolatorQuery{} +} + +func (q *PercolatorQuery) Field(field string) *PercolatorQuery { + q.field = field + return q +} + +func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery { + q.documentType = typ + return q +} + +func (q *PercolatorQuery) Document(doc interface{}) *PercolatorQuery { + q.document = doc + return q +} + +func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery { + q.indexedDocumentIndex = index + return q +} + +func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery { + q.indexedDocumentType = typ + return q +} + +func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery { + q.indexedDocumentId = id + return q +} + +func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery { + q.indexedDocumentRouting = routing + return q +} + +func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery { + q.indexedDocumentPreference = preference + return q +} + +func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery { + q.indexedDocumentVersion = &version + return q +} + +// Source returns JSON for the percolate query. +func (q *PercolatorQuery) Source() (interface{}, error) { + if len(q.field) == 0 { + return nil, errors.New("elastic: Field is required in PercolatorQuery") + } + if len(q.documentType) == 0 { + return nil, errors.New("elastic: DocumentType is required in PercolatorQuery") + } + if q.document == nil { + return nil, errors.New("elastic: Document is required in PercolatorQuery") + } + + // { + // "percolate" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["percolate"] = params + params["field"] = q.field + params["document_type"] = q.documentType + params["document"] = q.document + if len(q.indexedDocumentIndex) > 0 { + params["index"] = q.indexedDocumentIndex + } + if len(q.indexedDocumentType) > 0 { + params["type"] = q.indexedDocumentType + } + if len(q.indexedDocumentId) > 0 { + params["id"] = q.indexedDocumentId + } + if len(q.indexedDocumentRouting) > 0 { + params["routing"] = q.indexedDocumentRouting + } + if len(q.indexedDocumentPreference) > 0 { + params["preference"] = q.indexedDocumentPreference + } + if q.indexedDocumentVersion != nil { + params["version"] = *q.indexedDocumentVersion + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator_test.go new file mode 100644 index 000000000..8a22d4614 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_percolator_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercolatorQuery(t *testing.T) { + q := NewPercolatorQuery(). + Field("query"). + DocumentType("doctype"). 
+ Document(map[string]interface{}{ + "message": "Some message", + }) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercolatorQueryWithDetails(t *testing.T) { + q := NewPercolatorQuery(). + Field("query"). + DocumentType("doctype"). + Document(map[string]interface{}{ + "message": "Some message", + }). + IndexedDocumentIndex("index"). + IndexedDocumentType("type"). + IndexedDocumentId("1"). + IndexedDocumentRouting("route"). + IndexedDocumentPreference("one"). + IndexedDocumentVersion(1) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query","id":"1","index":"index","preference":"one","routing":"route","type":"type","version":1}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercolatorQueryWithMissingFields(t *testing.T) { + q := NewPercolatorQuery() // no Field, Document, or Query + _, err := q.Source() + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go index 1628ba8cc..6d2d53b6c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go index ce1b74e41..dcd47e2a1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go index bb26e486e..6f14e29f2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
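The new percolate query added above inverts the usual search flow: the queries live in the index and the document travels with the request, and Source() rejects a query that lacks a field, document type, or document. A usage sketch against a hypothetical index of stored queries; the index name, field name, and mapping type here are assumptions, not part of this patch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	q := elastic.NewPercolatorQuery().
		Field("query").          // percolator-typed field holding the stored queries
		DocumentType("doctype"). // mapping type the in-request document is parsed as
		Document(map[string]interface{}{"message": "A new bonsai tree in the office"})

	// Returns the stored queries that match the supplied document.
	res, err := client.Search().Index("queries").Query(q).Do(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d stored queries matched\n", res.TotalHits())
}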
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go index 74be3fde2..5030c3382 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go index 23e28597f..e519d5ac2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_range_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range_test.go index bf373a85b..86d018a86 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_range_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_range_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go index ecd9f7fe0..b09033519 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go index f4dc2355b..d30c0a36d 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go index 3baa90574..9086bcb1b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_script_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script_test.go index e10510c10..8bf9f8a11 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_script_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_script_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go index 2fdb5910f..203c35020 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go index f6be3e5bd..6d4fe52a2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -7,6 +7,8 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestSimpleQueryStringQuery(t *testing.T) { @@ -27,6 +29,7 @@ func TestSimpleQueryStringQuery(t *testing.T) { } func TestSimpleQueryStringQueryExec(t *testing.T) { + // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} @@ -34,32 +37,31 @@ func TestSimpleQueryStringQueryExec(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Match all should return all documents - query := NewSimpleQueryStringQuery("+Golang +Elasticsearch") searchResult, err := client.Search(). Index(testIndexName). - Query(query). 
- Do() + Query(NewSimpleQueryStringQuery("+Golang +Elasticsearch")). + Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice.go new file mode 100644 index 000000000..5cc2f72fe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice.go @@ -0,0 +1,53 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SliceQuery allows partitioning the documents into several slices. +// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/search-request-scroll.html#sliced-scroll +// for details. +type SliceQuery struct { + field string + id *int + max *int +} + +// NewSliceQuery creates a new SliceQuery. +func NewSliceQuery() *SliceQuery { + return &SliceQuery{} +} + +// Field is the name of the field to slice against (_uid by default). +func (s *SliceQuery) Field(field string) *SliceQuery { + s.field = field + return s +} + +// Id is the id of the slice. +func (s *SliceQuery) Id(id int) *SliceQuery { + s.id = &id + return s +} + +// Max is the maximum number of slices. +func (s *SliceQuery) Max(max int) *SliceQuery { + s.max = &max + return s +} + +// Source returns the JSON body. +func (s *SliceQuery) Source() (interface{}, error) { + m := make(map[string]interface{}) + if s.field != "" { + m["field"] = s.field + } + if s.id != nil { + m["id"] = *s.id + } + if s.max != nil { + m["max"] = *s.max + } + return m, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice_test.go new file mode 100644 index 000000000..0589f4e29 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_slice_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSliceQuery(t *testing.T) { + q := NewSliceQuery().Field("date").Id(0).Max(2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"field":"date","id":0,"max":2}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go deleted file mode 100644 index 0611c3ea4..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TemplateQuery is a query that accepts a query template and a -// map of key/value pairs to fill in template parameters.
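Editor's note: the new SliceQuery above is the primitive behind sliced scrolling in Elasticsearch 5.x. A minimal sketch of how it composes, mirroring TestSliceQuery above; the package main scaffolding and the v5 import path are illustration-only assumptions, not part of the patch:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"gopkg.in/olivere/elastic.v5"
    )

    func main() {
    	// Slice 0 of 2: together with the id=1 slice, the two partitions
    	// cover every document exactly once.
    	q := elastic.NewSliceQuery().Field("date").Id(0).Max(2)
    	src, err := q.Source() // the JSON body checked in TestSliceQuery
    	if err != nil {
    		panic(err)
    	}
    	body, _ := json.Marshal(src)
    	fmt.Println(string(body)) // {"field":"date","id":0,"max":2}
    }

The patch also wires this into SearchSource via the new Slice(...) setter further down, which serializes the query under the "slice" key of the request body.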
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html -type TemplateQuery struct { - template string - templateType string - vars map[string]interface{} -} - -// NewTemplateQuery creates and initializes a new TemplateQuery. -func NewTemplateQuery(name string) *TemplateQuery { - return &TemplateQuery{ - template: name, - vars: make(map[string]interface{}), - } -} - -// Template specifies the name of the template. -func (q *TemplateQuery) Template(name string) *TemplateQuery { - q.template = name - return q -} - -// TemplateType defines which kind of query we use. The values can be: -// inline, indexed, or file. If undefined, inline is used. -func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery { - q.templateType = typ - return q -} - -// Var sets a single parameter pair. -func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery { - q.vars[name] = value - return q -} - -// Vars sets parameters for the template query. -func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery { - q.vars = vars - return q -} - -// Source returns the JSON serializable content for the search. -func (q *TemplateQuery) Source() (interface{}, error) { - // { - // "template" : { - // "query" : {"match_{{template}}": {}}, - // "params" : { - // "template": "all" - // } - // } - // } - - query := make(map[string]interface{}) - - tmpl := make(map[string]interface{}) - query["template"] = tmpl - - // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html - var fieldname string - switch q.templateType { - case "file": // file - fieldname = "file" - case "indexed", "id": // indexed - fieldname = "id" - default: // inline - fieldname = "query" - } - - tmpl[fieldname] = q.template - if len(q.vars) > 0 { - tmpl["params"] = q.vars - } - - return query, nil -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go index c20c5c66e..c809959a8 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_term_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term_test.go index 17c8c9848..f800fa954 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_term_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_term_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go index a7e158859..c95ea9307 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
// Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go index 6de743d14..8818de213 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go index 884d4ae7b..7356c8e34 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_type_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type_test.go index bde0ed3d3..176b82abb 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_type_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_type_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go index 127332da3..44e594675 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go index 5cd529aff..658c513cc 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go @@ -1,14 +1,15 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic_test import ( + "context" "encoding/json" "testing" - "gopkg.in/olivere/elastic.v3" + "gopkg.in/olivere/elastic.v5" ) func ExampleWildcardQuery() { @@ -22,9 +23,9 @@ func ExampleWildcardQuery() { // Define wildcard query q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2) searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(q). // use wildcard query defined above - Do() // execute + Index("twitter"). // search in index "twitter" + Query(q). 
// use wildcard query defined above + Do(context.TODO()) // execute if err != nil { // Handle error panic(err) diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_request.go b/vendor/gopkg.in/olivere/elastic.v3/search_request.go index f294cdb7a..580a26313 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_request.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_request.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_request_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_request_test.go index c672b0705..c64a44cb6 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_request_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_request_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_source.go b/vendor/gopkg.in/olivere/elastic.v3/search_source.go index 031b06856..91641a39c 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_source.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_source.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -13,6 +13,7 @@ import ( type SearchSource struct { query Query postQuery Query + sliceQuery Query from int size int explain *bool @@ -22,8 +23,8 @@ type SearchSource struct { minScore *float64 timeout string terminateAfter *int - fieldNames []string - fieldDataFields []string + storedFieldNames []string + docvalueFields []string scriptFields []*ScriptField fetchSourceContext *FetchSourceContext aggregations map[string]Aggregation @@ -40,17 +41,12 @@ type SearchSource struct { // NewSearchSource initializes a new SearchSource. func NewSearchSource() *SearchSource { return &SearchSource{ - from: -1, - size: -1, - trackScores: false, - sorters: make([]Sorter, 0), - fieldDataFields: make([]string, 0), - scriptFields: make([]*ScriptField, 0), - aggregations: make(map[string]Aggregation), - rescores: make([]*Rescore, 0), - indexBoosts: make(map[string]float64), - stats: make([]string, 0), - innerHits: make(map[string]*InnerHit), + from: -1, + size: -1, + trackScores: false, + aggregations: make(map[string]Aggregation), + indexBoosts: make(map[string]float64), + innerHits: make(map[string]*InnerHit), } } @@ -68,6 +64,16 @@ func (s *SearchSource) PostFilter(postFilter Query) *SearchSource { return s } +// Slice allows partitioning the documents in multiple slices. +// It is e.g. used to slice a scroll operation, supported in +// Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/search-request-scroll.html#sliced-scroll +// for details. +func (s *SearchSource) Slice(sliceQuery Query) *SearchSource { + s.sliceQuery = sliceQuery + return s +} + // From index to start the search from. Defaults to 0. 
func (s *SearchSource) From(from int) *SearchSource { s.from = from @@ -218,45 +224,39 @@ func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext return s } -// NoFields indicates that no fields should be loaded, resulting in only +// NoStoredFields indicates that no fields should be loaded, resulting in only // id and type to be returned per field. -func (s *SearchSource) NoFields() *SearchSource { - s.fieldNames = make([]string, 0) +func (s *SearchSource) NoStoredFields() *SearchSource { + s.storedFieldNames = nil return s } -// Field adds a single field to load and return (note, must be stored) as +// StoredField adds a single field to load and return (note, must be stored) as // part of the search request. If none are specified, the source of the // document will be returned. -func (s *SearchSource) Field(fieldName string) *SearchSource { - if s.fieldNames == nil { - s.fieldNames = make([]string, 0) - } - s.fieldNames = append(s.fieldNames, fieldName) +func (s *SearchSource) StoredField(storedFieldName string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldName) return s } -// Fields sets the fields to load and return as part of the search request. +// StoredFields sets the fields to load and return as part of the search request. // If none are specified, the source of the document will be returned. -func (s *SearchSource) Fields(fieldNames ...string) *SearchSource { - if s.fieldNames == nil { - s.fieldNames = make([]string, 0) - } - s.fieldNames = append(s.fieldNames, fieldNames...) +func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...) return s } -// FieldDataField adds a single field to load from the field data cache +// DocvalueField adds a single field to load from the field data cache // and return as part of the search request. -func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource { - s.fieldDataFields = append(s.fieldDataFields, fieldDataField) +func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, fieldDataField) return s } -// FieldDataFields adds one or more fields to load from the field data cache +// DocvalueFields adds one or more fields to load from the field data cache // and return as part of the search request. -func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource { - s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...) +func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, docvalueFields...) 
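Editor's note: the renames in this hunk track the Elasticsearch 5.0 request-body changes: "fields" became "stored_fields" and "fielddata_fields" became "docvalue_fields". A sketch of the renamed builder calls and the body they produce; the scaffolding is assumed, and the expected JSON is taken from the updated tests below:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"gopkg.in/olivere/elastic.v5"
    )

    func main() {
    	// Before: .Fields("message", "tags").FieldDataFields("test1", "test2")
    	ss := elastic.NewSearchSource().
    		Query(elastic.NewMatchAllQuery()).
    		StoredFields("message", "tags").
    		DocvalueFields("test1", "test2")
    	src, err := ss.Source()
    	if err != nil {
    		panic(err)
    	}
    	body, _ := json.Marshal(src)
    	fmt.Println(string(body))
    	// {"docvalue_fields":["test1","test2"],"query":{"match_all":{}},"stored_fields":["message","tags"]}
    }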
return s } @@ -321,6 +321,13 @@ func (s *SearchSource) Source() (interface{}, error) { } source["post_filter"] = src } + if s.sliceQuery != nil { + src, err := s.sliceQuery.Source() + if err != nil { + return nil, err + } + source["slice"] = src + } if s.minScore != nil { source["min_score"] = *s.minScore } @@ -338,17 +345,17 @@ func (s *SearchSource) Source() (interface{}, error) { source["_source"] = src } - if s.fieldNames != nil { - switch len(s.fieldNames) { + if s.storedFieldNames != nil { + switch len(s.storedFieldNames) { case 1: - source["fields"] = s.fieldNames[0] + source["stored_fields"] = s.storedFieldNames[0] default: - source["fields"] = s.fieldNames + source["stored_fields"] = s.storedFieldNames } } - if len(s.fieldDataFields) > 0 { - source["fielddata_fields"] = s.fieldDataFields + if len(s.docvalueFields) > 0 { + source["docvalue_fields"] = s.docvalueFields } if len(s.scriptFields) > 0 { diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_source_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_source_test.go index 8c436b20a..5c54e5453 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_source_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_source_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -27,9 +27,9 @@ func TestSearchSourceMatchAllQuery(t *testing.T) { } } -func TestSearchSourceNoFields(t *testing.T) { +func TestSearchSourceNoStoredFields(t *testing.T) { matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ).NoFields() + builder := NewSearchSource().Query(matchAllQ).NoStoredFields() src, err := builder.Source() if err != nil { t.Fatal(err) @@ -39,15 +39,15 @@ func TestSearchSourceNoFields(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"fields":[],"query":{"match_all":{}}}` + expected := `{"query":{"match_all":{}}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } } -func TestSearchSourceFields(t *testing.T) { +func TestSearchSourceStoredFields(t *testing.T) { matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ).Fields("message", "tags") + builder := NewSearchSource().Query(matchAllQ).StoredFields("message", "tags") src, err := builder.Source() if err != nil { t.Fatal(err) @@ -57,7 +57,7 @@ func TestSearchSourceFields(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"fields":["message","tags"],"query":{"match_all":{}}}` + expected := `{"query":{"match_all":{}},"stored_fields":["message","tags"]}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } @@ -100,9 +100,9 @@ func TestSearchSourceFetchSourceByWildcards(t *testing.T) { } } -func TestSearchSourceFieldDataFields(t *testing.T) { +func TestSearchSourceDocvalueFields(t *testing.T) { matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ).FieldDataFields("test1", "test2") + builder := NewSearchSource().Query(matchAllQ).DocvalueFields("test1", "test2") src, err := builder.Source() if err != nil { t.Fatal(err) @@ -112,7 +112,7 @@ func TestSearchSourceFieldDataFields(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"fielddata_fields":["test1","test2"],"query":{"match_all":{}}}` + expected := 
`{"docvalue_fields":["test1","test2"],"query":{"match_all":{}}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_suggester_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_suggester_test.go index 02c552af2..94c3a6779 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_suggester_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_suggester_test.go @@ -1,13 +1,13 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic import ( - _ "encoding/json" - _ "net/http" "testing" + + "golang.org/x/net/context" ) func TestTermSuggester(t *testing.T) { @@ -18,29 +18,27 @@ func TestTermSuggester(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Match all should return all documents - all := NewMatchAllQuery() - tsName := "my-suggestions" ts := NewTermSuggester(tsName) ts = ts.Text("Goolang") @@ -48,9 +46,9 @@ func TestTermSuggester(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). - Query(all). + Query(NewMatchAllQuery()). Suggester(ts). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -85,12 +83,6 @@ func TestTermSuggester(t *testing.T) { if myOption.Text != "golang" { t.Errorf("expected Text = 'golang'; got %s", myOption.Text) } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) - } - if myOption.Freq == 0 { - t.Errorf("expected Freq != 0; got %v", myOption.Freq) - } } func TestPhraseSuggester(t *testing.T) { @@ -101,29 +93,27 @@ func TestPhraseSuggester(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Match all should return all documents - all := NewMatchAllQuery() - phraseSuggesterName := "my-suggestions" ps := NewPhraseSuggester(phraseSuggesterName) ps = ps.Text("Goolang") @@ -131,9 +121,9 @@ func TestPhraseSuggester(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). - Query(all). + Query(NewMatchAllQuery()). Suggester(ps). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -161,63 +151,59 @@ func TestPhraseSuggester(t *testing.T) { if mySuggestion.Length != 7 { t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) } - /* - if len(mySuggestion.Options) != 1 { - t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) - } - myOption := mySuggestion.Options[0] - if myOption.Text != "golang" { - t.Errorf("expected Text = 'golang'; got %s", myOption.Text) - } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) - } - */ } -// TODO(oe): I get a "Completion suggester not supported" exception on 0.90.2?! 
-/* func TestCompletionSuggester(t *testing.T) { - client := setupTestClientAndCreateIndex(t) + client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0))) - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Suggest: NewSuggestField("Golang", "Elasticsearch"), + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Suggest: NewSuggestField("Another unrelated topic."), + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Suggest: NewSuggestField("Cycling is fun."), + } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } // Match all should return all documents - all := NewMatchAllQuery() - suggesterName := "my-suggestions" cs := NewCompletionSuggester(suggesterName) - cs = cs.Text("Goolang") - cs = cs.Field("message") + cs = cs.Text("Golang") + cs = cs.Field("suggest_field") searchResult, err := client.Search(). Index(testIndexName). - Query(&all). + Query(NewMatchAllQuery()). Suggester(cs). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -236,24 +222,20 @@ func TestCompletionSuggester(t *testing.T) { t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) } mySuggestion := mySuggestions[0] - if mySuggestion.Text != "Goolang" { - t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) + if mySuggestion.Text != "Golang" { + t.Errorf("expected Text = 'Golang'; got %s", mySuggestion.Text) } if mySuggestion.Offset != 0 { t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) } - if mySuggestion.Length != 7 { + if mySuggestion.Length != 6 { t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) } if len(mySuggestion.Options) != 1 { t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) } myOption := mySuggestion.Options[0] - if myOption.Text != "golang" { - t.Errorf("expected Text = 'golang'; got %s", myOption.Text) - } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) + if myOption.Text != "Golang" { + t.Errorf("expected Text = 'Golang'; got %s", myOption.Text) } } -//*/ diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_templates_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_templates_test.go deleted file mode 100644 index 3f8bbcb65..000000000 --- a/vendor/gopkg.in/olivere/elastic.v3/search_templates_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
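Editor's note: the completion-suggester test is reinstated above because v5 indexes suggestions into a dedicated completion-mapped field ("suggest_field" in the test mapping further down) instead of a plain message field. A sketch of the fixture side, assuming the tweet struct carries a SuggestField the way the repository's test fixtures do; the json tags here are assumptions:

    package main

    import (
    	"fmt"

    	"gopkg.in/olivere/elastic.v5"
    )

    // tweet mirrors the test fixture; Suggest must serialize under the
    // field that the index maps with type "completion" ("suggest_field").
    type tweet struct {
    	User    string                `json:"user"`
    	Message string                `json:"message"`
    	Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
    }

    func main() {
    	doc := tweet{
    		User:    "olivere",
    		Message: "Welcome to Golang and Elasticsearch.",
    		Suggest: elastic.NewSuggestField("Golang", "Elasticsearch"),
    	}
    	fmt.Printf("%+v\n", doc)
    }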
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestSearchTemplatesLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Template - tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` - - // Create template - cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do() - if err != nil { - t.Fatal(err) - } - if cresp == nil { - t.Fatalf("expected response != nil; got: %v", cresp) - } - if !cresp.Created { - t.Errorf("expected created = %v; got: %v", true, cresp.Created) - } - - // Get template - resp, err := client.GetTemplate().Id("elastic-test").Do() - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected response != nil; got: %v", resp) - } - if resp.Template == "" { - t.Errorf("expected template != %q; got: %q", "", resp.Template) - } - - // Delete template - dresp, err := client.DeleteTemplate().Id("elastic-test").Do() - if err != nil { - t.Fatal(err) - } - if dresp == nil { - t.Fatalf("expected response != nil; got: %v", dresp) - } - if !dresp.Found { - t.Fatalf("expected found = %v; got: %v", true, dresp.Found) - } -} - -func TestSearchTemplatesInlineQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Run query with (inline) search template - // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html - tq := NewTemplateQuery(`{"match_{{template}}": {}}`).Var("template", "all") - resp, err := client.Search(testIndexName).Query(tq).Do() - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected response != nil; got: %v", resp) - } - if resp.Hits == nil { - t.Fatalf("expected response hits != nil; got: %v", resp.Hits) - } - if resp.Hits.TotalHits != 3 { - t.Fatalf("expected 3 hits; got: %d", resp.Hits.TotalHits) - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_test.go b/vendor/gopkg.in/olivere/elastic.v3/search_test.go index d8936af1a..e78ad1bcf 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/search_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -6,10 +6,11 @@ package elastic import ( "encoding/json" - _ "net/http" "reflect" "testing" "time" + + "golang.org/x/net/context" ) func TestSearchMatchAll(t *testing.T) { @@ -22,7 +23,7 @@ func TestSearchMatchAll(t *testing.T) { Query(NewMatchAllQuery()). Size(100). Pretty(true). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -59,7 +60,7 @@ func TestSearchMatchAllWithRequestCacheDisabled(t *testing.T) { Size(100). Pretty(true). RequestCache(false). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -80,7 +81,7 @@ func BenchmarkSearchMatchAll(b *testing.B) { for n := 0; n < b.N; n++ { // Match all should return all documents all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) if err != nil { b.Fatal(err) } @@ -96,13 +97,13 @@ func BenchmarkSearchMatchAll(b *testing.B) { func TestSearchResultTotalHits(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) - count, err := client.Count(testIndexName).Do() + count, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -124,7 +125,7 @@ func TestSearchResultEach(t *testing.T) { client := setupTestClientAndCreateIndexAndAddDocs(t) all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -201,22 +202,22 @@ func TestSearchSorting(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -228,7 +229,7 @@ func TestSearchSorting(t *testing.T) { Query(all). Sort("created", false). Timeout("1s"). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -274,22 +275,22 @@ func TestSearchSortingBySorters(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -301,7 +302,7 @@ func TestSearchSortingBySorters(t *testing.T) { Query(all). SortBy(NewFieldSort("created").Desc(), NewScoreSort()). Timeout("1s"). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -328,6 +329,7 @@ func TestSearchSortingBySorters(t *testing.T) { } func TestSearchSpecificFields(t *testing.T) { + // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} @@ -335,22 +337,22 @@ func TestSearchSpecificFields(t *testing.T) { tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -360,8 +362,8 @@ func TestSearchSpecificFields(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). Query(all). - Fields("message"). - Do() + StoredFields("message"). 
+ Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -408,6 +410,7 @@ func TestSearchSpecificFields(t *testing.T) { func TestSearchExplain(t *testing.T) { client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) tweet1 := tweet{ User: "olivere", Retweets: 108, @@ -426,22 +429,22 @@ func TestSearchExplain(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -454,7 +457,7 @@ func TestSearchExplain(t *testing.T) { Explain(true). Timeout("1s"). // Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -504,22 +507,22 @@ func TestSearchSource(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -534,7 +537,7 @@ func TestSearchSource(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). Source(source). 
// sets the JSON request - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -567,22 +570,22 @@ func TestSearchRawString(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -591,7 +594,7 @@ func TestSearchRawString(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). Query(query). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -623,22 +626,22 @@ func TestSearchSearchSource(t *testing.T) { } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -652,7 +655,7 @@ func TestSearchSearchSource(t *testing.T) { searchResult, err := client.Search(). Index(testIndexName). SearchSource(ss). 
// sets the SearchSource - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -700,32 +703,32 @@ func TestSearchInnerHitsOnHasChild(t *testing.T) { comment3b := comment{User: "olivere", Comment: "It sure is."} // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -739,7 +742,7 @@ func TestSearchInnerHitsOnHasChild(t *testing.T) { Index(testIndexName). Query(bq). Pretty(true). 
- Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -838,32 +841,32 @@ func TestSearchInnerHitsOnHasParent(t *testing.T) { comment3b := comment{User: "olivere", Comment: "It sure is."} // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -877,7 +880,7 @@ func TestSearchInnerHitsOnHasParent(t *testing.T) { Index(testIndexName). Query(bq). Pretty(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -1020,25 +1023,3 @@ func TestSearchBuildURL(t *testing.T) { } } } - -func TestSearchResultHasShards(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - // Match all should return all documents - searchResult, err := client.Search(). - Index(testIndexName). - Query(NewMatchAllQuery()). - Size(100). - Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Shards == nil { - t.Errorf("expected SearchResult.Shards != nil; got nil") - } - if got, want := searchResult.Shards.Failed, 0; got != want { - t.Errorf("expected SearchResult.Shards.Failed = %d; got %d", want, got) - } -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/setup_test.go b/vendor/gopkg.in/olivere/elastic.v3/setup_test.go index 97af2bb27..74513c029 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/setup_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/setup_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
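Editor's note: the most pervasive change in this patch is the Do() signature: every service call now takes a context. The tests consistently pass context.TODO(), and the import is "golang.org/x/net/context" rather than the standard library package, presumably for pre-Go-1.7 compatibility. A minimal sketch of the migrated calling convention; the client options and index name are placeholders:

    package main

    import (
    	"fmt"

    	"golang.org/x/net/context"
    	"gopkg.in/olivere/elastic.v5"
    )

    func main() {
    	client, err := elastic.NewClient() // connection options assumed
    	if err != nil {
    		panic(err)
    	}
    	// v3: client.Search().Index("twitter").Query(q).Do()
    	// v5: Do takes a context; context.TODO() matches the tests above.
    	res, err := client.Search().
    		Index("twitter").
    		Query(elastic.NewMatchAllQuery()).
    		Do(context.TODO())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%d hits\n", res.Hits.TotalHits)
    }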
@@ -10,6 +10,8 @@ import ( "math/rand" "os" "time" + + "golang.org/x/net/context" ) const ( @@ -23,24 +25,28 @@ const ( }, "mappings":{ "_default_": { - "_timestamp": { - "enabled": true - }, - "_ttl": { + "_all": { "enabled": true } }, "tweet":{ "properties":{ + "user":{ + "type":"keyword" + }, + "message":{ + "type":"text", + "store": true, + "fielddata": true + }, "tags":{ - "type":"string" + "type":"keyword" }, "location":{ "type":"geo_point" }, "suggest_field":{ - "type":"completion", - "payloads":true + "type":"completion" } } }, @@ -52,11 +58,10 @@ const ( "order":{ "properties":{ "article":{ - "type":"string" + "type":"text" }, "manufacturer":{ - "type":"string", - "index" : "not_analyzed" + "type":"keyword" }, "price":{ "type":"float" @@ -66,6 +71,22 @@ const ( "format": "YYYY-MM-dd" } } + }, + "doctype":{ + "properties":{ + "message":{ + "type":"text", + "store": true, + "fielddata": true + } + } + }, + "queries":{ + "properties": { + "query": { + "type": "percolator" + } + } } } } @@ -108,6 +129,16 @@ func (o order) String() string { return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time) } +// doctype is required for Percolate tests. +type doctype struct { + Message string `json:"message"` +} + +// queries is required for Percolate tests. +type queries struct { + Query string `json:"query"` +} + func isTravis() bool { return os.Getenv("TRAVIS") != "" } @@ -135,8 +166,8 @@ func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) { t.Fatal(err) } - client.DeleteIndex(testIndexName).Do() - client.DeleteIndex(testIndexName2).Do() + client.DeleteIndex(testIndexName).Do(context.TODO()) + client.DeleteIndex(testIndexName2).Do(context.TODO()) return client } @@ -145,7 +176,7 @@ func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Clien client := setupTestClient(t, options...) 
// Create index - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -154,7 +185,7 @@ func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Clien } // Create second index - createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do() + createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -178,19 +209,19 @@ func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFu tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} comment1 := comment{User: "nico", Comment: "You bet."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do() + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -207,14 +238,14 @@ func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFu orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"}) for i, o := range orders { id := fmt.Sprintf("%d", i) - _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do() + _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do(context.TODO()) if err != nil { t.Fatal(err) } } // Flush - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/sort.go b/vendor/gopkg.in/olivere/elastic.v3/sort.go index 9dd07de24..1817c191a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/sort.go +++ b/vendor/gopkg.in/olivere/elastic.v3/sort.go @@ -85,30 +85,30 @@ type ScoreSort struct { } // NewScoreSort creates a new ScoreSort. -func NewScoreSort() ScoreSort { - return ScoreSort{ascending: false} // Descending by default! +func NewScoreSort() *ScoreSort { + return &ScoreSort{ascending: false} // Descending by default! } // Order defines whether sorting ascending (default) or descending. -func (s ScoreSort) Order(ascending bool) ScoreSort { +func (s *ScoreSort) Order(ascending bool) *ScoreSort { s.ascending = ascending return s } // Asc sets ascending sort order. -func (s ScoreSort) Asc() ScoreSort { +func (s *ScoreSort) Asc() *ScoreSort { s.ascending = true return s } // Desc sets descending sort order. -func (s ScoreSort) Desc() ScoreSort { +func (s *ScoreSort) Desc() *ScoreSort { s.ascending = false return s } // Source returns the JSON-serializable data. 
-func (s ScoreSort) Source() (interface{}, error) { +func (s *ScoreSort) Source() (interface{}, error) { source := make(map[string]interface{}) x := make(map[string]interface{}) source["_score"] = x @@ -134,33 +134,33 @@ type FieldSort struct { } // NewFieldSort creates a new FieldSort. -func NewFieldSort(fieldName string) FieldSort { - return FieldSort{ +func NewFieldSort(fieldName string) *FieldSort { + return &FieldSort{ fieldName: fieldName, ascending: true, } } // FieldName specifies the name of the field to be used for sorting. -func (s FieldSort) FieldName(fieldName string) FieldSort { +func (s *FieldSort) FieldName(fieldName string) *FieldSort { s.fieldName = fieldName return s } // Order defines whether sorting ascending (default) or descending. -func (s FieldSort) Order(ascending bool) FieldSort { +func (s *FieldSort) Order(ascending bool) *FieldSort { s.ascending = ascending return s } // Asc sets ascending sort order. -func (s FieldSort) Asc() FieldSort { +func (s *FieldSort) Asc() *FieldSort { s.ascending = true return s } // Desc sets descending sort order. -func (s FieldSort) Desc() FieldSort { +func (s *FieldSort) Desc() *FieldSort { s.ascending = false return s } @@ -168,21 +168,21 @@ func (s FieldSort) Desc() FieldSort { // Missing sets the value to be used when a field is missing in a document. // You can also use "_last" or "_first" to sort missing last or first // respectively. -func (s FieldSort) Missing(missing interface{}) FieldSort { +func (s *FieldSort) Missing(missing interface{}) *FieldSort { s.missing = missing return s } // IgnoreUnmapped specifies what happens if the field does not exist in // the index. Set it to true to ignore, or set it to false to not ignore (default). -func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort { +func (s *FieldSort) IgnoreUnmapped(ignoreUnmapped bool) *FieldSort { s.ignoreUnmapped = &ignoreUnmapped return s } // UnmappedType sets the type to use when the current field is not mapped // in an index. -func (s FieldSort) UnmappedType(typ string) FieldSort { +func (s *FieldSort) UnmappedType(typ string) *FieldSort { s.unmappedType = &typ return s } @@ -190,27 +190,27 @@ func (s FieldSort) UnmappedType(typ string) FieldSort { // SortMode specifies what values to pick in case a document contains // multiple values for the targeted sort field. Possible values are: // min, max, sum, and avg. -func (s FieldSort) SortMode(sortMode string) FieldSort { +func (s *FieldSort) SortMode(sortMode string) *FieldSort { s.sortMode = &sortMode return s } // NestedFilter sets a filter that nested objects should match with // in order to be taken into account for sorting. -func (s FieldSort) NestedFilter(nestedFilter Query) FieldSort { +func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort { s.nestedFilter = nestedFilter return s } // NestedPath is used if sorting occurs on a field that is inside a // nested object. -func (s FieldSort) NestedPath(nestedPath string) FieldSort { +func (s *FieldSort) NestedPath(nestedPath string) *FieldSort { s.nestedPath = &nestedPath return s } // Source returns the JSON-serializable data. -func (s FieldSort) Source() (interface{}, error) { +func (s *FieldSort) Source() (interface{}, error) { source := make(map[string]interface{}) x := make(map[string]interface{}) source[s.fieldName] = x @@ -262,8 +262,8 @@ type GeoDistanceSort struct { } // NewGeoDistanceSort creates a new sorter for geo distances. 
-func NewGeoDistanceSort(fieldName string) GeoDistanceSort { - return GeoDistanceSort{ +func NewGeoDistanceSort(fieldName string) *GeoDistanceSort { + return &GeoDistanceSort{ fieldName: fieldName, points: make([]*GeoPoint, 0), geohashes: make([]string, 0), @@ -272,43 +272,43 @@ func NewGeoDistanceSort(fieldName string) GeoDistanceSort { } // FieldName specifies the name of the (geo) field to use for sorting. -func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort { +func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort { s.fieldName = fieldName return s } // Order defines whether sorting ascending (default) or descending. -func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort { +func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort { s.ascending = ascending return s } // Asc sets ascending sort order. -func (s GeoDistanceSort) Asc() GeoDistanceSort { +func (s *GeoDistanceSort) Asc() *GeoDistanceSort { s.ascending = true return s } // Desc sets descending sort order. -func (s GeoDistanceSort) Desc() GeoDistanceSort { +func (s *GeoDistanceSort) Desc() *GeoDistanceSort { s.ascending = false return s } // Point specifies a point to create the range distance aggregations from. -func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort { +func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort { s.points = append(s.points, GeoPointFromLatLon(lat, lon)) return s } // Points specifies the geo point(s) to create the range distance aggregations from. -func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort { +func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort { s.points = append(s.points, points...) return s } // GeoHashes specifies the geo point to create the range distance aggregations from. -func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort { +func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort { s.geohashes = append(s.geohashes, geohashes...) return s } @@ -316,7 +316,7 @@ func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort { // GeoDistance represents how to compute the distance. // It can be sloppy_arc (default), arc, or plane. // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. -func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort { +func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort { s.geoDistance = &geoDistance return s } @@ -324,7 +324,7 @@ func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort { // Unit specifies the distance unit to use. It defaults to km. // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units // for details. -func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort { +func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort { s.unit = unit return s } @@ -332,27 +332,27 @@ func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort { // SortMode specifies what values to pick in case a document contains // multiple values for the targeted sort field. Possible values are: // min, max, sum, and avg. -func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort { +func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort { s.sortMode = &sortMode return s } // NestedFilter sets a filter that nested objects should match with // in order to be taken into account for sorting. 
-func (s GeoDistanceSort) NestedFilter(nestedFilter Query) GeoDistanceSort { +func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort { s.nestedFilter = nestedFilter return s } // NestedPath is used if sorting occurs on a field that is inside a // nested object. -func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort { +func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort { s.nestedPath = &nestedPath return s } // Source returns the JSON-serializable data. -func (s GeoDistanceSort) Source() (interface{}, error) { +func (s *GeoDistanceSort) Source() (interface{}, error) { source := make(map[string]interface{}) x := make(map[string]interface{}) source["_geo_distance"] = x @@ -410,8 +410,8 @@ type ScriptSort struct { // NewScriptSort creates and initializes a new ScriptSort. // You must provide a script and a type, e.g. "string" or "number". -func NewScriptSort(script *Script, typ string) ScriptSort { - return ScriptSort{ +func NewScriptSort(script *Script, typ string) *ScriptSort { + return &ScriptSort{ script: script, typ: typ, ascending: true, @@ -419,25 +419,25 @@ func NewScriptSort(script *Script, typ string) ScriptSort { } // Type sets the script type, which can be either "string" or "number". -func (s ScriptSort) Type(typ string) ScriptSort { +func (s *ScriptSort) Type(typ string) *ScriptSort { s.typ = typ return s } // Order defines whether sorting ascending (default) or descending. -func (s ScriptSort) Order(ascending bool) ScriptSort { +func (s *ScriptSort) Order(ascending bool) *ScriptSort { s.ascending = ascending return s } // Asc sets ascending sort order. -func (s ScriptSort) Asc() ScriptSort { +func (s *ScriptSort) Asc() *ScriptSort { s.ascending = true return s } // Desc sets descending sort order. -func (s ScriptSort) Desc() ScriptSort { +func (s *ScriptSort) Desc() *ScriptSort { s.ascending = false return s } @@ -445,27 +445,27 @@ func (s ScriptSort) Desc() ScriptSort { // SortMode specifies what values to pick in case a document contains // multiple values for the targeted sort field. Possible values are: // min or max. -func (s ScriptSort) SortMode(sortMode string) ScriptSort { +func (s *ScriptSort) SortMode(sortMode string) *ScriptSort { s.sortMode = &sortMode return s } // NestedFilter sets a filter that nested objects should match with // in order to be taken into account for sorting. -func (s ScriptSort) NestedFilter(nestedFilter Query) ScriptSort { +func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort { s.nestedFilter = nestedFilter return s } // NestedPath is used if sorting occurs on a field that is inside a // nested object. -func (s ScriptSort) NestedPath(nestedPath string) ScriptSort { +func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort { s.nestedPath = &nestedPath return s } // Source returns the JSON-serializable data. 
-func (s ScriptSort) Source() (interface{}, error) { +func (s *ScriptSort) Source() (interface{}, error) { if s.script == nil { return nil, errors.New("ScriptSort expected a script") } diff --git a/vendor/gopkg.in/olivere/elastic.v3/sort_test.go b/vendor/gopkg.in/olivere/elastic.v3/sort_test.go index d94fe183a..68ab355d1 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/sort_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/sort_test.go @@ -26,23 +26,6 @@ func TestSortInfo(t *testing.T) { } } -func TestSortByDoc(t *testing.T) { - builder := SortByDoc{} - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `"_doc"` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - func TestSortInfoComplex(t *testing.T) { builder := SortInfo{ Field: "price", diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest.go b/vendor/gopkg.in/olivere/elastic.v3/suggest.go index 5459778de..6dee75762 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggest.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,79 +12,77 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // SuggestService returns suggestions for text. +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters.html. type SuggestService struct { client *Client pretty bool routing string preference string - indices []string + index []string suggesters []Suggester } +// NewSuggestService creates a new instance of SuggestService. func NewSuggestService(client *Client) *SuggestService { builder := &SuggestService{ - client: client, - indices: make([]string, 0), - suggesters: make([]Suggester, 0), + client: client, } return builder } -func (s *SuggestService) Index(indices ...string) *SuggestService { - s.indices = append(s.indices, indices...) +// Index adds one or more indices to use for the suggestion request. +func (s *SuggestService) Index(index ...string) *SuggestService { + s.index = append(s.index, index...) return s } +// Pretty asks Elasticsearch to return indented JSON. func (s *SuggestService) Pretty(pretty bool) *SuggestService { s.pretty = pretty return s } +// Routing specifies the routing value. func (s *SuggestService) Routing(routing string) *SuggestService { s.routing = routing return s } +// Preference specifies the node or shard the operation should be +// performed on (default: random). func (s *SuggestService) Preference(preference string) *SuggestService { s.preference = preference return s } +// Suggester adds a suggester to the request. func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { s.suggesters = append(s.suggesters, suggester) return s } -func (s *SuggestService) Do() (SuggestResult, error) { - return s.DoC(nil) -} - -func (s *SuggestService) DoC(ctx context.Context) (SuggestResult, error) { - // Build url - path := "/" +// buildURL builds the URL for the operation. 
+func (s *SuggestService) buildURL() (string, url.Values, error) { + var err error + var path string - // Indices part - var indexPart []string - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_suggest", map[string]string{ + "index": strings.Join(s.index, ","), }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) + } else { + path = "/_suggest" + } + if err != nil { + return "", url.Values{}, err } - path += strings.Join(indexPart, ",") - - // Suggest - path += "/_suggest" - // Parameters - params := make(url.Values) + // Add query string parameters + params := url.Values{} if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } @@ -94,6 +92,15 @@ func (s *SuggestService) DoC(ctx context.Context) (SuggestResult, error) { if s.preference != "" { params.Set("preference", s.preference) } + return path, params, nil +} + +// Do executes the request. +func (s *SuggestService) Do(ctx context.Context) (SuggestResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } // Set body body := make(map[string]interface{}) @@ -106,7 +113,7 @@ func (s *SuggestService) DoC(ctx context.Context) (SuggestResult, error) { } // Get response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } @@ -132,8 +139,10 @@ func (s *SuggestService) DoC(ctx context.Context) (SuggestResult, error) { return ret, nil } +// SuggestResult is the outcome of SuggestService.Do. type SuggestResult map[string][]Suggestion +// Suggestion is a single suggester outcome. type Suggestion struct { Text string `json:"text"` Offset int `json:"offset"` diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go b/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go index 4738d9910..5cfa39371 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest_field.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -13,14 +13,15 @@ import ( // http://www.elasticsearch.org/blog/you-complete-me/. 
type SuggestField struct { inputs []string - output *string - payload interface{} weight int contextQueries []SuggesterContextQuery } -func NewSuggestField() *SuggestField { - return &SuggestField{weight: -1} +func NewSuggestField(input ...string) *SuggestField { + return &SuggestField{ + inputs: input, + weight: -1, + } } func (f *SuggestField) Input(input ...string) *SuggestField { @@ -31,16 +32,6 @@ func (f *SuggestField) Input(input ...string) *SuggestField { return f } -func (f *SuggestField) Output(output string) *SuggestField { - f.output = &output - return f -} - -func (f *SuggestField) Payload(payload interface{}) *SuggestField { - f.payload = payload - return f -} - func (f *SuggestField) Weight(weight int) *SuggestField { f.weight = weight return f @@ -64,14 +55,6 @@ func (f *SuggestField) MarshalJSON() ([]byte, error) { } } - if f.output != nil { - source["output"] = *f.output - } - - if f.payload != nil { - source["payload"] = f.payload - } - if f.weight >= 0 { source["weight"] = f.weight } diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest_field_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggest_field_test.go index 1c0059dd9..c57c71755 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggest_field_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest_field_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -12,7 +12,6 @@ import ( func TestSuggestField(t *testing.T) { field := NewSuggestField(). Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). - Output("Golang and Elasticsearch: An introduction."). Weight(1). ContextQuery( NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"), @@ -23,7 +22,7 @@ func TestSuggestField(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"output":"Golang and Elasticsearch: An introduction.","weight":1}` + expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggest_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggest_test.go index 50a4a0952..f43922132 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggest_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggest_test.go @@ -1,16 +1,51 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
package elastic import ( - _ "net/http" "testing" + + "golang.org/x/net/context" ) +func TestSuggestBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_suggest", + }, + { + []string{"index1"}, + "/index1/_suggest", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_suggest", + }, + } + + for i, test := range tests { + path, _, err := client.Suggest().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + func TestSuggestService(t *testing.T) { client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) tweet1 := tweet{ User: "olivere", @@ -19,7 +54,6 @@ func TestSuggestService(t *testing.T) { Location: "48.1333,11.5667", // lat,lon Suggest: NewSuggestField(). Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). - Output("Golang and Elasticsearch: An introduction."). Weight(0), } tweet2 := tweet{ @@ -29,7 +63,6 @@ func TestSuggestService(t *testing.T) { Location: "48.1189,11.4289", // lat,lon Suggest: NewSuggestField(). Input("Another unrelated topic.", "Golang topic."). - Output("About Golang."). Weight(1), } tweet3 := tweet{ @@ -38,27 +71,26 @@ func TestSuggestService(t *testing.T) { Tags: []string{"sports", "cycling"}, Location: "47.7167,11.7167", // lat,lon Suggest: NewSuggestField(). - Input("Cycling is fun."). - Output("Cycling is a fun sport."), + Input("Cycling is fun."), } // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) if err != nil { t.Fatal(err) } - _, err = client.Flush().Index(testIndexName).Do() + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -76,7 +108,7 @@ func TestSuggestService(t *testing.T) { Suggester(termSuggester). Suggester(phraseSuggester). Suggester(completionSuggester). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -122,10 +154,10 @@ func TestSuggestService(t *testing.T) { if len(completionSuggestions[0].Options) != 2 { t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options)) } - if completionSuggestions[0].Options[0].Text != "About Golang." { - t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, "About Golang.", completionSuggestions[0].Options[0].Text) + if have, want := completionSuggestions[0].Options[0].Text, "Golang topic."; have != want { + t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, want, have) } - if completionSuggestions[0].Options[1].Text != "Golang and Elasticsearch: An introduction." 
{ - t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, "Golang and Elasticsearch: An introduction.", completionSuggestions[0].Options[1].Text) + if have, want := completionSuggestions[0].Options[1].Text, "Golang and Elasticsearch"; have != want { + t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, want, have) + } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester.go b/vendor/gopkg.in/olivere/elastic.v3/suggester.go index c342b10d3..f7dc48f90 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go index b7959f1f8..b85953209 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -76,7 +76,7 @@ type completionSuggesterRequest struct { Completion interface{} `json:"completion"` } -// Source creates the JSON structure for the completion suggester. +// Source generates the source for the completion suggester. func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) { cs := &completionSuggesterRequest{} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go index 7edfc09b2..871688149 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go index 29fcba55f..aae1db11b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_test.go index e01b73482..6b7c3f420 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_completion_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go index 0903f2171..caf477669 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go index 4b8e43f88..67a82edc3 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go index 7ca045801..3a013d642 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go index 254e133c4..a895855cc 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -19,9 +19,7 @@ type SuggesterGeoMapping struct { // NewSuggesterGeoMapping creates a new SuggesterGeoMapping. func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { return &SuggesterGeoMapping{ - name: name, - defaultLocations: make([]*GeoPoint, 0), - precision: make([]string, 0), + name: name, } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go index 331276dab..b1ab2f495 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
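A minimal sketch of the migrated suggest call style implied by the hunks above, assuming an existing client; the index name ("tweets"), field name ("suggest_field"), and suggester name are illustrative, not taken from the patch:

	ctx := context.TODO()
	// Inputs now go straight into the constructor; Output and Payload are gone.
	field := NewSuggestField("Golang and Elasticsearch", "Welcome to Golang and Elasticsearch.")
	_ = field // indexed as part of a document's completion-mapped field
	suggester := NewCompletionSuggester("my-completion-suggestion").
		Text("Gol").
		Field("suggest_field")
	// Do now takes a context.Context; the old Do()/DoC(ctx) pair is collapsed into one method.
	res, err := client.Suggest().
		Index("tweets").
		Suggester(suggester).
		Do(ctx)
	if err != nil {
		// handle error
	}
	_ = res // res is a SuggestResult, i.e. map[string][]Suggestion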
diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go index fa19ccacb..989e40482 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase.go @@ -1,11 +1,13 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic +// PhraseSuggester provides an API to access word alternatives +// on a per token basis within a certain string distance. // For more details, see -// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/ +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters-phrase.html. type PhraseSuggester struct { Suggester name string @@ -33,12 +35,11 @@ type PhraseSuggester struct { collatePrune *bool } -// Creates a new phrase suggester. +// NewPhraseSuggester creates a new PhraseSuggester. func NewPhraseSuggester(name string) *PhraseSuggester { return &PhraseSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), - collateParams: make(map[string]interface{}), + name: name, + collateParams: make(map[string]interface{}), } } @@ -173,7 +174,7 @@ func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester { return q } -// simplePhraseSuggesterRequest is necessary because the order in which +// phraseSuggesterRequest is necessary because the order in which // the JSON elements are routed to Elasticsearch is relevant. // We got into trouble when using plain maps because the text element // needs to go before the simple_phrase element. @@ -182,7 +183,7 @@ type phraseSuggesterRequest struct { Phrase interface{} `json:"phrase"` } -// Creates the source for the phrase suggester. +// Source generates the source for the phrase suggester. func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) { ps := &phraseSuggesterRequest{} diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go index 1eb46ce44..fbcc676fe 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go index eab7c8147..fb5987306 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_term.go @@ -1,11 +1,12 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic +// TermSuggester suggests terms based on edit distance. // For more details, see -// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/ +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters-term.html. 
type TermSuggester struct { Suggester name string @@ -29,11 +30,10 @@ type TermSuggester struct { minDocFreq *float64 } -// Creates a new term suggester. +// NewTermSuggester creates a new TermSuggester. func NewTermSuggester(name string) *TermSuggester { return &TermSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), + name: name, } } @@ -135,7 +135,7 @@ type termSuggesterRequest struct { Term interface{} `json:"term"` } -// Creates the source for the term suggester. +// Source generates the source for the term suggester. func (q *TermSuggester) Source(includeName bool) (interface{}, error) { // "suggest" : { // "my-suggest-1" : { @@ -180,13 +180,13 @@ func (q *TermSuggester) Source(includeName bool) (interface{}, error) { } suggester["context"] = src default: - var ctxq []interface{} - for _, query := range q.contextQueries { + ctxq := make([]interface{}, len(q.contextQueries)) + for i, query := range q.contextQueries { src, err := query.Source() if err != nil { return nil, err } - ctxq = append(ctxq, src) + ctxq[i] = src } suggester["context"] = ctxq } diff --git a/vendor/gopkg.in/olivere/elastic.v3/suggester_term_test.go b/vendor/gopkg.in/olivere/elastic.v3/suggester_term_test.go index 869049890..bb10f03e2 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/suggester_term_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/suggester_term_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go index d8e04b8f7..56a26f894 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // TasksCancelService can cancel long-running tasks. @@ -119,12 +119,7 @@ func (s *TasksCancelService) Validate() error { } // Do executes the operation. -func (s *TasksCancelService) Do() (*TasksListResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *TasksCancelService) DoC(ctx context.Context) (*TasksListResponse, error) { +func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -137,7 +132,7 @@ func (s *TasksCancelService) DoC(ctx context.Context) (*TasksListResponse, error } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, nil) + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel_test.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel_test.go index 5f3f1b3c9..c9d863394 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_cancel_test.go @@ -4,9 +4,7 @@ package elastic -import ( - "testing" -) +import "testing" func TestTasksCancelBuildURL(t *testing.T) { client := setupTestClient(t) @@ -42,7 +40,7 @@ func TestTasksCancel(t *testing.T) { if esversion < "2.3.0" { t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion) } - res, err := client.TasksCancel("1").Do() + res, err := client.TasksCancel("1").Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go index c64f9026f..685c031ea 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_list.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // TasksListService retrieves the list of currently executing tasks @@ -144,12 +144,7 @@ func (s *TasksListService) Validate() error { } // Do executes the operation. -func (s *TasksListService) Do() (*TasksListResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. 
-func (s *TasksListService) DoC(ctx context.Context) (*TasksListResponse, error) { +func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -162,7 +157,7 @@ func (s *TasksListService) DoC(ctx context.Context) (*TasksListResponse, error) } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, nil) + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/tasks_list_test.go b/vendor/gopkg.in/olivere/elastic.v3/tasks_list_test.go index b213cb43a..9ee80545e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/tasks_list_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/tasks_list_test.go @@ -4,7 +4,11 @@ package elastic -import "testing" +import ( + "testing" + + "golang.org/x/net/context" +) func TestTasksListBuildURL(t *testing.T) { client := setupTestClient(t) @@ -49,7 +53,7 @@ func TestTasksList(t *testing.T) { t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion) } - res, err := client.TasksList().Pretty(true).Do() + res, err := client.TasksList().Pretty(true).Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/termvectors.go b/vendor/gopkg.in/olivere/elastic.v3/termvectors.go index 98888996f..244169556 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/termvectors.go +++ b/vendor/gopkg.in/olivere/elastic.v3/termvectors.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // TermvectorsService returns information and statistics on terms in the @@ -278,12 +278,7 @@ func (s *TermvectorsService) Validate() error { } // Do executes the operation. -func (s *TermvectorsService) Do() (*TermvectorsResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *TermvectorsService) DoC(ctx context.Context) (*TermvectorsResponse, error) { +func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -322,7 +317,7 @@ func (s *TermvectorsService) DoC(ctx context.Context) (*TermvectorsResponse, err } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "GET", path, params, body) + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/termvectors_test.go b/vendor/gopkg.in/olivere/elastic.v3/termvectors_test.go index e487a24a4..fb0ede146 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/termvectors_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/termvectors_test.go @@ -1,10 +1,11 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. package elastic import ( + "context" "testing" "time" ) @@ -58,8 +59,8 @@ func TestTermVectorsWithId(t *testing.T) { Type("tweet"). Id("1"). BodyJson(&tweet1). - Refresh(true). - Do() + Refresh("true"). 
+ Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -74,7 +75,7 @@ func TestTermVectorsWithId(t *testing.T) { Fields(field). FieldStatistics(true). TermStatistics(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -84,9 +85,6 @@ func TestTermVectorsWithId(t *testing.T) { if !result.Found { t.Errorf("expected found to be %v; got: %v", true, result.Found) } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } } func TestTermVectorsWithDoc(t *testing.T) { @@ -111,7 +109,7 @@ func TestTermVectorsWithDoc(t *testing.T) { PerFieldAnalyzer(perFieldAnalyzer). FieldStatistics(true). TermStatistics(true). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -121,9 +119,6 @@ func TestTermVectorsWithDoc(t *testing.T) { if !result.Found { t.Errorf("expected found to be %v; got: %v", true, result.Found) } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } } func TestTermVectorsWithFilter(t *testing.T) { @@ -149,7 +144,7 @@ func TestTermVectorsWithFilter(t *testing.T) { FieldStatistics(true). TermStatistics(true). Filter(NewTermvectorsFilterSettings().MinTermFreq(1)). - Do() + Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -159,7 +154,4 @@ func TestTermVectorsWithFilter(t *testing.T) { if !result.Found { t.Errorf("expected found to be %v; got: %v", true, result.Found) } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } } diff --git a/vendor/gopkg.in/olivere/elastic.v3/update.go b/vendor/gopkg.in/olivere/elastic.v3/update.go index 418a29c17..c7bb2833e 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/update.go +++ b/vendor/gopkg.in/olivere/elastic.v3/update.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,34 +11,33 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // UpdateService updates a document in Elasticsearch. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-update.html // for details. type UpdateService struct { - client *Client - index string - typ string - id string - routing string - parent string - script *Script - fields []string - version *int64 - versionType string - retryOnConflict *int - refresh *bool - replicationType string - consistencyLevel string - upsert interface{} - scriptedUpsert *bool - docAsUpsert *bool - detectNoop *bool - doc interface{} - timeout string - pretty bool + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh string + waitForActiveShards string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool } // NewUpdateService creates the service to update documents in Elasticsearch. @@ -113,21 +112,17 @@ func (b *UpdateService) VersionType(versionType string) *UpdateService { } // Refresh the index after performing the update. 
-func (b *UpdateService) Refresh(refresh bool) *UpdateService { - b.refresh = &refresh +func (b *UpdateService) Refresh(refresh string) *UpdateService { + b.refresh = refresh return b } -// ReplicationType is one of "sync" or "async". -func (b *UpdateService) ReplicationType(replicationType string) *UpdateService { - b.replicationType = replicationType - return b -} - -// ConsistencyLevel is one of "one", "quorum", or "all". -// It sets the write consistency setting for the update operation. -func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService { - b.consistencyLevel = consistencyLevel +// WaitForActiveShards sets the number of shard copies that must be active before +// proceeding with the update operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than +// or equal to the total number of copies for the shard (number of replicas + 1). +func (b *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService { + b.waitForActiveShards = waitForActiveShards return b } @@ -205,14 +200,11 @@ func (b *UpdateService) url() (string, url.Values, error) { if b.timeout != "" { params.Set("timeout", b.timeout) } - if b.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *b.refresh)) + if b.refresh != "" { + params.Set("refresh", b.refresh) } - if b.replicationType != "" { - params.Set("replication", b.replicationType) - } - if b.consistencyLevel != "" { - params.Set("consistency", b.consistencyLevel) + if b.waitForActiveShards != "" { + params.Set("wait_for_active_shards", b.waitForActiveShards) } if len(b.fields) > 0 { params.Set("fields", strings.Join(b.fields, ",")) @@ -264,12 +256,7 @@ func (b *UpdateService) body() (interface{}, error) { } // Do executes the update operation. -func (b *UpdateService) Do() (*UpdateResponse, error) { - return b.DoC(nil) -} - -// DoC executes the update operation. -func (b *UpdateService) DoC(ctx context.Context) (*UpdateResponse, error) { +func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) { path, params, err := b.url() if err != nil { return nil, err @@ -282,7 +269,7 @@ func (b *UpdateService) DoC(ctx context.Context) (*UpdateResponse, error) { } // Get response - res, err := b.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := b.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go b/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go index ac012f479..a4e7bf25b 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go +++ b/vendor/gopkg.in/olivere/elastic.v3/update_by_query.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) // UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. 
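A minimal sketch of an update call against the reworked UpdateService above, assuming an existing client; the index, type, id, and document body are illustrative only:

	ctx := context.TODO()
	res, err := client.Update().
		Index("tweets").Type("tweet").Id("1").
		Doc(map[string]interface{}{"retweets": 42}).
		DocAsUpsert(true).
		Refresh("true").            // Refresh now takes a string instead of a bool
		WaitForActiveShards("all"). // replaces the removed ReplicationType/ConsistencyLevel options
		Do(ctx)                     // Do now requires a context.Context
	if err != nil {
		// handle error
	}
	_ = res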
@@ -20,6 +20,9 @@ type UpdateByQueryService struct { pretty bool index []string typ []string + script *Script + query Query + body interface{} xSource []string xSourceExclude []string xSourceInclude []string @@ -27,21 +30,22 @@ type UpdateByQueryService struct { analyzeWildcard *bool analyzer string conflicts string - consistency string defaultOperator string + docvalueFields []string df string expandWildcards string explain *bool fielddataFields []string - fields []string from *int ignoreUnavailable *bool lenient *bool lowercaseExpandedTerms *bool + pipeline string preference string q string - refresh *bool + refresh string requestCache *bool + requestsPerSecond *int routing []string scroll string scrollSize *int @@ -50,6 +54,7 @@ type UpdateByQueryService struct { size *int sort []string stats []string + storedFields []string suggestField string suggestMode string suggestSize *int @@ -59,28 +64,24 @@ type UpdateByQueryService struct { trackScores *bool version *bool versionType *bool + waitForActiveShards string waitForCompletion *bool - script *Script - query Query - bodyJson interface{} - bodyString string } // NewUpdateByQueryService creates a new UpdateByQueryService. func NewUpdateByQueryService(client *Client) *UpdateByQueryService { return &UpdateByQueryService{ - client: client, - xSource: make([]string, 0), - xSourceExclude: make([]string, 0), - xSourceInclude: make([]string, 0), - fielddataFields: make([]string, 0), - fields: make([]string, 0), - routing: make([]string, 0), - sort: make([]string, 0), - stats: make([]string, 0), + client: client, } } +// Index is a list of index names to search; use `_all` or empty string to +// perform the operation on all indices. +func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { + s.index = append(s.index, index...) + return s +} + // Type is a list of document types to search; leave empty to perform // the operation on all types. func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { @@ -88,10 +89,22 @@ func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { return s } -// Index is a list of index names to search; use `_all` or empty string to -// perform the operation on all indices. -func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { - s.index = append(s.index, index...) +// Pretty indicates that the JSON response should be indented and human readable. +func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { + s.pretty = pretty + return s +} + +// Script sets an update script. +func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { + s.script = script + return s +} + +// Body specifies the body of the request. It overrides data being specified via +// SearchService or Script. +func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService { + s.body = body return s } @@ -156,25 +169,24 @@ func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService return s } -// Consistency sets an explicit write consistency setting for the operation. -// Possible values are "one", "quorum", and "all". -func (s *UpdateByQueryService) Consistency(consistency string) *UpdateByQueryService { - s.consistency = consistency - return s -} - // DefaultOperator is the default operator for query string query (AND or OR).
func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService { s.defaultOperator = defaultOperator return s } -// Df specifies the field to use as default where no field prefix is given in the query string. -func (s *UpdateByQueryService) Df(df string) *UpdateByQueryService { +// DF specifies the field to use as default where no field prefix is given in the query string. +func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService { s.df = df return s } +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService { + s.docvalueFields = docvalueFields + return s +} + // ExpandWildcards indicates whether to expand wildcard expression to // concrete indices that are open, closed or both. func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService { @@ -196,12 +208,6 @@ func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *Updat return s } -// Fields is a list of fields to return as part of a hit. -func (s *UpdateByQueryService) Fields(fields ...string) *UpdateByQueryService { - s.fields = append(s.fields, fields...) - return s -} - // From is the starting offset (default: 0). func (s *UpdateByQueryService) From(from int) *UpdateByQueryService { s.from = &from @@ -228,6 +234,12 @@ func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms boo return s } +// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none). +func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService { + s.pipeline = pipeline + return s +} + // Preference specifies the node or shard the operation should be performed on // (default: random). func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService { @@ -235,15 +247,21 @@ func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryServi return s } -// Query in the Lucene query string syntax. +// Q specifies the query in the Lucene query string syntax. func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService { s.q = q return s } +// Query sets a query definition using the Query DSL. +func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { + s.query = query + return s +} + // Refresh indicates whether the affected indexes should be refreshed. -func (s *UpdateByQueryService) Refresh(refresh bool) *UpdateByQueryService { - s.refresh = &refresh +func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService { + s.refresh = refresh return s } @@ -254,6 +272,13 @@ func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQuerySer return s } +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means no throttle; "unlimited" is the only non-numeric value this accepts. +func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + // Routing is a list of specific routing values. func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService { s.routing = append(s.routing, routing...) @@ -315,6 +340,12 @@ func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService { return s } +// StoredFields specifies the list of stored fields to return as part of a hit.
+func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService { + s.storedFields = storedFields + return s +} + // SuggestField specifies which field to use for suggestions. func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService { s.suggestField = suggestField @@ -380,42 +411,18 @@ func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryServi return s } -// WaitForCompletion indicates if the request should block until the reindex is complete. -func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { - s.waitForCompletion = &waitForCompletion - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { - s.pretty = pretty - return s -} - -// Script sets an update script. -func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { - s.script = script +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the update by query operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1). +func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService { + s.waitForActiveShards = waitForActiveShards return s } -// Query sets a query definition using the Query DSL. -func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { - s.query = query - return s -} - -// BodyJson specifies e.g. the query to restrict the results specified with the -// Query DSL (optional). The interface{} will be serialized to a JSON document, -// so use a map[string]interface{}. -func (s *UpdateByQueryService) BodyJson(body interface{}) *UpdateByQueryService { - s.bodyJson = body - return s -} - -// Body specifies e.g. a query to restrict the results specified with -// the Query DSL (optional). -func (s *UpdateByQueryService) BodyString(body string) *UpdateByQueryService { - s.bodyString = body +// WaitForCompletion indicates if the request should block until the reindex is complete. 
+func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { + s.waitForCompletion = &waitForCompletion return s } @@ -424,21 +431,15 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { // Build URL var err error var path string - if len(s.index) > 0 && len(s.typ) > 0 { + if len(s.typ) > 0 { path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{ "index": strings.Join(s.index, ","), "type": strings.Join(s.typ, ","), }) - } else if len(s.index) > 0 && len(s.typ) == 0 { + } else { path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{ "index": strings.Join(s.index, ","), }) - } else if len(s.index) == 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_update_by_query", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else { - path = "/_all/_update_by_query" } if err != nil { return "", url.Values{}, err @@ -461,18 +462,15 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { if s.allowNoIndices != nil { params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } if s.analyzer != "" { params.Set("analyzer", s.analyzer) } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } if s.conflicts != "" { params.Set("conflicts", s.conflicts) } - if s.consistency != "" { - params.Set("consistency", s.consistency) - } if s.defaultOperator != "" { params.Set("default_operator", s.defaultOperator) } @@ -485,12 +483,15 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { if s.explain != nil { params.Set("explain", fmt.Sprintf("%v", *s.explain)) } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) + } if len(s.fielddataFields) > 0 { params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } if s.from != nil { params.Set("from", fmt.Sprintf("%d", *s.from)) } @@ -503,14 +504,17 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { if s.lowercaseExpandedTerms != nil { params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } if s.preference != "" { params.Set("preference", s.preference) } if s.q != "" { params.Set("q", s.q) } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + if s.refresh != "" { + params.Set("refresh", s.refresh) } if s.requestCache != nil { params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) @@ -566,28 +570,36 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { if s.versionType != nil { params.Set("version_type", fmt.Sprintf("%v", *s.versionType)) } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } if s.waitForCompletion != nil { params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } return path, params, nil } // Validate checks if the operation is valid. 
func (s *UpdateByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } return nil } -// body returns the body part of the document request. -func (s *UpdateByQueryService) body() (interface{}, error) { - if s.bodyJson != nil { - return s.bodyJson, nil +// getBody returns the body part of the document request. +func (s *UpdateByQueryService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil } - if s.bodyString != "" { - return s.bodyString, nil - } - source := make(map[string]interface{}) - if s.script != nil { src, err := s.script.Source() if err != nil { @@ -595,7 +607,6 @@ func (s *UpdateByQueryService) body() (interface{}, error) { } source["script"] = src } - if s.query != nil { src, err := s.query.Source() if err != nil { @@ -603,17 +614,11 @@ func (s *UpdateByQueryService) body() (interface{}, error) { } source["query"] = src } - return source, nil } // Do executes the operation. -func (s *UpdateByQueryService) Do() (*UpdateByQueryResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *UpdateByQueryService) DoC(ctx context.Context) (*UpdateByQueryResponse, error) { +func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -626,37 +631,21 @@ func (s *UpdateByQueryService) DoC(ctx context.Context) (*UpdateByQueryResponse, } // Setup HTTP request body - body, err := s.body() + body, err := s.getBody() if err != nil { return nil, err } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "POST", path, params, body) + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) if err != nil { return nil, err } - // Return operation response - ret := new(UpdateByQueryResponse) + // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery) + ret := new(BulkIndexByScrollResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } - -// UpdateByQueryResponse is the response of UpdateByQueryService.Do. 
-type UpdateByQueryResponse struct { - Took int64 `json:"took"` - TimedOut bool `json:"timed_out"` - Total int64 `json:"total"` - Updated int64 `json:"updated"` - Created int64 `json:"created"` - Deleted int64 `json:"deleted"` - Batches int64 `json:"batches"` - VersionConflicts int64 `json:"version_conflicts"` - Noops int64 `json:"noops"` - Retries int64 `json:"retries"` - Canceled string `json:"canceled"` - Failures []shardOperationFailure `json:"failures"` -} diff --git a/vendor/gopkg.in/olivere/elastic.v3/update_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v3/update_by_query_test.go index d03ae8947..8f84758ad 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/update_by_query_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/update_by_query_test.go @@ -7,63 +7,82 @@ package elastic import ( "encoding/json" "testing" + + "golang.org/x/net/context" ) func TestUpdateByQueryBuildURL(t *testing.T) { client := setupTestClient(t) tests := []struct { - Indices []string - Types []string - Expected string + Indices []string + Types []string + Expected string + ExpectErr bool }{ { []string{}, []string{}, - "/_all/_update_by_query", + "", + true, }, { []string{"index1"}, []string{}, "/index1/_update_by_query", + false, }, { []string{"index1", "index2"}, []string{}, "/index1%2Cindex2/_update_by_query", + false, }, { []string{}, []string{"type1"}, - "/_all/type1/_update_by_query", + "", + true, }, { []string{"index1"}, []string{"type1"}, "/index1/type1/_update_by_query", + false, }, { []string{"index1", "index2"}, []string{"type1", "type2"}, "/index1%2Cindex2/type1%2Ctype2/_update_by_query", + false, }, } for i, test := range tests { - path, _, err := client.UpdateByQuery().Index(test.Indices...).Type(test.Types...).buildURL() + builder := client.UpdateByQuery().Index(test.Indices...).Type(test.Types...) + err := builder.Validate() if err != nil { - t.Errorf("case #%d: %v", i+1, err) - continue - } - if path != test.Expected { - t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + if !test.ExpectErr { + t.Errorf("case #%d: %v", i+1, err) + continue + } + } else { + // err == nil + if test.ExpectErr { + t.Errorf("case #%d: expected error", i+1) + continue + } + path, _, _ := builder.buildURL() + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } } } } func TestUpdateByQueryBodyWithQuery(t *testing.T) { client := setupTestClient(t) - out, err := client.UpdateByQuery().Query(NewTermQuery("user", "olivere")).body() + out, err := client.UpdateByQuery().Query(NewTermQuery("user", "olivere")).getBody() if err != nil { t.Fatal(err) } @@ -83,7 +102,7 @@ func TestUpdateByQueryBodyWithQueryAndScript(t *testing.T) { out, err := client.UpdateByQuery(). Query(NewTermQuery("user", "olivere")). Script(NewScriptInline("ctx._source.likes++")). 
- body() + getBody() if err != nil { t.Fatal(err) } @@ -108,7 +127,7 @@ func TestUpdateByQuery(t *testing.T) { t.Skipf("Elasticsearch %v does not support update-by-query yet", esversion) } - sourceCount, err := client.Count(testIndexName).Do() + sourceCount, err := client.Count(testIndexName).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -116,7 +135,7 @@ func TestUpdateByQuery(t *testing.T) { t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) } - res, err := client.UpdateByQuery(testIndexName).ProceedOnVersionConflict().Do() + res, err := client.UpdateByQuery(testIndexName).ProceedOnVersionConflict().Do(context.TODO()) if err != nil { t.Fatal(err) } diff --git a/vendor/gopkg.in/olivere/elastic.v3/update_test.go b/vendor/gopkg.in/olivere/elastic.v3/update_test.go index 57b26dc0e..79fe415dd 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/update_test.go +++ b/vendor/gopkg.in/olivere/elastic.v3/update_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -204,7 +204,7 @@ func TestUpdateViaDocAndUpsert(t *testing.T) { Doc(map[string]interface{}{"name": "new_name"}). DocAsUpsert(true). Timeout("1s"). - Refresh(true) + Refresh("true") path, params, err := update.url() if err != nil { t.Fatalf("expected to return URL, got: %v", err) @@ -231,82 +231,3 @@ func TestUpdateViaDocAndUpsert(t *testing.T) { t.Errorf("expected\n%s\ngot:\n%s", expected, got) } } - -func TestUpdateViaScriptIntegration(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion >= "1.4.3" || (esversion < "1.4.0" && esversion >= "1.3.8") { - t.Skip("groovy scripting has been disabled as for [1.3.8,1.4.0) and 1.4.3+") - return - } - - tweet1 := tweet{User: "olivere", Retweets: 10, Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet1). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - - // Update number of retweets - increment := 1 - script := NewScript("ctx._source.retweets += num"). - Params(map[string]interface{}{"num": increment}). - Lang("groovy") // Use "groovy" as default language as 1.3 uses MVEL by default - update, err := client.Update().Index(testIndexName).Type("tweet").Id("1"). - Script(script). - Do() - if err != nil { - t.Fatal(err) - } - if update == nil { - t.Errorf("expected update to be != nil; got %v", update) - } - if update.Version != indexResult.Version+1 { - t.Errorf("expected version to be %d; got %d", indexResult.Version+1, update.Version) - } - - // Get document - getResult, err := client.Get(). - Index(testIndexName). - Type("tweet"). - Id("1"). 
- Do() - if err != nil { - t.Fatal(err) - } - if getResult.Index != testIndexName { - t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) - } - if getResult.Type != "tweet" { - t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) - } - if getResult.Id != "1" { - t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) - } - if getResult.Source == nil { - t.Errorf("expected GetResult.Source to be != nil; got nil") - } - - // Decode the Source field - var tweetGot tweet - err = json.Unmarshal(*getResult.Source, &tweetGot) - if err != nil { - t.Fatal(err) - } - if tweetGot.Retweets != tweet1.Retweets+increment { - t.Errorf("expected Tweet.Retweets to be %d; got %d", tweet1.Retweets+increment, tweetGot.Retweets) - } -} diff --git a/vendor/github.com/nats-io/go-nats/.gitignore b/vendor/gopkg.in/olivere/elastic.v5/.gitignore similarity index 68% rename from vendor/github.com/nats-io/go-nats/.gitignore rename to vendor/gopkg.in/olivere/elastic.v5/.gitignore index 3d5981fa9..89340bbe7 100644 --- a/vendor/github.com/nats-io/go-nats/.gitignore +++ b/vendor/gopkg.in/olivere/elastic.v5/.gitignore @@ -21,19 +21,11 @@ _testmain.go *.exe -# Emacs -*~ -\#*\# -.\#* +/generator +/cluster-test/cluster-test +/cluster-test/*.log +/cluster-test/es-chaos-monkey +/spec +/tmp +/CHANGELOG-3.0.html -# vi/vim -.??*.swp - -# Mac -.DS_Store - -# Eclipse -.project -.settings/ - -# bin diff --git a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml new file mode 100644 index 000000000..8493f920e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml @@ -0,0 +1,19 @@ +sudo: required +language: go +go: + - 1.7 +services: + - docker +before_script: + # - mkdir ${HOME}/elasticsearch + # - wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz + # - tar -xzvf elasticsearch-${ES_VERSION}.tar.gz -C ${HOME}/elasticsearch + # - ls -alFR ${HOME}/elasticsearch + # - cp -r config/* ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/config/ + # - cat ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/config/elasticsearch.yml + # - ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/bin/elasticsearch >& /dev/null & + - mkdir -p /tmp/elasticsearch/config + - cp -r config/* /tmp/elasticsearch/config/ + - sudo sysctl -w vm.max_map_count=262144 + - docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "/tmp/elasticsearch/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch >& /dev/null & + - sleep 15 diff --git a/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md new file mode 100644 index 000000000..07f3e66bf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md @@ -0,0 +1,363 @@ +# Elastic 3.0 + +Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. + +We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. + +So, to summarize: + +1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. +2. 
Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
+
+The rest of the document is a list of all changes in Elastic 3.0.
+
+## Pointer types
+
+All types have changed to be pointer types, not value types. This is not only cleaner but also simplifies the API as illustrated by the following example:
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(&q).Do() // notice the & here
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(q).Do() // no more &
+// ... which can be simplified as:
+res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
+```
+
+It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
+
+## Query/filter merge
+
+One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
+
+The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
+
+Notice that some methods still carry "filter" in their name, e.g. `PostFilter`. However, they now accept a `Query` where they used to accept a `Filter`.
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermFilter("tag", "important")
+res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermQuery("tag", "important") // it's a query now!
+res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
+```
+
+## Facets are removed
+
+[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
+
+## Errors
+
+Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible to the consumer.
+
+An error and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
+
+### HTTP Status 404 (Not Found)
+
+When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
+
+Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch, e.g. to check for the existence of indices or documents. All other responses will return an error.
+
+To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
+
+The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
+
+Example for Elastic 2.0 (old):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+  // Something else went wrong (but 404 is NOT an error in Elastic 2.0)
+}
+if !res.Found {
+  // Document has not been found
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+  if elastic.IsNotFound(err) {
+    // Document has not been found
+  } else {
+    // Something else went wrong
+  }
+}
+```
+
+### HTTP Status 408 (Timeouts)
+
+Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is exceeded. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
+
+To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
+
+Example for Elastic 2.0 (old):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if err != nil {
+  // ...
+}
+if health.TimedOut {
+  // We have a timeout
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if elastic.IsTimeout(err) {
+  // We have a timeout
+}
+```
+
+### Bulk Errors
+
+The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
+In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
+These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
+
+### Removed specific Elastic errors
+
+The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
+
+## Numeric types
+
+Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
+
+## Pluralization
+
+Some services accept zero, one or more indices or types to operate on.
+E.g. the `SearchService` accepts a list of zero, one, or more indices to
+search and therefore had a func called `Index(index string)` and a func
+called `Indices(indices ...string)`.
+
+Elastic 3.0 now only uses the singular form which, when applicable, accepts
+variadic arguments. E.g. in the case of the `SearchService`, you now only have
+one func with the following signature: `Index(indices ...string)`.
+
+Notice that this is limited to `Index(...)` and `Type(...)`. There are other
+services with variadic functions. These have not been changed.
+
+## Multiple calls to variadic functions
+
+Some services with variadic functions cleared the underlying slice when
+called, while other services just added to the existing slice. This has now been
+normalized to always add to the underlying slice.
+
+Example for Elastic 2.0 (old):
+
+```go
+// Would only clear scroll id "two"
+// because ScrollId cleared the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+// Now (correctly) clears both scroll id "one" and "two"
+// because ScrollId no longer clears the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+## Ping service requires URL
+
+The `Ping` service raised some issues because it is different from all
+other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
+
+Users expected to ping the cluster, but that is not possible as the cluster
+can be a set of many nodes. So which node do we ping then?
+
+To make this clearer, the `Ping` function on the client now requires users
+to explicitly set the URL of the node to ping.
+
+## Meta fields
+
+Many of the meta fields, e.g. `_parent` or `_routing`, are now
+[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
+and are no longer returned as parts of the `fields` object. We had to change
+larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
+
+Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
+
+## HasParentQuery / HasChildQuery
+
+`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
+
+Example for Elastic 2.0 (old):
+
+```go
+allQ := elastic.NewMatchAllQuery()
+q := elastic.NewHasChildFilter("tweet").Query(&allQ)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
+```
+
+## SetBasicAuth client option
+
+You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
+
+Example:
+
+```go
+client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
+if err != nil {
+  log.Fatal(err)
+}
+```
+
+## Delete-by-Query API
+
+The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer a core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService`, you will most probably get a 404.
+
+An older version of this document stated the following:
+
+> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
+> +> Example for Elastic 3.0 (new): +> +> ```go +> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do() +> if err == elastic.ErrPluginNotFound { +> // Delete By Query API is not available +> } +> ``` + +I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch. + +If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play. + +## HasPlugin and SetRequiredPlugins + +Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). + +You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client. + +Example for Elastic 3.0 (new): + +```go +err, found := client.HasPlugin("delete-by-query") +if err == nil && found { + // ... Delete By Query API is available +} +``` + +To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place. + +```go +// Will raise an error if the "delete-by-query" plugin is NOT installed +client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query")) +if err != nil { + log.Fatal(err) +} +``` + +Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file. + +## Common Query has been renamed to Common Terms Query + +The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring). + +## Remove `MoreLikeThis` and `MoreLikeThisField` + +The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`. + +## Remove Filtered Query + +With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). + +## Remove FuzzyLikeThis and FuzzyLikeThisField + +Both have been removed from Elasticsearch 2.0 as well. 
+
+## Remove LimitFilter
+
+The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
+
+## Remove `_cache` and `_cache_key` from filters
+
+Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
+
+## Partial fields are gone
+
+Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
+
+## Scripting
+
+A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters, etc. With Elastic 3.0, you should now always use the `Script` type.
+
+Example for Elastic 2.0 (old):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+  Script("ctx._source.retweets += num").
+  ScriptParams(map[string]interface{}{"num": 1}).
+  Upsert(map[string]interface{}{"retweets": 0}).
+  Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+  Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
+  Upsert(map[string]interface{}{"retweets": 0}).
+  Do()
+```
+
+## Cluster State
+
+The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)` (see the sketch further below).
+
+## Unexported structs in response
+
+Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed this: (most) sub-structs are now unexported, meaning you can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
+
+## Add offset to Histogram aggregation
+
+Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
+
+## Services
+
+### REST API specification
+
+As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
+
+Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
+
+This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
+
+At the same time, the file names of the services have been renamed to match the REST API specification naming.
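+
+To illustrate the Cluster State change described further above with a concrete call (a minimal sketch, assuming the Elastic 3.0 API used throughout this document):
+
+```go
+// Illustrative sketch: one variadic call replaces the old
+// Metric(string)/Metrics(...string) pair.
+res, err := client.ClusterState().Metric("metadata", "nodes").Do()
+if err != nil {
+  // Handle error
+}
+```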
+
+### REST API Test Suite
+
+The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
+
+This process is not completed yet, though.
+
+
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
new file mode 100644
index 000000000..161c6a1ce
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
@@ -0,0 +1,195 @@
+# Changes in Elastic 5.0
+
+## Enforce context.Context in PerformRequest and Do
+
+We enforce the usage of `context.Context` everywhere you execute a request.
+You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
+This enables automatic request cancellation and many other patterns.
+
+If you don't need this, simply pass `context.TODO()` or `context.Background()`.
+
+## Warmers removed
+
+Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
+
+## Optimize removed
+
+Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
+Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
+
+## Missing Query removed
+
+The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
+Use `exists` query with `must_not` in `bool` query instead.
+
+## And Query removed
+
+The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `must` clauses in a `bool` query instead.
+
+## Not Query removed
+
+TODO Is it removed?
+
+## Or Query removed
+
+The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `should` clauses in a `bool` query instead.
+
+## Filtered Query removed
+
+The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `bool` query instead, which supports `filter` clauses too.
+
+## Limit Query removed
+
+The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use the `terminate_after` parameter instead.
+
+## Template Query removed
+
+The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
+Search Templates instead.
+
+We removed it from Elastic 5.0 as the 5.0 update is already a good opportunity
+to get rid of old stuff.
+
+## `_timestamp` and `_ttl` removed
+
+Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
+
+## Search template Put/Delete API returns `acknowledged` only
+
+The response type for Put/Delete search templates has changed.
+It only returns a single `acknowledged` flag now.
+
+## Fields has been renamed to Stored Fields
+
+The `fields` parameter has been renamed to `stored_fields`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
+
+## Fielddatafields has been renamed to Docvaluefields
+
+The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
+to `docvalue_fields`.
+
+## Type exists endpoint changed
+
+The endpoint for checking whether a type exists has been changed from
+`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
+
+## Refresh parameter changed
+
+The `?refresh` parameter previously could be a boolean value. It indicated
+whether changes made by a request (e.g. by the Bulk API) should be immediately
+visible in search, or not. Using `refresh=true` had the positive effect of
+immediately seeing the changes when searching; the negative effect is that
+it is a rather big performance hit.
+
+With 5.0, you now have the choice between these 3 values:
+
+* `"true"` - Refresh immediately
+* `"false"` - Do not refresh (the default value)
+* `"wait_for"` - Wait until ES has made the document visible in search
+
+See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
+
+Notice that `true` and `false` (the boolean values) are no longer available
+in Elastic. You must use a string instead, with one of the above values.
+
+## ReindexerService removed
+
+The `ReindexerService` was a custom solution that was started in the ES 1.x era
+to automate reindexing data from one index to another, or even between clusters.
+
+ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html)
+so we're going to remove our custom solution and ask you to use the native reindexer.
+
+The `ReindexService` is available via `client.Reindex()` (which used to point
+to the custom reindexer).
+
+## Delete By Query back in core
+
+The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
+was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
+
+It has its own endpoint at `/_delete_by_query`.
+
+Delete By Query, Reindex, and Update By Query are very similar under the hood.
+
+## Reindex, Delete By Query, and Update By Query response changed
+
+The response from the above APIs changed a bit. E.g. the `retries` value
+used to be an `int64`; now it returns separate values for `bulk` and `search`:
+
+```
+// Old
+{
+  ...
+  "retries": 123,
+  ...
+}
+```
+
+```
+// New
+{
+  ...
+  "retries": {
+    "bulk": 123,
+    "search": 0
+  },
+  ...
+}
+```
+
+## ScanService removed
+
+The `ScanService` is removed. Use the (new) `ScrollService` instead.
+
+## New ScrollService
+
+There was confusion around `ScanService` and `ScrollService` doing basically
+the same thing. One returned slices and didn't support all query details; the
+other returned one document after another and wasn't safe for concurrent use.
+So we merged the two into a new `ScrollService` that
+removes all the problems of the older services.
+
+In other words:
+If you used `ScanService`, switch to `ScrollService`.
+If you used the old `ScrollService`, you might need to fix some things but
+overall it should just work.
+
+Changes:
+- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
+
+TODO Not implemented yet
+
+## Suggesters
+
+They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
+
+Some changes:
+- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
+
+TODO Fix all structural changes in suggesters
+
+## Percolator
+
+Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
+
+Elastic 5.0 adds the new
+[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
+which can be used in combination with the new
+[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
+
+The Percolate service is removed from Elastic 5.0.
+
+## Remove Consistency, add WaitForActiveShards
+
+The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
+Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API.
+
+It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
+See https://github.com/elastic/elasticsearch/pull/19454.
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
new file mode 100644
index 000000000..4fbc79dd0
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
@@ -0,0 +1,40 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
+overview of the features of Elasticsearch. However, Elastic tries to resemble
+the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch).
+
+This explains why you might think that some options are strange or missing
+in Elastic, while often they're just different. Please check the Java API first.
+
+Having said that: Elasticsearch is moving fast and it is quite likely
+that we missed some features or changes. Feel free to change that.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Did you compare the existing implementation with the Java API?
+* Please work on the latest possible state of `olivere/elastic`.
+  Use `release-branch.v2` for targeting Elasticsearch 1.x and
+  `release-branch.v3` for targeting 2.x.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. We currently support Elasticsearch 1.7.x in the
+  release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and + probably a link to the Elasticsearch documentation and/or source code. +* Format your source with `go fmt`. + +## Additional Resources + +* [GitHub documentation](http://help.github.com/) +* [GitHub pull request documentation](http://help.github.com/send-pull-requests/) diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS new file mode 100644 index 000000000..4f69f8cbe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS @@ -0,0 +1,70 @@ +# This is a list of people who have contributed code +# to the Elastic repository. +# +# It is just my small "thank you" to all those that helped +# making Elastic what it is. +# +# Please keep this list sorted. + +Adam Alix [@adamalix](https://github.com/adamalix) +Adam Weiner [@adamweiner](https://github.com/adamweiner) +Alex [@akotlar](https://github.com/akotlar) +Alexander Staubo [@atombender](https://github.com/atombender) +Alexey Sharov [@nizsheanez](https://github.com/nizsheanez) +Andrew Gaul [@andrewgaul](https://github.com/andrewgaul) +Benjamin Fernandes [@LotharSee](https://github.com/LotharSee) +Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux) +Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va) +Brady Love [@bradylove](https://github.com/bradylove) +Bruce Zhou [@brucez-isell](https://github.com/brucez-isell) +Chris M [@tebriel](https://github.com/tebriel) +Christophe Courtaut [@kri5](https://github.com/kri5) +Conrad Pankoff [@deoxxa](https://github.com/deoxxa) +Corey Scott [@corsc](https://github.com/corsc) +Daniel Barrett [@shendaras](https://github.com/shendaras) +Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath) +Daniel Imfeld [@dimfeld](https://github.com/dimfeld) +Dwayne Schultz [@myshkin5](https://github.com/myshkin5) +Faolan C-P [@fcheslack](https://github.com/fcheslack) +Gerhard Häring [@ghaering](https://github.com/ghaering) +Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos) +Guillaume J. 
Charmes [@creack](https://github.com/creack) +Han Yu [@MoonighT](https://github.com/MoonighT) +Harrison Wright [@wright8191](https://github.com/wright8191) +Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) +Isaac Saldana [@isaldana](https://github.com/isaldana) +Jack Lindamood [@cep21](https://github.com/cep21) +Jacob [@jdelgad](https://github.com/jdelgad) +Jayme Rotsaert [@jrots](https://github.com/jrots) +Joe Buck [@four2five](https://github.com/four2five) +John Barker [@j16r](https://github.com/j16r) +John Goodall [@jgoodall](https://github.com/jgoodall) +John Stanford [@jxstanford](https://github.com/jxstanford) +Junpei Tsuji [@jun06t](https://github.com/jun06t) +Kenta SUZUKI [@suzuken](https://github.com/suzuken) +Kyle Brandt [@kylebrandt](https://github.com/kylebrandt) +Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) +Mara Kim [@autochthe](https://github.com/autochthe) +Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato) +Medhi Bechina [@mdzor](https://github.com/mdzor) +naimulhaider [@naimulhaider](https://github.com/naimulhaider) +navins [@ishare](https://github.com/ishare) +Naoya Tsutsumi [@tutuming](https://github.com/tutuming) +Nicholas Wolff [@nwolff](https://github.com/nwolff) +Nick Whyte [@nickw444](https://github.com/nickw444) +Orne Brocaar [@brocaar](https://github.com/brocaar) +Radoslaw Wesolowski [r--w](https://github.com/r--w) +Ryan Schmukler [@rschmukler](https://github.com/rschmukler) +Sacheendra talluri [@sacheendra](https://github.com/sacheendra) +Sean DuBois [@Sean-Der](https://github.com/Sean-Der) +Shalin LK [@shalinlk](https://github.com/shalinlk) +Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic) +Stuart Warren [@Woz](https://github.com/stuart-warren) +Sundar [@sundarv85](https://github.com/sundarv85) +Take [ww24](https://github.com/ww24) +Tetsuya Morimoto [@t2y](https://github.com/t2y) +TimeEmit [@TimeEmit](https://github.com/timeemit) +TusharM [@tusharm](https://github.com/tusharm) +wolfkdy [@wolfkdy](https://github.com/wolfkdy) +zakthomas [@zakthomas](https://github.com/zakthomas) +singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) diff --git a/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..c5eb690a7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md @@ -0,0 +1,17 @@ +Please use the following questions as a guideline to help me answer +your issue/question without further inquiry. Thank you. + +### Which version of Elastic are you using? + +[ ] elastic.v2 (for Elasticsearch 1.x) +[ ] elastic.v3 (for Elasticsearch 2.x) +[ ] elastic.v5 (for Elasticsearch 5.x) + +### Please describe the expected behavior + + +### Please describe the actual behavior + + +### Any steps to reproduce the behavior? 
+ diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/gopkg.in/olivere/elastic.v5/LICENSE similarity index 66% rename from vendor/github.com/eapache/go-resiliency/LICENSE rename to vendor/gopkg.in/olivere/elastic.v5/LICENSE index 698a3f513..8b22cdb60 100644 --- a/vendor/github.com/eapache/go-resiliency/LICENSE +++ b/vendor/gopkg.in/olivere/elastic.v5/LICENSE @@ -1,22 +1,20 @@ The MIT License (MIT) - -Copyright (c) 2014 Evan Huus +Copyright © 2012-2015 Oliver Eilhard Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal +of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/gopkg.in/olivere/elastic.v5/README.md b/vendor/gopkg.in/olivere/elastic.v5/README.md new file mode 100644 index 000000000..af9a2765e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/README.md @@ -0,0 +1,457 @@ +# Elastic + +Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the +[Go](http://www.golang.org/) programming language. + +[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v5)](https://travis-ci.org/olivere/elastic) +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v5) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) + +See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. + + +## Releases + +**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5)) +are actively being worked on and can break at any time. 
+If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version  | Package URL
+----------------------|------------------|------------
+5.x                   | 5.0              | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5))
+2.x                   | 3.0              | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
+1.x                   | 2.0              | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3               | 1.0              | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have installed Elasticsearch 5.0.0 and want to use Elastic.
+As listed above, you should use Elastic 5.0.
+So you first install the stable release of Elastic 5.0 from gopkg.in.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v5
+```
+
+You then import it with this import path:
+
+```go
+import elastic "gopkg.in/olivere/elastic.v5"
+```
+
+### Elastic 5.0
+
+Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
+[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
+
+Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
+as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
+
+Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).
+
+Elastic 3.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+Elastic 2.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Elastic is stable but the API changes
+now and then. We strive for API compatibility.
+However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
+and we sometimes have to adapt.
+
+Having said that, there have been no big API changes that required you
+to rewrite your application big time. More often than not it's renaming APIs
+and adding/removing features so that Elastic is in sync with Elasticsearch.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. Most of them are implemented
+by Elastic. I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Getting Started
+
+The first thing you do is create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
+The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
+
+You typically create one client for your app. Here's a complete example of
+creating a client, creating an index, adding a document, executing a search, etc.
+
+```go
+// Create a context; in Elastic 5.0 every Do call takes a context.Context
+// (see CHANGELOG-5.0.md).
+ctx := context.Background()
+
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+  // Handle error
+}
+
+// Create an index
+_, err = client.CreateIndex("twitter").Do(ctx)
+if err != nil {
+  // Handle error
+  panic(err)
+}
+
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+  Index("twitter").
+  Type("tweet").
+  Id("1").
+  BodyJson(tweet).
+  Refresh("true").
+  Do(ctx)
+if err != nil {
+  // Handle error
+  panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+  Index("twitter").   // search in index "twitter"
+  Query(termQuery).   // specify the query
+  Sort("user", true). // sort by "user" field, ascending
+  From(0).Size(10).   // take documents 0-9
+  Pretty(true).       // pretty print request and response JSON
+  Do(ctx)             // execute
+if err != nil {
+  // Handle error
+  panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+  if t, ok := item.(Tweet); ok {
+    fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+  }
+}
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits.TotalHits > 0 {
+  fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+  // Iterate through results
+  for _, hit := range searchResult.Hits.Hits {
+    // hit.Index contains the name of the index
+
+    // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
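+    // (hit.Source is a *json.RawMessage, which is why it is dereferenced
+    // in the json.Unmarshal call below.)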
+    var t Tweet
+    err := json.Unmarshal(*hit.Source, &t)
+    if err != nil {
+      // Deserialization failed
+    }
+
+    // Work with tweet
+    fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+  }
+} else {
+  // No hits
+  fmt.Print("Found no tweets\n")
+}
+
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do(ctx)
+if err != nil {
+  // Handle error
+  panic(err)
+}
+```
+
+Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+
+
+## API Status
+
+### Document APIs
+
+- [x] Index API
+- [x] Get API
+- [x] Delete API
+- [x] Delete By Query API
+- [x] Update API
+- [x] Update By Query API
+- [x] Multi Get API
+- [x] Bulk API
+- [x] Reindex API
+- [x] Term Vectors
+- [x] Multi termvectors API
+
+### Search APIs
+
+- [x] Search
+- [x] Search Template
+- [ ] Multi Search Template
+- [ ] Search Shards API
+- [x] Suggesters
+  - [x] Term Suggester
+  - [x] Phrase Suggester
+  - [x] Completion Suggester
+  - [x] Context Suggester
+- [x] Multi Search API
+- [x] Count API
+- [ ] Search Exists API
+- [ ] Validate API
+- [x] Explain API
+- [ ] Profile API
+- [x] Field Stats API
+
+### Aggregations
+
+- Metrics Aggregations
+  - [x] Avg
+  - [x] Cardinality
+  - [x] Extended Stats
+  - [x] Geo Bounds
+  - [ ] Geo Centroid
+  - [x] Max
+  - [x] Min
+  - [x] Percentiles
+  - [x] Percentile Ranks
+  - [ ] Scripted Metric
+  - [x] Stats
+  - [x] Sum
+  - [x] Top Hits
+  - [x] Value Count
+- Bucket Aggregations
+  - [x] Children
+  - [x] Date Histogram
+  - [x] Date Range
+  - [x] Filter
+  - [x] Filters
+  - [x] Geo Distance
+  - [ ] GeoHash Grid
+  - [x] Global
+  - [x] Histogram
+  - [x] IP Range
+  - [x] Missing
+  - [x] Nested
+  - [x] Range
+  - [x] Reverse Nested
+  - [x] Sampler
+  - [x] Significant Terms
+  - [x] Terms
+- Pipeline Aggregations
+  - [x] Avg Bucket
+  - [x] Derivative
+  - [x] Max Bucket
+  - [x] Min Bucket
+  - [x] Sum Bucket
+  - [ ] Stats Bucket
+  - [ ] Extended Stats Bucket
+  - [ ] Percentiles Bucket
+  - [x] Moving Average
+  - [x] Cumulative Sum
+  - [x] Bucket Script
+  - [x] Bucket Selector
+  - [x] Serial Differencing
+- [ ] Matrix Aggregations
+  - [ ] Matrix Stats
+- [x] Aggregation Metadata
+
+### Indices APIs
+
+- [x] Create Index
+- [x] Delete Index
+- [x] Get Index
+- [x] Indices Exists
+- [x] Open / Close Index
+- [x] Shrink Index
+- [ ] Rollover Index
+- [x] Put Mapping
+- [x] Get Mapping
+- [ ] Get Field Mapping
+- [ ] Types Exists
+- [x] Index Aliases
+- [x] Update Indices Settings
+- [x] Get Settings
+- [ ] Analyze
+- [x] Index Templates
+- [ ] Shadow Replica Indices
+- [x] Indices Stats
+- [ ] Indices Segments
+- [ ] Indices Recovery
+- [ ] Indices Shard Stores
+- [ ] Clear Cache
+- [x] Flush
+- [x] Refresh
+- [x] Force Merge
+- [ ] Upgrade
+
+### cat APIs
+
+The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line.
+
+- [ ] cat aliases
+- [ ] cat allocation
+- [ ] cat count
+- [ ] cat fielddata
+- [ ] cat health
+- [ ] cat indices
+- [ ] cat master
+- [ ] cat nodeattrs
+- [ ] cat nodes
+- [ ] cat pending tasks
+- [ ] cat plugins
+- [ ] cat recovery
+- [ ] cat repositories
+- [ ] cat thread pool
+- [ ] cat shards
+- [ ] cat segments
+- [ ] cat snapshots
+
+### Cluster APIs
+
+- [x] Cluster Health
+- [x] Cluster State
+- [x] Cluster Stats
+- [ ] Pending Cluster Tasks
+- [ ] Cluster Reroute
+- [ ] Cluster Update Settings
+- [x] Nodes Stats
+- [x] Nodes Info
+- [x] Task Management API
+- [ ] Nodes hot_threads
+- [ ] Cluster Allocation Explain API
+
+### Query DSL
+
+- [x] Match All Query
+- [x] Inner hits
+- Full text queries
+  - [x] Match Query
+  - [x] Match Phrase Query
+  - [x] Match Phrase Prefix Query
+  - [x] Multi Match Query
+  - [x] Common Terms Query
+  - [x] Query String Query
+  - [x] Simple Query String Query
+- Term level queries
+  - [x] Term Query
+  - [x] Terms Query
+  - [x] Range Query
+  - [x] Exists Query
+  - [x] Prefix Query
+  - [x] Wildcard Query
+  - [x] Regexp Query
+  - [x] Fuzzy Query
+  - [x] Type Query
+  - [x] Ids Query
+- Compound queries
+  - [x] Constant Score Query
+  - [x] Bool Query
+  - [x] Dis Max Query
+  - [x] Function Score Query
+  - [x] Boosting Query
+  - [x] Indices Query
+- Joining queries
+  - [x] Nested Query
+  - [x] Has Child Query
+  - [x] Has Parent Query
+  - [ ] Parent Id Query
+- Geo queries
+  - [ ] GeoShape Query
+  - [x] Geo Bounding Box Query
+  - [x] Geo Distance Query
+  - [ ] Geo Distance Range Query
+  - [x] Geo Polygon Query
+  - [ ] Geohash Cell Query
+- Specialized queries
+  - [x] More Like This Query
+  - [x] Template Query
+  - [x] Script Query
+  - [x] Percolate Query
+- Span queries
+  - [ ] Span Term Query
+  - [ ] Span Multi Term Query
+  - [ ] Span First Query
+  - [ ] Span Near Query
+  - [ ] Span Or Query
+  - [ ] Span Not Query
+  - [ ] Span Containing Query
+  - [ ] Span Within Query
+  - [ ] Span Field Masking Query
+- [ ] Minimum Should Match
+- [ ] Multi Term Query Rewrite
+
+### Modules
+
+- [ ] Snapshot and Restore
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+- [x] Sort by doc
+
+### Scrolling
+
+Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
+The `ClearScroll` API is implemented as well.
+
+A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
+is described in the [Wiki](https://github.com/olivere/elastic/wiki).
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot to the great folks working hard on
+[Elasticsearch](https://www.elastic.co/products/elasticsearch)
+and
+[Go](https://golang.org/).
+
+Elastic uses portions of the
+[uritemplates](https://github.com/jtacoma/uritemplates) library
+by Joshua Tacoma and
+[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
diff --git a/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
new file mode 100644
index 000000000..83f954f44
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
@@ -0,0 +1,11 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AcknowledgedResponse is returned from various APIs. It simply indicates
+// whether the operation is ack'd or not.
+type AcknowledgedResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff/LICENSE b/vendor/gopkg.in/olivere/elastic.v5/backoff/LICENSE
new file mode 100644
index 000000000..f6f2dcc97
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/backoff/LICENSE
@@ -0,0 +1,22 @@
+Portions of this code rely on this LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff.go b/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff.go
new file mode 100644
index 000000000..23381cedc
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff.go
@@ -0,0 +1,159 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package backoff
+
+import (
+	"math"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Backoff is an interface for different types of backoff algorithms.
+type Backoff interface {
+	Next() time.Duration
+	Reset()
+}
+
+// Stop is used as a signal to indicate that no more retries should be made.
+const Stop time.Duration = -1
+
+// -- Simple Backoff --
+
+// SimpleBackoff takes a list of fixed values for backoff intervals.
+// Each call to Next returns the next value from that fixed list.
+// After each value is returned, subsequent calls to Next will only return
+// the last element. The caller may specify if the values are "jittered".
+type SimpleBackoff struct {
+	sync.Mutex
+	ticks  []int
+	index  int
+	jitter bool
+	stop   bool
+}
+
+// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
+// list of fixed intervals in milliseconds.
+func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
+	return &SimpleBackoff{
+		ticks:  ticks,
+		index:  0,
+		jitter: false,
+		stop:   false,
+	}
+}
+
+// Jitter, when enabled, randomizes Next to return a value in [0.5*value .. 1.5*value].
+func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.jitter = doJitter
+	return b
+}
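+
+// A typical way to drive a SimpleBackoff (illustrative sketch, assuming only
+// the types defined in this file):
+//
+//	b := NewSimpleBackoff(100, 200, 400).Jitter(true).SendStop(true)
+//	for {
+//		d := b.Next()
+//		if d == Stop {
+//			break // all intervals exhausted, give up
+//		}
+//		time.Sleep(d)
+//		// ... retry the operation here ...
+//	}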
+func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.stop = doStop
+	return b
+}
+
+// Next returns the next wait interval.
+func (b *SimpleBackoff) Next() time.Duration {
+	b.Lock()
+	defer b.Unlock()
+
+	i := b.index
+	if i >= len(b.ticks) {
+		if b.stop {
+			return Stop
+		}
+		i = len(b.ticks) - 1
+		b.index = i
+	} else {
+		b.index++
+	}
+
+	ms := b.ticks[i]
+	if b.jitter {
+		ms = jitter(ms)
+	}
+	return time.Duration(ms) * time.Millisecond
+}
+
+// Reset resets SimpleBackoff.
+func (b *SimpleBackoff) Reset() {
+	b.Lock()
+	b.index = 0
+	b.Unlock()
+}
+
+// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
+func jitter(millis int) int {
+	if millis <= 0 {
+		return 0
+	}
+	return millis/2 + rand.Intn(millis)
+}
+
+// -- Exponential --
+
+// ExponentialBackoff implements the simple exponential backoff described by
+// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
+type ExponentialBackoff struct {
+	sync.Mutex
+	t    float64 // initial timeout (in msec)
+	f    float64 // exponential factor (e.g. 2)
+	m    float64 // maximum timeout (in msec)
+	n    int64   // number of retries
+	stop bool    // indicates whether Next should send "Stop" when max timeout is reached
+}
+
+// NewExponentialBackoff returns an ExponentialBackoff policy.
+// Use initialTimeout to set the first/minimal interval
+// and maxTimeout to set the maximum wait interval.
+func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		t:    float64(int64(initialTimeout / time.Millisecond)),
+		f:    2.0,
+		m:    float64(int64(maxTimeout / time.Millisecond)),
+		n:    0,
+		stop: false,
+	}
+}
+
+// SendStop, when enabled, makes Next return Stop once
+// the maximum timeout is reached.
+func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.stop = doStop
+	return b
+}
+
+// Next returns the next wait interval.
+func (t *ExponentialBackoff) Next() time.Duration {
+	t.Lock()
+	defer t.Unlock()
+
+	n := float64(atomic.AddInt64(&t.n, 1))
+	r := 1.0 + rand.Float64() // random number in [1.0..2.0)
+	m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
+	if t.stop && m >= t.m {
+		return Stop
+	}
+	d := time.Duration(int64(m)) * time.Millisecond
+	return d
+}
+
+// Reset resets the backoff policy so that it can be reused.
+func (t *ExponentialBackoff) Reset() {
+	t.Lock()
+	t.n = 0
+	t.Unlock()
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff_test.go b/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff_test.go
new file mode 100644
index 000000000..8f602e637
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/backoff/backoff_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
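+
+// A worked example of the formula in ExponentialBackoff.Next, using the
+// values exercised by the tests below (illustrative numbers): with an
+// initial timeout of 8ms, factor 2 and a maximum of 256ms, call n waits
+// min(r * 8ms * 2^n, 256ms) with r random in [1.0..2.0), i.e. roughly
+// 16-32ms, then 32-64ms, 64-128ms, and so on, capped at 256ms.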
+ +package backoff + +import ( + "math/rand" + "testing" + "time" +) + +func TestSimpleBackoff(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7) + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestSimpleBackoffWithStop(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7).SendStop(true) + + // It should eventually return Stop (-1) after some loops. + var last time.Duration + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + // It should eventually return Stop (-1) after some loops. + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestExponentialBackoff(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max) + + between := func(value time.Duration, a, b int) bool { + x := int(value / time.Millisecond) + return a <= x && x <= b + } + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + + b.Reset() + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } +} + +func TestExponentialBackoffWithStop(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max).SendStop(true) + + // It should eventually return Stop (-1) after some loops. 
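+	// (With SendStop(true), Next returns Stop as soon as the capped value
+	// reaches maxTimeout, which the exponential growth guarantees after a
+	// handful of calls.)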
+	var last time.Duration
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	b.Reset()
+
+	// It should eventually return Stop (-1) after some loops.
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff/retry.go b/vendor/gopkg.in/olivere/elastic.v5/backoff/retry.go
new file mode 100644
index 000000000..249b640b4
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/backoff/retry.go
@@ -0,0 +1,53 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy signals to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry runs the function o until it does not return an error or the
+// Backoff policy stops. o is guaranteed to be run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by Backoff after a
+// failed operation returns.
+func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleeping.
+func RetryNotify(operation Operation, b Backoff, notify Notify) error {
+	var err error
+	var next time.Duration
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if next = b.Next(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		time.Sleep(next)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff/retry_test.go b/vendor/gopkg.in/olivere/elastic.v5/backoff/retry_test.go
new file mode 100644
index 000000000..578c7a23d
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/backoff/retry_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import (
+	"errors"
+	"log"
+	"testing"
+	"time"
+)
+
+func TestRetry(t *testing.T) {
+	const successOn = 3
+	var i = 0
+
+	// This function succeeds on the "successOn"th call.
+	f := func() error {
+		i++
+		log.Printf("function is called %d. 
time\n", i) + + if i == successOn { + log.Println("OK") + return nil + } + + log.Println("error") + return errors.New("error") + } + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + err := Retry(f, NewExponentialBackoff(min, max).SendStop(true)) + if err != nil { + t.Errorf("unexpected error: %s", err.Error()) + } + if i != successOn { + t.Errorf("invalid number of retries: %d", i) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk.go b/vendor/gopkg.in/olivere/elastic.v5/bulk.go new file mode 100644 index 000000000..6dc08d6d9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk.go @@ -0,0 +1,397 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// BulkService allows for batching bulk requests and sending them to +// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest, +// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch, +// then use Do to send them to Elasticsearch. +// +// BulkService will be reset after each Do call. In other words, you can +// reuse BulkService to send many batches. You do not have to create a new +// BulkService for each batch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html +// for more details. +type BulkService struct { + client *Client + + index string + typ string + requests []BulkableRequest + pipeline string + timeout string + refresh string + routing string + waitForActiveShards string + pretty bool + + // estimated bulk size in bytes, up to the request index sizeInBytesCursor + sizeInBytes int64 + sizeInBytesCursor int +} + +// NewBulkService initializes a new BulkService. +func NewBulkService(client *Client) *BulkService { + builder := &BulkService{ + client: client, + } + return builder +} + +func (s *BulkService) reset() { + s.requests = make([]BulkableRequest, 0) + s.sizeInBytes = 0 + s.sizeInBytesCursor = 0 +} + +// Index specifies the index to use for all batches. You may also leave +// this blank and specify the index in the individual bulk requests. +func (s *BulkService) Index(index string) *BulkService { + s.index = index + return s +} + +// Type specifies the type to use for all batches. You may also leave +// this blank and specify the type in the individual bulk requests. +func (s *BulkService) Type(typ string) *BulkService { + s.typ = typ + return s +} + +// Timeout is a global timeout for processing bulk requests. This is a +// server-side timeout, i.e. it tells Elasticsearch the time after which +// it should stop processing. +func (s *BulkService) Timeout(timeout string) *BulkService { + s.timeout = timeout + return s +} + +// Refresh controls when changes made by this request are made visible +// to search. The allowed values are: "true" (refresh the relevant +// primary and replica shards immediately), "wait_for" (wait for the +// changes to be made visible by a refresh before applying), or "false" +// (no refresh related actions). +func (s *BulkService) Refresh(refresh string) *BulkService { + s.refresh = refresh + return s +} + +// Routing specifies the routing value. 
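+//
+// Putting a batch together might look like this (illustrative sketch;
+// assumes an initialized *Client named "client" and a document value "doc"):
+//
+//	res, err := client.Bulk().
+//		Index("index1").
+//		Routing("user-1").
+//		Add(NewBulkIndexRequest().Type("tweet").Id("1").Doc(doc)).
+//		Do(context.Background())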
+func (s *BulkService) Routing(routing string) *BulkService {
+	s.routing = routing
+	return s
+}
+
+// Pipeline specifies the pipeline id to preprocess incoming documents with.
+func (s *BulkService) Pipeline(pipeline string) *BulkService {
+	s.pipeline = pipeline
+	return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the bulk operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
+	s.waitForActiveShards = waitForActiveShards
+	return s
+}
+
+// Pretty tells Elasticsearch whether to return a formatted JSON response.
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+	s.pretty = pretty
+	return s
+}
+
+// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
+// and/or BulkDeleteRequest.
+func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
+	for _, r := range requests {
+		s.requests = append(s.requests, r)
+	}
+	return s
+}
+
+// EstimatedSizeInBytes returns the estimated size of all bulkable
+// requests added via Add.
+func (s *BulkService) EstimatedSizeInBytes() int64 {
+	if s.sizeInBytesCursor == len(s.requests) {
+		return s.sizeInBytes
+	}
+	for _, r := range s.requests[s.sizeInBytesCursor:] {
+		s.sizeInBytes += s.estimateSizeInBytes(r)
+		s.sizeInBytesCursor++
+	}
+	return s.sizeInBytes
+}
+
+// estimateSizeInBytes returns the estimated size of the given
+// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
+// BulkDeleteRequest.
+func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
+	lines, _ := r.Source()
+	size := 0
+	for _, line := range lines {
+		// +1 for the \n
+		size += len(line) + 1
+	}
+	return int64(size)
+}
+
+// NumberOfActions returns the number of bulkable requests that need to
+// be sent to Elasticsearch on the next batch.
+func (s *BulkService) NumberOfActions() int {
+	return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	var buf bytes.Buffer
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			buf.WriteString(line)
+			buf.WriteByte('\n')
+		}
+	}
+
+	return buf.String(), nil
+}
+
+// Do sends the batched requests to Elasticsearch. Note that, when successful,
+// you can reuse the BulkService for the next batch as the list of bulk
+// requests is cleared on success.
+func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
+	// No actions?
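+	// (The request body is built by bodyAsString as newline-delimited JSON:
+	// one action/metadata line per request, optionally followed by a source
+	// line, POSTed to the _bulk endpoint in a single roundtrip.)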
+ if s.NumberOfActions() == 0 { + return nil, errors.New("elastic: No bulk actions to commit") + } + + // Get body + body, err := s.bodyAsString() + if err != nil { + return nil, err + } + + // Build url + path := "/" + if len(s.index) > 0 { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": s.index, + }) + if err != nil { + return nil, err + } + path += index + "/" + } + if len(s.typ) > 0 { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": s.typ, + }) + if err != nil { + return nil, err + } + path += typ + "/" + } + path += "_bulk" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(BulkResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + + // Reset so the request can be reused + s.reset() + + return ret, nil +} + +// BulkResponse is a response to a bulk execution. +// +// Example: +// { +// "took":3, +// "errors":false, +// "items":[{ +// "index":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":3, +// "status":201 +// } +// },{ +// "index":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":3, +// "status":200 +// } +// },{ +// "delete":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":4, +// "status":200, +// "found":true +// } +// },{ +// "update":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":4, +// "status":200 +// } +// }] +// } +type BulkResponse struct { + Took int `json:"took,omitempty"` + Errors bool `json:"errors,omitempty"` + Items []map[string]*BulkResponseItem `json:"items,omitempty"` +} + +// BulkResponseItem is the result of a single bulk request. +type BulkResponseItem struct { + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Version int `json:"_version,omitempty"` + Status int `json:"status,omitempty"` + Found bool `json:"found,omitempty"` + Error *ErrorDetails `json:"error,omitempty"` +} + +// Indexed returns all bulk request results of "index" actions. +func (r *BulkResponse) Indexed() []*BulkResponseItem { + return r.ByAction("index") +} + +// Created returns all bulk request results of "create" actions. +func (r *BulkResponse) Created() []*BulkResponseItem { + return r.ByAction("create") +} + +// Updated returns all bulk request results of "update" actions. +func (r *BulkResponse) Updated() []*BulkResponseItem { + return r.ByAction("update") +} + +// Deleted returns all bulk request results of "delete" actions. +func (r *BulkResponse) Deleted() []*BulkResponseItem { + return r.ByAction("delete") +} + +// ByAction returns all bulk request results of a certain action, +// e.g. "index" or "delete". 
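+//
+// For example (illustrative):
+//
+//	created := res.ByAction("create")
+//	deleted := res.ByAction("delete")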
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + var items []*BulkResponseItem + for _, item := range r.Items { + if result, found := item[action]; found { + items = append(items, result) + } + } + return items +} + +// ById returns all bulk request results of a given document id, +// regardless of the action ("index", "delete" etc.). +func (r *BulkResponse) ById(id string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + var items []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if result.Id == id { + items = append(items, result) + } + } + } + return items +} + +// Failed returns those items of a bulk response that have errors, +// i.e. those that don't have a status code between 200 and 299. +func (r *BulkResponse) Failed() []*BulkResponseItem { + if r.Items == nil { + return nil + } + var errors []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if !(result.Status >= 200 && result.Status <= 299) { + errors = append(errors, result) + } + } + } + return errors +} + +// Succeeded returns those items of a bulk response that have no errors, +// i.e. those have a status code between 200 and 299. +func (r *BulkResponse) Succeeded() []*BulkResponseItem { + if r.Items == nil { + return nil + } + var succeeded []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if result.Status >= 200 && result.Status <= 299 { + succeeded = append(succeeded, result) + } + } + } + return succeeded +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go new file mode 100644 index 000000000..c475c6d63 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go @@ -0,0 +1,145 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// -- Bulk delete request -- + +// BulkDeleteRequest is a request to remove a document from Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html +// for details. +type BulkDeleteRequest struct { + BulkableRequest + index string + typ string + id string + parent string + routing string + version int64 // default is MATCH_ANY + versionType string // default is "internal" + + source []string +} + +// NewBulkDeleteRequest returns a new BulkDeleteRequest. +func NewBulkDeleteRequest() *BulkDeleteRequest { + return &BulkDeleteRequest{} +} + +// Index specifies the Elasticsearch index to use for this delete request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this delete request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to delete. +func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { + r.id = id + r.source = nil + return r +} + +// Parent specifies the parent of the request, which is used in parent/child +// mappings. 
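+//
+// The parent value is serialized as "_parent" in the action line, e.g.
+// (matching the serialization tests for this type):
+//
+//	{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"tweet"}}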
+func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest { + r.parent = parent + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { + r.routing = routing + r.source = nil + return r +} + +// Version indicates the version to be deleted as part of an optimistic +// concurrency model. +func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { + r.versionType = versionType + r.source = nil + return r +} + +// String returns the on-wire representation of the delete request, +// concatenated as a single string. +func (r *BulkDeleteRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the delete request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +func (r *BulkDeleteRequest) Source() ([]string, error) { + if r.source != nil { + return r.source, nil + } + lines := make([]string, 1) + + source := make(map[string]interface{}) + deleteCommand := make(map[string]interface{}) + if r.index != "" { + deleteCommand["_index"] = r.index + } + if r.typ != "" { + deleteCommand["_type"] = r.typ + } + if r.id != "" { + deleteCommand["_id"] = r.id + } + if r.parent != "" { + deleteCommand["_parent"] = r.parent + } + if r.routing != "" { + deleteCommand["_routing"] = r.routing + } + if r.version > 0 { + deleteCommand["_version"] = r.version + } + if r.versionType != "" { + deleteCommand["_version_type"] = r.versionType + } + source["delete"] = deleteCommand + + body, err := json.Marshal(source) + if err != nil { + return nil, err + } + + lines[0] = string(body) + r.source = lines + + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go new file mode 100644 index 000000000..6ac429d8b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestBulkDeleteRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"), + Expected: []string{ + `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`, + }, + }, + // #1 + { + Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Parent("2"), + Expected: []string{ + `{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"tweet"}}`, + }, + }, + // #2 + { + Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Routing("3"), + Expected: []string{ + `{"delete":{"_id":"1","_index":"index1","_routing":"3","_type":"tweet"}}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line) + } + } + } +} + +var bulkDeleteRequestSerializationResult string + +func BenchmarkBulkDeleteRequestSerialization(b *testing.B) { + r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + var s string + for n := 0; n < b.N; n++ { + s = r.String() + r.source = nil // Don't let caching spoil the benchmark + } + bulkDeleteRequestSerializationResult = s // ensure the compiler doesn't optimize +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go new file mode 100644 index 000000000..6e9e0951f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go @@ -0,0 +1,225 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// BulkIndexRequest is a request to add a document to Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html +// for details. +type BulkIndexRequest struct { + BulkableRequest + index string + typ string + id string + opType string + routing string + parent string + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} + pipeline string + retryOnConflict *int + ttl string + + source []string +} + +// NewBulkIndexRequest returns a new BulkIndexRequest. +// The operation type is "index" by default. +func NewBulkIndexRequest() *BulkIndexRequest { + return &BulkIndexRequest{ + opType: "index", + } +} + +// Index specifies the Elasticsearch index to use for this index request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this index request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to index. 
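+//
+// A typical request might be built like this (illustrative; the serialized
+// form matches the tests for this type):
+//
+//	r := NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").Doc(doc)
+//	// action line: {"index":{"_id":"1","_index":"index1","_type":"tweet"}}
+//	// source line: the JSON-marshaled doc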
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { + r.id = id + r.source = nil + return r +} + +// OpType specifies if this request should follow create-only or upsert +// behavior. This follows the OpType of the standard document index API. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type +// for details. +func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { + r.opType = opType + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { + r.parent = parent + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { + r.version = version + r.source = nil + return r +} + +// VersionType specifies how versions are created. It can be e.g. internal, +// external, external_gte, or force. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning +// for details. +func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Doc specifies the document to index. +func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { + r.doc = doc + r.source = nil + return r +} + +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// TTL is an expiration time for the document. +func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest { + r.ttl = ttl + r.source = nil + return r +} + +// Pipeline to use while processing the request. +func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest { + r.pipeline = pipeline + r.source = nil + return r +} + +// String returns the on-wire representation of the index request, +// concatenated as a single string. +func (r *BulkIndexRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the index request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +func (r *BulkIndexRequest) Source() ([]string, error) { + // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + // { "field1" : "value1" } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "index" ... 
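+	// (The command key is the op type, so OpType("create") emits a
+	// {"create":{...}} action line instead of {"index":{...}}.)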
+ command := make(map[string]interface{}) + indexCommand := make(map[string]interface{}) + if r.index != "" { + indexCommand["_index"] = r.index + } + if r.typ != "" { + indexCommand["_type"] = r.typ + } + if r.id != "" { + indexCommand["_id"] = r.id + } + if r.routing != "" { + indexCommand["_routing"] = r.routing + } + if r.parent != "" { + indexCommand["_parent"] = r.parent + } + if r.version > 0 { + indexCommand["_version"] = r.version + } + if r.versionType != "" { + indexCommand["_version_type"] = r.versionType + } + if r.retryOnConflict != nil { + indexCommand["_retry_on_conflict"] = *r.retryOnConflict + } + if r.ttl != "" { + indexCommand["_ttl"] = r.ttl + } + if r.pipeline != "" { + indexCommand["pipeline"] = r.pipeline + } + command[r.opType] = indexCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // "field1" ... + if r.doc != nil { + switch t := r.doc.(type) { + default: + body, err := json.Marshal(r.doc) + if err != nil { + return nil, err + } + lines[1] = string(body) + case json.RawMessage: + lines[1] = string(t) + case *json.RawMessage: + lines[1] = string(*t) + case string: + lines[1] = t + case *string: + lines[1] = *t + } + } else { + lines[1] = "{}" + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go new file mode 100644 index 000000000..fe95bd65c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go @@ -0,0 +1,103 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + "time" +) + +func TestBulkIndexRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #1 + { + Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #2 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #3 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").RetryOnConflict(42). 
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #4
+		{
+			Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").Pipeline("my_pipeline").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_type":"tweet","pipeline":"my_pipeline"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #5
+		{
+			Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").TTL("1m").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}
+
+var bulkIndexRequestSerializationResult string
+
+func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
+	r := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").
+		Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+	var s string
+	for n := 0; n < b.N; n++ {
+		s = r.String()
+		r.source = nil // Don't let caching spoil the benchmark
+	}
+	bulkIndexRequestSerializationResult = s // ensure the compiler doesn't optimize
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
new file mode 100644
index 000000000..b69d9b89c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
@@ -0,0 +1,543 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/backoff"
+)
+
+// BulkProcessorService makes it easy to process bulk requests. It lets you
+// set policies for when to flush new bulk requests, e.g. based on the number
+// of actions, on the size of the actions, and/or periodically. It also lets
+// you control the number of concurrent bulk requests allowed to be executed
+// in parallel.
+//
+// BulkProcessorService, by default, commits either every 1000 requests or when the
+// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
+// commit periodically. BulkProcessorService also retries failed commits by
+// default, using an exponential backoff algorithm.
+//
+// The caller is responsible for setting the index and type on every
+// bulk request added to BulkProcessorService.
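+//
+// A typical setup might look like this (illustrative sketch; assumes an
+// initialized *Client named "client"):
+//
+//	p, err := client.BulkProcessor().
+//		Name("backfill-1").
+//		Workers(2).
+//		BulkActions(1000).               // commit after 1000 requests
+//		BulkSize(2 << 20).               // ...or after ~2 MB of payload
+//		FlushInterval(30 * time.Second). // ...or every 30 seconds
+//		Do()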
+//
+// BulkProcessorService takes ideas from the BulkProcessor of the
+// Elasticsearch Java API as documented in
+// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
+type BulkProcessorService struct {
+	c              *Client
+	beforeFn       BulkBeforeFunc
+	afterFn        BulkAfterFunc
+	name           string        // name of processor
+	numWorkers     int           // # of workers (>= 1)
+	bulkActions    int           // # of requests after which to commit
+	bulkSize       int           // # of bytes after which to commit
+	flushInterval  time.Duration // periodic flush interval
+	wantStats      bool          // indicates whether to gather statistics
+	initialTimeout time.Duration // initial wait time before retry on errors
+	maxTimeout     time.Duration // max time to wait for retry on errors
+}
+
+// NewBulkProcessorService creates a new BulkProcessorService.
+func NewBulkProcessorService(client *Client) *BulkProcessorService {
+	return &BulkProcessorService{
+		c:              client,
+		numWorkers:     1,
+		bulkActions:    1000,
+		bulkSize:       5 << 20, // 5 MB
+		initialTimeout: time.Duration(200) * time.Millisecond,
+		maxTimeout:     time.Duration(10000) * time.Millisecond,
+	}
+}
+
+// BulkBeforeFunc defines the signature of callbacks that are executed
+// before a commit to Elasticsearch.
+type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
+
+// BulkAfterFunc defines the signature of callbacks that are executed
+// after a commit to Elasticsearch. The err parameter signals an error.
+type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
+
+// Before specifies a function to be executed before bulk requests get committed
+// to Elasticsearch.
+func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
+	s.beforeFn = fn
+	return s
+}
+
+// After specifies a function to be executed when bulk requests have been
+// committed to Elasticsearch. The After callback executes both when the
+// commit was successful as well as on failures.
+func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
+	s.afterFn = fn
+	return s
+}
+
+// Name is an optional name to identify this bulk processor.
+func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
+	s.name = name
+	return s
+}
+
+// Workers sets the number of concurrent workers committing bulk requests.
+// Defaults to 1 and must be greater than or equal to 1.
+func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
+	s.numWorkers = num
+	return s
+}
+
+// BulkActions specifies when to flush based on the number of actions
+// currently added. Defaults to 1000 and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
+	s.bulkActions = bulkActions
+	return s
+}
+
+// BulkSize specifies when to flush based on the size (in bytes) of the actions
+// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
+	s.bulkSize = bulkSize
+	return s
+}
+
+// FlushInterval specifies when to flush at the end of the given interval.
+// This is disabled by default. If you want the bulk processor to
+// operate completely asynchronously, set both BulkActions and BulkSize to
+// -1 and set the FlushInterval to a meaningful interval.
+func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
+	s.flushInterval = interval
+	return s
+}
+
+// Stats tells the bulk processor to gather stats while running.
+// Use Stats to return the stats. This is disabled by default. +func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { + s.wantStats = wantStats + return s +} + +// Do creates a new BulkProcessor and starts it. +// Consider the BulkProcessor as a running instance that accepts bulk requests +// and commits them to Elasticsearch, spreading the work across one or more +// workers. +// +// You can interoperate with the BulkProcessor returned by Do, e.g. Start and +// Stop (or Close) it. +// +// Calling Do several times returns new BulkProcessors. You probably don't +// want to do this. BulkProcessorService implements just a builder pattern. +func (s *BulkProcessorService) Do() (*BulkProcessor, error) { + p := newBulkProcessor( + s.c, + s.beforeFn, + s.afterFn, + s.name, + s.numWorkers, + s.bulkActions, + s.bulkSize, + s.flushInterval, + s.wantStats, + s.initialTimeout, + s.maxTimeout) + + err := p.Start() + if err != nil { + return nil, err + } + return p, nil +} + +// -- Bulk Processor Statistics -- + +// BulkProcessorStats contains various statistics of a bulk processor +// while it is running. Use the Stats func to return it while running. +type BulkProcessorStats struct { + Flushed int64 // number of times the flush interval has been invoked + Committed int64 // # of times workers committed bulk requests + Indexed int64 // # of requests indexed + Created int64 // # of requests that ES reported as creates (201) + Updated int64 // # of requests that ES reported as updates + Deleted int64 // # of requests that ES reported as deletes + Succeeded int64 // # of requests that ES reported as successful + Failed int64 // # of requests that ES reported as failed + + Workers []*BulkProcessorWorkerStats // stats for each worker +} + +// BulkProcessorWorkerStats represents per-worker statistics. +type BulkProcessorWorkerStats struct { + Queued int64 // # of requests queued in this worker + LastDuration time.Duration // duration of last commit +} + +// newBulkProcessorStats initializes and returns a BulkProcessorStats struct. +func newBulkProcessorStats(workers int) *BulkProcessorStats { + stats := &BulkProcessorStats{ + Workers: make([]*BulkProcessorWorkerStats, workers), + } + for i := 0; i < workers; i++ { + stats.Workers[i] = &BulkProcessorWorkerStats{} + } + return stats +} + +func (st *BulkProcessorStats) dup() *BulkProcessorStats { + dst := new(BulkProcessorStats) + dst.Flushed = st.Flushed + dst.Committed = st.Committed + dst.Indexed = st.Indexed + dst.Created = st.Created + dst.Updated = st.Updated + dst.Deleted = st.Deleted + dst.Succeeded = st.Succeeded + dst.Failed = st.Failed + for _, src := range st.Workers { + dst.Workers = append(dst.Workers, src.dup()) + } + return dst +} + +func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats { + dst := new(BulkProcessorWorkerStats) + dst.Queued = st.Queued + dst.LastDuration = st.LastDuration + return dst +} + +// -- Bulk Processor -- + +// BulkProcessor encapsulates a task that accepts bulk requests and +// orchestrates committing them to Elasticsearch via one or more workers. +// +// BulkProcessor is returned by setting up a BulkProcessorService and +// calling the Do method. 
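+//
+// A sketch of the typical lifecycle (illustrative; assumes an initialized
+// *Client named "client" and a document value "doc"):
+//
+//	p, err := client.BulkProcessor().Name("worker-1").Do() // start workers
+//	p.Add(NewBulkIndexRequest().Index("idx").Type("doc").Id("1").Doc(doc))
+//	err = p.Flush() // optional: force a commit now
+//	err = p.Close() // commit outstanding requests and stop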
+type BulkProcessor struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string + bulkActions int + bulkSize int + numWorkers int + executionId int64 + requestsC chan BulkableRequest + workerWg sync.WaitGroup + workers []*bulkWorker + flushInterval time.Duration + flusherStopC chan struct{} + wantStats bool + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors + + startedMu sync.Mutex // guards the following block + started bool + + statsMu sync.Mutex // guards the following block + stats *BulkProcessorStats +} + +func newBulkProcessor( + client *Client, + beforeFn BulkBeforeFunc, + afterFn BulkAfterFunc, + name string, + numWorkers int, + bulkActions int, + bulkSize int, + flushInterval time.Duration, + wantStats bool, + initialTimeout time.Duration, + maxTimeout time.Duration) *BulkProcessor { + return &BulkProcessor{ + c: client, + beforeFn: beforeFn, + afterFn: afterFn, + name: name, + numWorkers: numWorkers, + bulkActions: bulkActions, + bulkSize: bulkSize, + flushInterval: flushInterval, + wantStats: wantStats, + initialTimeout: initialTimeout, + maxTimeout: maxTimeout, + } +} + +// Start starts the bulk processor. If the processor is already started, +// nil is returned. +func (p *BulkProcessor) Start() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + if p.started { + return nil + } + + // We must have at least one worker. + if p.numWorkers < 1 { + p.numWorkers = 1 + } + + p.requestsC = make(chan BulkableRequest) + p.executionId = 0 + p.stats = newBulkProcessorStats(p.numWorkers) + + // Create and start up workers. + p.workers = make([]*bulkWorker, p.numWorkers) + for i := 0; i < p.numWorkers; i++ { + p.workerWg.Add(1) + p.workers[i] = newBulkWorker(p, i) + go p.workers[i].work() + } + + // Start the ticker for flush (if enabled) + if int64(p.flushInterval) > 0 { + p.flusherStopC = make(chan struct{}) + go p.flusher(p.flushInterval) + } + + p.started = true + + return nil +} + +// Stop is an alias for Close. +func (p *BulkProcessor) Stop() error { + return p.Close() +} + +// Close stops the bulk processor previously started with Do. +// If it is already stopped, this is a no-op and nil is returned. +// +// By implementing Close, BulkProcessor implements the io.Closer interface. +func (p *BulkProcessor) Close() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + // Already stopped? Do nothing. + if !p.started { + return nil + } + + // Stop flusher (if enabled) + if p.flusherStopC != nil { + p.flusherStopC <- struct{}{} + <-p.flusherStopC + close(p.flusherStopC) + p.flusherStopC = nil + } + + // Stop all workers. + close(p.requestsC) + p.workerWg.Wait() + + p.started = false + + return nil +} + +// Stats returns the latest bulk processor statistics. +// Collecting stats must be enabled first by calling Stats(true) on +// the service that created this processor. +func (p *BulkProcessor) Stats() BulkProcessorStats { + p.statsMu.Lock() + defer p.statsMu.Unlock() + return *p.stats.dup() +} + +// Add adds a single request to commit by the BulkProcessorService. +// +// The caller is responsible for setting the index and type on the request. +func (p *BulkProcessor) Add(request BulkableRequest) { + p.requestsC <- request +} + +// Flush manually asks all workers to commit their outstanding requests. +// It returns only when all workers acknowledge completion. 
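+//
+// (Each worker is signalled on its flush channel and replies on its
+// acknowledgement channel, so Flush blocks until every worker has
+// committed its outstanding requests.)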
+func (p *BulkProcessor) Flush() error { + p.statsMu.Lock() + p.stats.Flushed++ + p.statsMu.Unlock() + + for _, w := range p.workers { + w.flushC <- struct{}{} + <-w.flushAckC // wait for completion + } + return nil +} + +// flusher is a single goroutine that periodically asks all workers to +// commit their outstanding bulk requests. It is only started if +// FlushInterval is greater than 0. +func (p *BulkProcessor) flusher(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: // Periodic flush + p.Flush() // TODO swallow errors here? + + case <-p.flusherStopC: + p.flusherStopC <- struct{}{} + return + } + } +} + +// -- Bulk Worker -- + +// bulkWorker encapsulates a single worker, running in a goroutine, +// receiving bulk requests and eventually committing them to Elasticsearch. +// It is strongly bound to a BulkProcessor. +type bulkWorker struct { + p *BulkProcessor + i int + bulkActions int + bulkSize int + service *BulkService + flushC chan struct{} + flushAckC chan struct{} +} + +// newBulkWorker creates a new bulkWorker instance. +func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { + return &bulkWorker{ + p: p, + i: i, + bulkActions: p.bulkActions, + bulkSize: p.bulkSize, + service: NewBulkService(p.c), + flushC: make(chan struct{}), + flushAckC: make(chan struct{}), + } +} + +// work waits for bulk requests and manual flush calls on the respective +// channels and is invoked as a goroutine when the bulk processor is started. +func (w *bulkWorker) work() { + defer func() { + w.p.workerWg.Done() + close(w.flushAckC) + close(w.flushC) + }() + + var stop bool + for !stop { + select { + case req, open := <-w.p.requestsC: + if open { + // Received a new request + w.service.Add(req) + if w.commitRequired() { + w.commit() // TODO swallow errors here? + } + } else { + // Channel closed: Stop. + stop = true + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + } + + case <-w.flushC: + // Commit outstanding requests + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + w.flushAckC <- struct{}{} + } + } +} + +// commit commits the bulk requests in the given service, +// invoking callbacks as specified. 
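+//
+// (Failed commits are retried with exponential backoff between the
+// processor's initialTimeout and maxTimeout; the notify callback below
+// logs each retry before the next attempt.)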
+func (w *bulkWorker) commit() error { + var res *BulkResponse + + // commitFunc will commit bulk requests and, on failure, be retried + // via exponential backoff + commitFunc := func() error { + var err error + res, err = w.service.Do(context.Background()) + return err + } + // notifyFunc will be called if retry fails + notifyFunc := func(err error, d time.Duration) { + w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err) + } + + id := atomic.AddInt64(&w.p.executionId, 1) + + // Update # documents in queue before eventual retries + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + } + w.p.statsMu.Unlock() + + // Save requests because they will be reset in commitFunc + reqs := w.service.requests + + // Invoke before callback + if w.p.beforeFn != nil { + w.p.beforeFn(id, reqs) + } + + // Commit bulk requests + policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true) + err := backoff.RetryNotify(commitFunc, policy, notifyFunc) + w.updateStats(res) + if err != nil { + w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) + } + + // Invoke after callback + if w.p.afterFn != nil { + w.p.afterFn(id, reqs, res, err) + } + + return err +} + +func (w *bulkWorker) updateStats(res *BulkResponse) { + // Update stats + if res != nil { + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Committed++ + if res != nil { + w.p.stats.Indexed += int64(len(res.Indexed())) + w.p.stats.Created += int64(len(res.Created())) + w.p.stats.Updated += int64(len(res.Updated())) + w.p.stats.Deleted += int64(len(res.Deleted())) + w.p.stats.Succeeded += int64(len(res.Succeeded())) + w.p.stats.Failed += int64(len(res.Failed())) + } + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond + } + w.p.statsMu.Unlock() + } +} + +// commitRequired returns true if the service has to commit its +// bulk requests. This can be either because the number of actions +// or the estimated size in bytes is larger than specified in the +// BulkProcessorService. +func (w *bulkWorker) commitRequired() bool { + if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { + return true + } + if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { + return true + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go new file mode 100644 index 000000000..89e096322 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go @@ -0,0 +1,423 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "math/rand" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestBulkProcessorDefaults(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + p := client.BulkProcessor() + if p == nil { + t.Fatalf("expected BulkProcessorService; got: %v", p) + } + if got, want := p.name, ""; got != want { + t.Errorf("expected %q; got: %q", want, got) + } + if got, want := p.numWorkers, 1; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkActions, 1000; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkSize, 5*1024*1024; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.flushInterval, time.Duration(0); got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := p.wantStats, false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestBulkProcessorCommitOnBulkActions(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-1"). + Workers(1). + BulkActions(100). + BulkSize(-1), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-2"). + Workers(2). + BulkActions(100). + BulkSize(-1), + ) +} + +func TestBulkProcessorCommitOnBulkSize(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-1"). + Workers(1). + BulkActions(-1). + BulkSize(64*1024), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-2"). + Workers(2). + BulkActions(-1). + BulkSize(64*1024), + ) +} + +func TestBulkProcessorBasedOnFlushInterval(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + var afterRequests int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + atomic.AddInt64(&afterRequests, int64(len(requests))) + } + + svc := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(1 * time.Second). + Before(beforeFn). + After(afterFn) + + p, err := svc.Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should flush at least once + time.Sleep(2 * time.Second) + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed == 0 { + t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if got, want := afterRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to after callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorClose(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + var afterRequests int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + atomic.AddInt64(&afterRequests, int64(len(requests))) + } + + p, err := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Before(beforeFn).After(afterFn). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // Close should flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if got, want := afterRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to after callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorFlush(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + p, err := client.BulkProcessor(). + Name("ManualFlush"). + Workers(10). + BulkActions(-1). + BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Stats(true). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 100 + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // No flush yet + stats := p.Stats() + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + + // Manual flush + err = p.Flush() + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Second) + + // Now flushed + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Close should not start another flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + // Still 1 flush + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +// -- Helper -- + +func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) { + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + var afterRequests int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + atomic.AddInt64(&afterRequests, int64(len(requests))) + } + + p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do() + if err != nil { + t.Fatal(err) + } + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. 
%s", i, randomString(1+rand.Intn(63)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + stats := p.Stats() + + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", stats.Flushed) + } + if stats.Committed <= 0 { + t.Errorf("expected committed > %d; got: %d", 0, stats.Committed) + } + if got, want := stats.Indexed, int64(numDocs); got != want { + t.Errorf("expected indexed = %d; got: %d", want, got) + } + if got, want := stats.Created, int64(0); got != want { + t.Errorf("expected created = %d; got: %d", want, got) + } + if got, want := stats.Updated, int64(0); got != want { + t.Errorf("expected updated = %d; got: %d", want, got) + } + if got, want := stats.Deleted, int64(0); got != want { + t.Errorf("expected deleted = %d; got: %d", want, got) + } + if got, want := stats.Succeeded, int64(numDocs); got != want { + t.Errorf("expected succeeded = %d; got: %d", want, got) + } + if got, want := stats.Failed, int64(0); got != want { + t.Errorf("expected failed = %d; got: %d", want, got) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if got, want := afterRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to after callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go new file mode 100644 index 000000000..ce3bf0768 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go @@ -0,0 +1,17 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// -- Bulkable request (index/update/delete) -- + +// BulkableRequest is a generic interface to bulkable requests. +type BulkableRequest interface { + fmt.Stringer + Source() ([]string, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go new file mode 100644 index 000000000..5a57871cb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go @@ -0,0 +1,508 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestBulk(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. 
Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + + if bulkRequest.NumberOfActions() != 3 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) + } + + bulkResponse, err := bulkRequest.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should not exist + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } + + // Document with Id="2" should exist + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Update + updateDoc := struct { + Retweets int `json:"retweets"` + }{ + 42, + } + update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc) + bulkRequest = client.Bulk() + bulkRequest = bulkRequest.Add(update1Req) + + if bulkRequest.NumberOfActions() != 1 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) + } + + bulkResponse, err = bulkRequest.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should have a retweets count of 42 + doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if doc == nil { + t.Fatal("expected doc to be != nil; got nil") + } + if !doc.Found { + t.Fatalf("expected doc to be found; got found = %v", doc.Found) + } + if doc.Source == nil { + t.Fatal("expected doc source to be != nil; got nil") + } + var updatedTweet tweet + err = json.Unmarshal(*doc.Source, &updatedTweet) + if err != nil { + t.Fatal(err) + } + if updatedTweet.Retweets != 42 { + t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets) + } + + // Update with script + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + RetryOnConflict(3). 
+ Script(NewScript("ctx._source.retweets += params.v").Param("v", 1)) + bulkRequest = client.Bulk() + bulkRequest = bulkRequest.Add(update2Req) + if bulkRequest.NumberOfActions() != 1 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) + } + bulkResponse, err = bulkRequest.Refresh("wait_for").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should have a retweets count of 43 + doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if doc == nil { + t.Fatal("expected doc to be != nil; got nil") + } + if !doc.Found { + t.Fatalf("expected doc to be found; got found = %v", doc.Found) + } + if doc.Source == nil { + t.Fatal("expected doc source to be != nil; got nil") + } + err = json.Unmarshal(*doc.Source, &updatedTweet) + if err != nil { + t.Fatal(err) + } + if updatedTweet.Retweets != 43 { + t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets) + } +} + +func TestBulkWithIndexSetOnClient(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + + bulkRequest := client.Bulk().Index(testIndexName).Type("tweet") + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + + if bulkRequest.NumberOfActions() != 3 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) + } + + bulkResponse, err := bulkRequest.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + // Document with Id="1" should not exist + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } + + // Document with Id="2" should exist + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } +} + +func TestBulkRequestsSerialization(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). 
+ Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"doc":{"retweets":42}} +` + got, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } + + // Run the bulk request + bulkResponse, err := bulkRequest.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + if bulkResponse.Took == 0 { + t.Errorf("expected took to be > 0; got %d", bulkResponse.Took) + } + if bulkResponse.Errors { + t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors) + } + if len(bulkResponse.Items) != 4 { + t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items)) + } + + // Indexed actions + indexed := bulkResponse.Indexed() + if indexed == nil { + t.Fatal("expected indexed to be != nil; got nil") + } + if len(indexed) != 1 { + t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed)) + } + if indexed[0].Id != "1" { + t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id) + } + if indexed[0].Status != 201 { + t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status) + } + + // Created actions + created := bulkResponse.Created() + if created == nil { + t.Fatal("expected created to be != nil; got nil") + } + if len(created) != 1 { + t.Fatalf("expected len(created) == %d; got %d", 1, len(created)) + } + if created[0].Id != "2" { + t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id) + } + if created[0].Status != 201 { + t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status) + } + + // Deleted actions + deleted := bulkResponse.Deleted() + if deleted == nil { + t.Fatal("expected deleted to be != nil; got nil") + } + if len(deleted) != 1 { + t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted)) + } + if deleted[0].Id != "1" { + t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id) + } + if deleted[0].Status != 200 { + t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status) + } + if !deleted[0].Found { + t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found) + } + + // Updated actions + updated := bulkResponse.Updated() + if updated == nil { + t.Fatal("expected updated to be != nil; got nil") + } + if len(updated) != 1 { + t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated)) + } + if updated[0].Id != "2" { + t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id) + } + if updated[0].Status != 200 { + t.Errorf("expected 
updated[0].Status == %d; got %d", 200, updated[0].Status) + } + if updated[0].Version != 2 { + t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version) + } + + // Succeeded actions + succeeded := bulkResponse.Succeeded() + if succeeded == nil { + t.Fatal("expected succeeded to be != nil; got nil") + } + if len(succeeded) != 4 { + t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded)) + } + + // ById + id1Results := bulkResponse.ById("1") + if id1Results == nil { + t.Fatal("expected id1Results to be != nil; got nil") + } + if len(id1Results) != 2 { + t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results)) + } + if id1Results[0].Id != "1" { + t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id) + } + if id1Results[0].Status != 201 { + t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status) + } + if id1Results[0].Version != 1 { + t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version) + } + if id1Results[1].Id != "1" { + t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id) + } + if id1Results[1].Status != 200 { + t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status) + } + if id1Results[1].Version != 2 { + t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version) + } +} + +func TestFailedBulkRequests(t *testing.T) { + js := `{ + "took" : 2, + "errors" : true, + "items" : [ { + "index" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 1, + "status" : 201 + } + }, { + "create" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 1, + "status" : 423, + "error" : { + "type":"routing_missing_exception", + "reason":"routing is required for [elastic-test2]/[comment]/[1]" + } + } + }, { + "delete" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 2, + "status" : 404, + "found" : false + } + }, { + "update" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 2, + "status" : 200 + } + } ] +}` + + var resp BulkResponse + err := json.Unmarshal([]byte(js), &resp) + if err != nil { + t.Fatal(err) + } + failed := resp.Failed() + if len(failed) != 2 { + t.Errorf("expected %d failed items; got: %d", 2, len(failed)) + } +} + +func TestBulkEstimatedSizeInBytes(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + // The estimated size of the bulk request in bytes must be at least + // the length of the body request. 
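+	// Each request is estimated as len(r.String())+1, i.e. one extra byte
+	// per line for the trailing newline, so the sum can only meet or exceed
+	// the raw body length (see TestBulkEstimateSizeInBytesLength below).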
+ raw, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatal(err) + } + rawlen := int64(len([]byte(raw))) + + if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } + + // Reset should also reset the calculated estimated byte size + bulkRequest.reset() + + if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } +} + +func TestBulkEstimateSizeInBytesLength(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + s := client.Bulk() + r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + s = s.Add(r) + if got, want := s.estimateSizeInBytes(r), int64(1+len(r.String())); got != want { + t.Fatalf("expected %d; got: %d", want, got) + } +} + +var benchmarkBulkEstimatedSizeInBytes int64 + +func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) { + client := setupTestClientAndCreateIndex(b) + s := client.Bulk() + var result int64 + for n := 0; n < b.N; n++ { + s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"})) + s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"})) + s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")) + result = s.EstimatedSizeInBytes() + s.reset() + } + b.ReportAllocs() + benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize +} + +func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) { + client := setupTestClientAndCreateIndex(b) + s := client.Bulk() + var result int64 + for n := 0; n < b.N; n++ { + for i := 0; i < 100; i++ { + s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"})) + s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"})) + s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")) + } + result = s.EstimatedSizeInBytes() + s.reset() + } + b.ReportAllocs() + benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go new file mode 100644 index 000000000..7f77eb3db --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go @@ -0,0 +1,243 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// BulkUpdateRequest is a request to update a document in Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-bulk.html +// for details. +type BulkUpdateRequest struct { + BulkableRequest + index string + typ string + id string + + routing string + parent string + script *Script + version int64 // default is MATCH_ANY + versionType string // default is "internal" + retryOnConflict *int + upsert interface{} + docAsUpsert *bool + doc interface{} + + source []string +} + +// NewBulkUpdateRequest returns a new BulkUpdateRequest. +func NewBulkUpdateRequest() *BulkUpdateRequest { + return &BulkUpdateRequest{} +} + +// Index specifies the Elasticsearch index to use for this update request. +// If unspecified, the index set on the BulkService will be used. 
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this update request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to update. +func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { + r.id = id + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { + r.parent = parent + r.source = nil + return r +} + +// Script specifies an update script. +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-bulk.html#bulk-update +// and https://www.elastic.co/guide/en/elasticsearch/reference/2.x/modules-scripting.html +// for details. +func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { + r.script = script + r.source = nil + return r +} + +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Doc specifies the updated document. +func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { + r.doc = doc + r.source = nil + return r +} + +// DocAsUpsert indicates whether the contents of Doc should be used as +// the Upsert value. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/docs-update.html#_literal_doc_as_upsert_literal +// for details. +func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { + r.docAsUpsert = &docAsUpsert + r.source = nil + return r +} + +// Upsert specifies the document to use for upserts. It will be used for +// create if the original document does not exist. +func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { + r.upsert = doc + r.source = nil + return r +} + +// String returns the on-wire representation of the update request, +// concatenated as a single string. 
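+//
+// For illustration, a request built as
+//
+//	NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+//		Doc(struct {
+//			Counter int64 `json:"counter"`
+//		}{Counter: 42})
+//
+// stringifies to the two bulk protocol lines
+//
+//	{"update":{"_id":"1","_index":"index1","_type":"tweet"}}
+//	{"doc":{"counter":42}}
+//
+// joined by a newline (index, type and id above are sample values taken
+// from the serialization tests in this package).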
+func (r *BulkUpdateRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { + switch t := data.(type) { + default: + body, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + return *t, nil + } +} + +// Source returns the on-wire representation of the update request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +// for details. +func (r BulkUpdateRequest) Source() ([]string, error) { + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "doc" : { "field1" : "value1", ... } } + // or + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "script" : { ... } } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "update" ... + command := make(map[string]interface{}) + updateCommand := make(map[string]interface{}) + if r.index != "" { + updateCommand["_index"] = r.index + } + if r.typ != "" { + updateCommand["_type"] = r.typ + } + if r.id != "" { + updateCommand["_id"] = r.id + } + if r.routing != "" { + updateCommand["_routing"] = r.routing + } + if r.parent != "" { + updateCommand["_parent"] = r.parent + } + if r.version > 0 { + updateCommand["_version"] = r.version + } + if r.versionType != "" { + updateCommand["_version_type"] = r.versionType + } + if r.retryOnConflict != nil { + updateCommand["_retry_on_conflict"] = *r.retryOnConflict + } + command["update"] = updateCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // 2nd line: {"doc" : { ... }} or {"script": {...}} + source := make(map[string]interface{}) + if r.docAsUpsert != nil { + source["doc_as_upsert"] = *r.docAsUpsert + } + if r.upsert != nil { + source["upsert"] = r.upsert + } + if r.doc != nil { + // {"doc":{...}} + source["doc"] = r.doc + } else if r.script != nil { + // {"script":...} + src, err := r.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + lines[1], err = r.getSourceAsString(source) + if err != nil { + return nil, err + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go new file mode 100644 index 000000000..b7d06774e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go @@ -0,0 +1,93 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestBulkUpdateRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"doc":{"counter":42}}`, + }, + }, + // #1 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + DocAsUpsert(true). + Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`, + `{"doc":{"counter":42},"doc_as_upsert":true}`, + }, + }, + // #2 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)). + Upsert(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`, + `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}},"upsert":{"counter":42}}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line) + } + } + } +} + +var bulkUpdateRequestSerializationResult string + +func BenchmarkBulkUpdateRequestSerialization(b *testing.B) { + r := NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }) + var s string + for n := 0; n < b.N; n++ { + s = r.String() + r.source = nil // Don't let caching spoil the benchmark + } + bulkUpdateRequestSerializationResult = s // ensure the compiler doesn't optimize +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go b/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go new file mode 100644 index 000000000..1473f1466 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go @@ -0,0 +1,38 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "net/url" + +// canonicalize takes a list of URLs and returns its canonicalized form, i.e. +// remove anything but scheme, userinfo, host, path, and port. +// It also removes all trailing slashes. It also skips invalid URLs or +// URLs that do not use protocol http or https. 
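+// For URLs given without a scheme, DefaultScheme ("http") is assumed.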
+// +// Example: +// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200 +// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1 +// 127.0.0.1:9200 -> http://127.0.0.1:9200 +func canonicalize(rawurls ...string) []string { + var canonicalized []string + for _, rawurl := range rawurls { + u, err := url.Parse(rawurl) + if err == nil { + if len(u.Scheme) == 0 { + u.Scheme = DefaultScheme + } + if u.Scheme == "http" || u.Scheme == "https" { + // Trim trailing slashes + for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' { + u.Path = u.Path[0 : len(u.Path)-1] + } + u.Fragment = "" + u.RawQuery = "" + canonicalized = append(canonicalized, u.String()) + } + } + } + return canonicalized +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go b/vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go new file mode 100644 index 000000000..ab597331f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "reflect" + "testing" +) + +func TestCanonicalize(t *testing.T) { + tests := []struct { + Input []string + Output []string + }{ + { + Input: []string{"http://127.0.0.1/"}, + Output: []string{"http://127.0.0.1"}, + }, + { + Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"}, + Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"}, + }, + { + Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"}, + Output: []string{"http://user:secret@127.0.0.1/path"}, + }, + { + Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"}, + Output: []string{"https://somewhere.on.mars:9999/path"}, + }, + { + Input: []string{"https://prod1:9999/one?query=1#fragment", "https://prod2:9998/two?query=1#fragment"}, + Output: []string{"https://prod1:9999/one", "https://prod2:9998/two"}, + }, + { + Input: []string{"http://127.0.0.1/one/"}, + Output: []string{"http://127.0.0.1/one"}, + }, + { + Input: []string{"http://127.0.0.1/one///"}, + Output: []string{"http://127.0.0.1/one"}, + }, + { + Input: []string{"127.0.0.1/"}, + Output: []string{"http://127.0.0.1"}, + }, + { + Input: []string{"127.0.0.1:9200"}, + Output: []string{"http://127.0.0.1:9200"}, + }, + } + + for _, test := range tests { + got := canonicalize(test.Input...) + if !reflect.DeepEqual(got, test.Output) { + t.Errorf("expected %v; got: %v", test.Output, got) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go new file mode 100644 index 000000000..1310e169b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go @@ -0,0 +1,103 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// ClearScrollService clears one or more scroll contexts by their ids. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api +// for details. +type ClearScrollService struct { + client *Client + pretty bool + scrollId []string +} + +// NewClearScrollService creates a new ClearScrollService. 
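+// It is usually obtained via Client.ClearScroll, as exercised by the tests
+// for this service; scrollID below stands for an identifier returned by a
+// previous scroll request:
+//
+//	res, err := client.ClearScroll().ScrollId(scrollID).Do(ctx)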
+func NewClearScrollService(client *Client) *ClearScrollService { + return &ClearScrollService{ + client: client, + scrollId: make([]string, 0), + } +} + +// ScrollId is a list of scroll IDs to clear. +// Use _all to clear all search contexts. +func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { + s.scrollId = append(s.scrollId, scrollIds...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClearScrollService) buildURL() (string, url.Values, error) { + // Build URL + path := "/_search/scroll/" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClearScrollService) Validate() error { + var invalid []string + if len(s.scrollId) == 0 { + invalid = append(invalid, "ScrollId") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body := strings.Join(s.scrollId, ",") + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClearScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClearScrollResponse is the response of ClearScrollService.Do. +type ClearScrollResponse struct { +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go new file mode 100644 index 000000000..15e3a4414 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go @@ -0,0 +1,88 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + _ "net/http" + "testing" + + "golang.org/x/net/context" +) + +func TestClearScroll(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + res, err := client.Scroll(testIndexName).Size(1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected results != nil; got nil") + } + if res.ScrollId == "" { + t.Fatalf("expected scrollId in results; got %q", res.ScrollId) + } + + // Search should succeed + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Clear scroll id + clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if clearScrollRes == nil { + t.Fatal("expected results != nil; got nil") + } + + // Search result should fail + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO()) + if err == nil { + t.Fatalf("expected scroll to fail") + } +} + +func TestClearScrollValidate(t *testing.T) { + client := setupTestClient(t) + + // No scroll id -> fail with error + res, err := NewClearScrollService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected ClearScroll to fail without scroll ids") + } + if res != nil { + t.Fatalf("expected result to be nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/client.go b/vendor/gopkg.in/olivere/elastic.v5/client.go new file mode 100644 index 000000000..47e199d3c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/client.go @@ -0,0 +1,1608 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math/rand" + "net/http" + "net/http/httputil" + "net/url" + "regexp" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // Version is the current version of Elastic. + Version = "5.0.11" + + // DefaultUrl is the default endpoint of Elasticsearch on the local machine. + // It is used e.g. when initializing a new Client without a specific URL. + DefaultURL = "http://127.0.0.1:9200" + + // DefaultScheme is the default protocol scheme to use when sniffing + // the Elasticsearch cluster. + DefaultScheme = "http" + + // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default. 
+	DefaultHealthcheckEnabled = true
+
+	// DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+	// for a response from Elasticsearch on startup, i.e. when creating a
+	// client. After the client is started, a shorter timeout is commonly used
+	// (its default is specified in DefaultHealthcheckTimeout).
+	DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+	// DefaultHealthcheckTimeout specifies the time a running client waits for
+	// a response from Elasticsearch. Notice that the healthcheck timeout
+	// when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+	DefaultHealthcheckTimeout = 1 * time.Second
+
+	// DefaultHealthcheckInterval is the default interval between
+	// two health checks of the nodes in the cluster.
+	DefaultHealthcheckInterval = 60 * time.Second
+
+	// DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+	DefaultSnifferEnabled = true
+
+	// DefaultSnifferInterval is the interval between two sniffing procedures,
+	// i.e. the lookup of all nodes in the cluster and their addition/removal
+	// from the list of actual connections.
+	DefaultSnifferInterval = 15 * time.Minute
+
+	// DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+	// process that is initiated while creating a new client. For subsequent
+	// sniffing processes, DefaultSnifferTimeout is used (by default).
+	DefaultSnifferTimeoutStartup = 5 * time.Second
+
+	// DefaultSnifferTimeout is the default timeout after which the
+	// sniffing process times out. Notice that for the initial sniffing
+	// process, DefaultSnifferTimeoutStartup is used.
+	DefaultSnifferTimeout = 2 * time.Second
+
+	// DefaultMaxRetries is the number of retries of a single request after
+	// which Elastic gives up and returns an error. It is zero by default, so
+	// retries are disabled by default.
+	DefaultMaxRetries = 0
+
+	// DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
+	// a GET request with a body.
+	DefaultSendGetBodyAs = "GET"
+
+	// DefaultGzipEnabled specifies if gzip compression is enabled by default.
+	DefaultGzipEnabled = false
+
+	// off is used to disable timeouts.
+	off = -1 * time.Second
+)
+
+var (
+	// ErrNoClient is raised when no Elasticsearch node is available.
+	ErrNoClient = errors.New("no Elasticsearch node available")
+
+	// ErrRetry is raised when a request cannot be executed after the configured
+	// number of retries.
+	ErrRetry = errors.New("cannot connect after several retries")
+
+	// ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
+	// didn't return in time.
+	ErrTimeout = errors.New("timeout")
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+	c *http.Client // net/http Client to use for requests
+
+	connsMu sync.RWMutex // connsMu guards the next block
+	conns   []*conn      // all connections
+	cindex  int          // index into conns
+
+	mu                        sync.RWMutex  // guards the next block
+	urls                      []string      // set of URLs passed initially to the client
+	running                   bool          // true if the client's background processes are running
+	errorlog                  Logger        // error log for critical messages
+	infolog                   Logger        // information log for e.g. response times
+	tracelog                  Logger        // trace log for debugging
+	maxRetries                int           // max. number of retries
+	scheme                    string        // http or https
+	healthcheckEnabled        bool          // healthchecks enabled or disabled
+	healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+	healthcheckTimeout        time.Duration // time the healthcheck waits for a response from Elasticsearch
+	healthcheckInterval       time.Duration // interval between healthchecks
+	healthcheckStop           chan bool     // notify healthchecker to stop, and notify back
+	snifferEnabled            bool          // sniffer enabled or disabled
+	snifferTimeoutStartup     time.Duration // time the sniffer waits for a response from nodes info API on startup
+	snifferTimeout            time.Duration // time the sniffer waits for a response from nodes info API
+	snifferInterval           time.Duration // interval between sniffing
+	snifferStop               chan bool     // notify sniffer to stop, and notify back
+	decoder                   Decoder       // used to decode data sent from Elasticsearch
+	basicAuth                 bool          // indicates whether to send HTTP Basic Auth credentials
+	basicAuthUsername         string        // username for HTTP Basic Auth
+	basicAuthPassword         string        // password for HTTP Basic Auth
+	sendGetBodyAs             string        // override for when sending a GET with a body
+	requiredPlugins           []string      // list of required plugins
+	gzipEnabled               bool          // gzip compression enabled or disabled (default)
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// NewClient, by default, is meant to be long-lived and shared across
+// your application. If you need a short-lived client, e.g. for request-scope,
+// consider using NewSimpleClient instead.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+//   client, err := elastic.NewClient(
+//     elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
+//     elastic.SetMaxRetries(10),
+//     elastic.SetBasicAuth("user", "secret"))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SetSnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for +// advanced scenarios. +// +// An error is also returned when some configuration option is invalid or +// the new client cannot sniff the cluster (if enabled). +func NewClient(options ...ClientOptionFunc) (*Client, error) { + // Set up the client + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: DefaultMaxRetries, + healthcheckEnabled: DefaultHealthcheckEnabled, + healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, + healthcheckTimeout: DefaultHealthcheckTimeout, + healthcheckInterval: DefaultHealthcheckInterval, + healthcheckStop: make(chan bool), + snifferEnabled: DefaultSnifferEnabled, + snifferTimeoutStartup: DefaultSnifferTimeoutStartup, + snifferTimeout: DefaultSnifferTimeout, + snifferInterval: DefaultSnifferInterval, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + // Check if we can make a request to any of the specified URLs + if c.healthcheckEnabled { + if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { + return nil, err + } + } + + if c.snifferEnabled { + // Sniff the cluster initially + if err := c.sniff(c.snifferTimeoutStartup); err != nil { + return nil, err + } + } else { + // Do not sniff the cluster initially. Use the provided URLs instead. + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + } + + if c.healthcheckEnabled { + // Perform an initial health check + c.healthcheck(c.healthcheckTimeoutStartup, true) + } + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + if c.snifferEnabled { + go c.sniffer() // periodically update cluster information + } + if c.healthcheckEnabled { + go c.healthchecker() // start goroutine periodically ping all nodes of the cluster + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// NewSimpleClient creates a new short-lived Client that can be used in +// use cases where you need e.g. one client per request. +// +// While NewClient by default sets up e.g. periodic health checks +// and sniffing for new nodes in separate goroutines, NewSimpleClient does +// not and is meant as a simple replacement where you don't need all the +// heavy lifting of NewClient. +// +// NewSimpleClient does the following by default: First, all health checks +// are disabled, including timeouts and periodic checks. Second, sniffing +// is disabled, including timeouts and periodic checks. The number of retries +// is set to 1. NewSimpleClient also does not start any goroutines. +// +// Notice that you can still override settings by passing additional options, +// just like with NewClient. 
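+//
+// A minimal sketch (the URL is an example; DefaultURL is assumed when
+// none is set):
+//
+//	client, err := elastic.NewSimpleClient(elastic.SetURL("http://127.0.0.1:9200"))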
+func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: 1, + healthcheckEnabled: false, + healthcheckTimeoutStartup: off, + healthcheckTimeout: off, + healthcheckInterval: off, + healthcheckStop: make(chan bool), + snifferEnabled: false, + snifferTimeoutStartup: off, + snifferTimeout: off, + snifferInterval: off, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// SetHttpClient can be used to specify the http.Client to use when making +// HTTP requests to Elasticsearch. +func SetHttpClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + if httpClient != nil { + c.c = httpClient + } else { + c.c = http.DefaultClient + } + return nil + } +} + +// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to +// use when making HTTP requests to Elasticsearch. +func SetBasicAuth(username, password string) ClientOptionFunc { + return func(c *Client) error { + c.basicAuthUsername = username + c.basicAuthPassword = password + c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" + return nil + } +} + +// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that +// when sniffing is enabled, these URLs are used to initially sniff the +// cluster on startup. +func SetURL(urls ...string) ClientOptionFunc { + return func(c *Client) error { + switch len(urls) { + case 0: + c.urls = []string{DefaultURL} + default: + c.urls = urls + } + return nil + } +} + +// SetScheme sets the HTTP scheme to look for when sniffing (http or https). +// This is http by default. +func SetScheme(scheme string) ClientOptionFunc { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// SetSniff enables or disables the sniffer (enabled by default). +func SetSniff(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.snifferEnabled = enabled + return nil + } +} + +// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used +// when creating a new client. The default is 5 seconds. Notice that the +// timeout being used for subsequent sniffing processes is set with +// SetSnifferTimeout. +func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeoutStartup = timeout + return nil + } +} + +// SetSnifferTimeout sets the timeout for the sniffer that finds the +// nodes in a cluster. The default is 2 seconds. Notice that the timeout +// used when creating a new client on startup is usually greater and can +// be set with SetSnifferTimeoutStartup. 
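+//
+// For example (the 10s value is arbitrary):
+//
+//	client, err := elastic.NewClient(elastic.SetSnifferTimeout(10 * time.Second))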
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeout = timeout + return nil + } +} + +// SetSnifferInterval sets the interval between two sniffing processes. +// The default interval is 15 minutes. +func SetSnifferInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferInterval = interval + return nil + } +} + +// SetHealthcheck enables or disables healthchecks (enabled by default). +func SetHealthcheck(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckEnabled = enabled + return nil + } +} + +// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. +// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). +// Notice that timeouts for subsequent health checks can be modified with +// SetHealthcheckTimeout. +func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeoutStartup = timeout + return nil + } +} + +// SetHealthcheckTimeout sets the timeout for periodic health checks. +// The default timeout is 1 second (see DefaultHealthcheckTimeout). +// Notice that a different (usually larger) timeout is used for the initial +// healthcheck, which is initiated while creating a new client. +// The startup timeout can be modified with SetHealthcheckTimeoutStartup. +func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeout = timeout + return nil + } +} + +// SetHealthcheckInterval sets the interval between two health checks. +// The default interval is 60 seconds. +func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckInterval = interval + return nil + } +} + +// SetMaxRetries sets the maximum number of retries before giving up when +// performing a HTTP request to Elasticsearch. +func SetMaxRetries(maxRetries int) ClientOptionFunc { + return func(c *Client) error { + if maxRetries < 0 { + return errors.New("MaxRetries must be greater than or equal to 0") + } + c.maxRetries = maxRetries + return nil + } +} + +// SetGzip enables or disables gzip compression (disabled by default). +func SetGzip(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.gzipEnabled = enabled + return nil + } +} + +// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. +// DefaultDecoder is used by default. +func SetDecoder(decoder Decoder) ClientOptionFunc { + return func(c *Client) error { + if decoder != nil { + c.decoder = decoder + } else { + c.decoder = &DefaultDecoder{} + } + return nil + } +} + +// SetRequiredPlugins can be used to indicate that some plugins are required +// before a Client will be created. +func SetRequiredPlugins(plugins ...string) ClientOptionFunc { + return func(c *Client) error { + if c.requiredPlugins == nil { + c.requiredPlugins = make([]string, 0) + } + c.requiredPlugins = append(c.requiredPlugins, plugins...) + return nil + } +} + +// SetErrorLog sets the logger for critical messages like nodes joining +// or leaving the cluster or failing requests. It is nil by default. +func SetErrorLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.errorlog = logger + return nil + } +} + +// SetInfoLog sets the logger for informational messages, e.g. requests +// and their response times. It is nil by default. 
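+//
+// A standard *log.Logger can serve as the Logger here (the tests in this
+// patch build one the same way for the trace log), e.g.:
+//
+//	client, err := elastic.NewClient(
+//		elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))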
+func SetInfoLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.infolog = logger + return nil + } +} + +// SetTraceLog specifies the log.Logger to use for output of HTTP requests +// and responses which is helpful during debugging. It is nil by default. +func SetTraceLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.tracelog = logger + return nil + } +} + +// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request +// with a body. It is GET by default. +func SetSendGetBodyAs(httpMethod string) ClientOptionFunc { + return func(c *Client) error { + c.sendGetBodyAs = httpMethod + return nil + } +} + +// String returns a string representation of the client status. +func (c *Client) String() string { + c.connsMu.Lock() + conns := c.conns + c.connsMu.Unlock() + + var buf bytes.Buffer + for i, conn := range conns { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(conn.String()) + } + return buf.String() +} + +// IsRunning returns true if the background processes of the client are +// running, false otherwise. +func (c *Client) IsRunning() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.running +} + +// Start starts the background processes like sniffing the cluster and +// periodic health checks. You don't need to run Start when creating a +// client with NewClient; the background processes are run by default. +// +// If the background processes are already running, this is a no-op. +func (c *Client) Start() { + c.mu.RLock() + if c.running { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + + if c.snifferEnabled { + go c.sniffer() + } + if c.healthcheckEnabled { + go c.healthchecker() + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + c.infof("elastic: client started") +} + +// Stop stops the background processes that the client is running, +// i.e. sniffing the cluster periodically and running health checks +// on the nodes. +// +// If the background processes are not running, this is a no-op. +func (c *Client) Stop() { + c.mu.RLock() + if !c.running { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + + if c.healthcheckEnabled { + c.healthcheckStop <- true + <-c.healthcheckStop + } + + if c.snifferEnabled { + c.snifferStop <- true + <-c.snifferStop + } + + c.mu.Lock() + c.running = false + c.mu.Unlock() + + c.infof("elastic: client stopped") +} + +// errorf logs to the error log. +func (c *Client) errorf(format string, args ...interface{}) { + if c.errorlog != nil { + c.errorlog.Printf(format, args...) + } +} + +// infof logs informational messages. +func (c *Client) infof(format string, args ...interface{}) { + if c.infolog != nil { + c.infolog.Printf(format, args...) + } +} + +// tracef logs to the trace log. +func (c *Client) tracef(format string, args ...interface{}) { + if c.tracelog != nil { + c.tracelog.Printf(format, args...) + } +} + +// dumpRequest dumps the given HTTP request to the trace log. +func (c *Client) dumpRequest(r *http.Request) { + if c.tracelog != nil { + out, err := httputil.DumpRequestOut(r, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// dumpResponse dumps the given HTTP response to the trace log. +func (c *Client) dumpResponse(resp *http.Response) { + if c.tracelog != nil { + out, err := httputil.DumpResponse(resp, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// sniffer periodically runs sniff. 
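Start and Stop above are idempotent thanks to the `running` flag, so shutdown code can call Stop unconditionally. A short sketch of the lifecycle (assumes a reachable node at the default URL):

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Background sniffing and health checks are already running here.
	log.Println("running:", client.IsRunning()) // true

	client.Stop() // stops the sniffer and healthchecker goroutines
	log.Println("running:", client.IsRunning()) // false

	client.Start() // restarts them; calling Start twice is a no-op
}
```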
+func (c *Client) sniffer() { + c.mu.RLock() + timeout := c.snifferTimeout + interval := c.snifferInterval + c.mu.RUnlock() + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-c.snifferStop: + // we are asked to stop, so we signal back that we're stopping now + c.snifferStop <- true + return + case <-ticker.C: + c.sniff(timeout) + } + } +} + +// sniff uses the Node Info API to return the list of nodes in the cluster. +// It uses the list of URLs passed on startup plus the list of URLs found +// by the preceding sniffing process (if sniffing is enabled). +// +// If sniffing is disabled, this is a no-op. +func (c *Client) sniff(timeout time.Duration) error { + c.mu.RLock() + if !c.snifferEnabled { + c.mu.RUnlock() + return nil + } + + // Use all available URLs provided to sniff the cluster. + var urls []string + urlsMap := make(map[string]bool) + + // Add all URLs provided on startup + for _, url := range c.urls { + urlsMap[url] = true + urls = append(urls, url) + } + c.mu.RUnlock() + + // Add all URLs found by sniffing + c.connsMu.RLock() + for _, conn := range c.conns { + if !conn.IsDead() { + url := conn.URL() + if _, found := urlsMap[url]; !found { + urls = append(urls, url) + } + } + } + c.connsMu.RUnlock() + + if len(urls) == 0 { + return ErrNoClient + } + + // Start sniffing on all found URLs + ch := make(chan []*conn, len(urls)) + for _, url := range urls { + go func(url string) { ch <- c.sniffNode(url) }(url) + } + + // Wait for the results to come back, or the process times out. + for { + select { + case conns := <-ch: + if len(conns) > 0 { + c.updateConns(conns) + return nil + } + case <-time.After(timeout): + // We get here if no cluster responds in time + return ErrNoClient + } + } +} + +// sniffNode sniffs a single node. This method is run as a goroutine +// in sniff. If successful, it returns the list of node URLs extracted +// from the result of calling Nodes Info API. Otherwise, an empty array +// is returned. +func (c *Client) sniffNode(url string) []*conn { + var nodes []*conn + + // Call the Nodes Info API at /_nodes/http + req, err := NewRequest("GET", url+"/_nodes/http") + if err != nil { + return nodes + } + + c.mu.RLock() + if c.basicAuth { + req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword) + } + c.mu.RUnlock() + + res, err := c.c.Do((*http.Request)(req)) + if err != nil { + return nodes + } + if res == nil { + return nodes + } + + if res.Body != nil { + defer res.Body.Close() + } + + var info NodesInfoResponse + if err := json.NewDecoder(res.Body).Decode(&info); err == nil { + if len(info.Nodes) > 0 { + for nodeID, node := range info.Nodes { + if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 { + url := c.extractHostname(c.scheme, node.HTTP.PublishAddress) + if url != "" { + nodes = append(nodes, newConn(nodeID, url)) + } + } + } + } + } + return nodes +} + +// reSniffHostAndPort is used to extract hostname and port from a result +// from a Nodes Info API (example: "inet[/127.0.0.1:9200]"). 
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
+func (c *Client) extractHostname(scheme, address string) string {
+ if strings.HasPrefix(address, "inet") {
+ m := reSniffHostAndPort.FindStringSubmatch(address)
+ if len(m) == 3 {
+ return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
+ }
+ }
+ s := address
+ if idx := strings.Index(s, "/"); idx >= 0 {
+ s = s[idx+1:]
+ }
+ if strings.Index(s, ":") < 0 {
+ return ""
+ }
+ return fmt.Sprintf("%s://%s", scheme, s)
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+ c.connsMu.Lock()
+
+ // Build up new connections:
+ // If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it.
+ var newConns []*conn
+ for _, conn := range conns {
+ var found bool
+ for _, oldConn := range c.conns {
+ if oldConn.NodeID() == conn.NodeID() {
+ // Take over the old connection
+ newConns = append(newConns, oldConn)
+ found = true
+ break
+ }
+ }
+ if !found {
+ // New connection didn't exist, so add it to our list of new conns.
+ c.infof("elastic: %s joined the cluster", conn.URL())
+ newConns = append(newConns, conn)
+ }
+ }
+
+ c.conns = newConns
+ c.cindex = -1
+ c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ interval := c.healthcheckInterval
+ c.mu.RUnlock()
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.healthcheckStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.healthcheckStop <- true
+ return
+ case <-ticker.C:
+ c.healthcheck(timeout, false)
+ }
+ }
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
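The publish-address formats this parsing handles are easiest to see in isolation. The snippet below re-implements the same regexp and fallback logic as a standalone program, purely for illustration (the sample addresses mirror the ones exercised by this patch's own `TestClientExtractHostname` table):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as reSniffHostAndPort in the vendored file.
var re = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)

func extract(scheme, address string) string {
	// Pre-2.0 style: "inet[/127.0.0.1:9200]"
	if strings.HasPrefix(address, "inet") {
		if m := re.FindStringSubmatch(address); len(m) == 3 {
			return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
		}
	}
	// Newer style: optional "hostname/" prefix before "ip:port".
	s := address
	if idx := strings.Index(s, "/"); idx >= 0 {
		s = s[idx+1:]
	}
	if !strings.Contains(s, ":") {
		return ""
	}
	return fmt.Sprintf("%s://%s", scheme, s)
}

func main() {
	fmt.Println(extract("http", "inet[/127.0.0.1:9200]"))      // http://127.0.0.1:9200
	fmt.Println(extract("http", "myelk.local/10.1.0.24:9200")) // http://10.1.0.24:9200
	fmt.Println(extract("https", "127.0.0.1:9200"))            // https://127.0.0.1:9200
}
```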
+func (c *Client) healthcheck(timeout time.Duration, force bool) { + c.mu.RLock() + if !c.healthcheckEnabled && !force { + c.mu.RUnlock() + return + } + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.RUnlock() + + c.connsMu.RLock() + conns := c.conns + c.connsMu.RUnlock() + + for _, conn := range conns { + // Run the HEAD request against ES with a timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Goroutine executes the HTTP request, returns an error and sets status + var status int + errc := make(chan error, 1) + go func(url string) { + req, err := NewRequest("HEAD", url) + if err != nil { + errc <- err + return + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := c.c.Do((*http.Request)(req)) + if res != nil { + status = res.StatusCode + if res.Body != nil { + res.Body.Close() + } + } + errc <- err + }(conn.URL()) + + // Wait for the Goroutine (or its timeout) + select { + case <-ctx.Done(): // timeout + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + break + case err := <-errc: + if err != nil { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + break + } + if status >= 200 && status < 300 { + conn.MarkAsAlive() + } else { + conn.MarkAsDead() + c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status) + } + break + } + } +} + +// startupHealthcheck is used at startup to check if the server is available +// at all. +func (c *Client) startupHealthcheck(timeout time.Duration) error { + c.mu.Lock() + urls := c.urls + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.Unlock() + + // If we don't get a connection after "timeout", we bail. + start := time.Now() + for { + // Make a copy of the HTTP client provided via options to respect + // settings like Basic Auth or a user-specified http.Transport. + cl := new(http.Client) + *cl = *c.c + cl.Timeout = timeout + for _, url := range urls { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return err + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := cl.Do(req) + if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { + return nil + } + } + time.Sleep(1 * time.Second) + if time.Now().Sub(start) > timeout { + break + } + } + return ErrNoClient +} + +// next returns the next available connection, or ErrNoClient. +func (c *Client) next() (*conn, error) { + // We do round-robin here. + // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. + c.connsMu.Lock() + defer c.connsMu.Unlock() + + i := 0 + numConns := len(c.conns) + for { + i++ + if i > numConns { + break // we visited all conns: they all seem to be dead + } + c.cindex++ + if c.cindex >= numConns { + c.cindex = 0 + } + conn := c.conns[c.cindex] + if !conn.IsDead() { + return conn, nil + } + } + + // We have a deadlock here: All nodes are marked as dead. + // If sniffing is disabled, connections will never be marked alive again. + // So we are marking them as alive--if sniffing is disabled. + // They'll then be picked up in the next call to PerformRequest. 
+ if !c.snifferEnabled {
+ c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
+ for _, conn := range c.conns {
+ conn.MarkAsAlive()
+ }
+ }
+
+ // We tried hard, but there is no node available
+ return nil, ErrNoClient
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ for _, c := range c.conns {
+ if !c.IsDead() {
+ return nil
+ }
+ }
+ return ErrNoClient
+}
+
+// PerformRequest does an HTTP request to Elasticsearch.
+// It returns a response (which might be nil) and an error on failure.
+//
+// Optionally, a list of HTTP error codes to ignore can be passed.
+// This is necessary for services that expect e.g. HTTP status 404 as a
+// valid outcome (Exists, IndicesExists, IndicesTypeExists).
+func (c *Client) PerformRequest(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
+ start := time.Now().UTC()
+
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ retries := c.maxRetries
+ basicAuth := c.basicAuth
+ basicAuthUsername := c.basicAuthUsername
+ basicAuthPassword := c.basicAuthPassword
+ sendGetBodyAs := c.sendGetBodyAs
+ gzipEnabled := c.gzipEnabled
+ c.mu.RUnlock()
+
+ var err error
+ var conn *conn
+ var req *Request
+ var resp *Response
+ var retried bool
+
+ // We wait between retries, using simple exponential back-off.
+ // TODO: Make this configurable, including the jitter.
+ retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+ // Change method if sendGetBodyAs is specified.
+ if method == "GET" && body != nil && sendGetBodyAs != "GET" {
+ method = sendGetBodyAs
+ }
+
+ for {
+ pathWithParams := path
+ if len(params) > 0 {
+ pathWithParams += "?" + params.Encode()
+ }
+
+ // Get a connection
+ conn, err = c.next()
+ if err == ErrNoClient {
+ if !retried {
+ // Run a healthcheck as all connections seem to be dead.
+ c.healthcheck(timeout, false)
+ }
+ retries--
+ if retries <= 0 {
+ return nil, err
+ }
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ }
+ if err != nil {
+ c.errorf("elastic: cannot get connection from pool")
+ return nil, err
+ }
+
+ req, err = NewRequest(method, conn.URL()+pathWithParams)
+ if err != nil {
+ c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+ return nil, err
+ }
+
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+
+ // Set body
+ if body != nil {
+ err = req.SetBody(body, gzipEnabled)
+ if err != nil {
+ c.errorf("elastic: couldn't set body %+v for request: %v", body, err)
+ return nil, err
+ }
+ }
+
+ // Tracing
+ c.dumpRequest((*http.Request)(req))
+
+ // Get response
+ res, err := ctxhttp.Do(ctx, c.c, (*http.Request)(req))
+ if err != nil {
+ retries--
+ if retries <= 0 {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ return nil, err
+ }
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ }
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ // Check for errors
+ if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
+ // The HTTP request itself succeeded, so we don't retry here.
+ // We still try to return a response.
+ resp, _ = c.newResponse(res)
+ return resp, err
+ }
+
+ // Tracing
+ c.dumpResponse(res)
+
+ // We successfully made a request with this connection
+ conn.MarkAsHealthy()
+
+ resp, err = c.newResponse(res)
+ if err != nil {
+ return nil, err
+ }
+
+ break
+ }
+
+ duration := time.Now().UTC().Sub(start)
+ c.infof("%s %s [status:%d, request:%.3fs]",
+ strings.ToUpper(method),
+ req.URL,
+ resp.StatusCode,
+ float64(int64(duration/time.Millisecond))/1000)
+
+ return resp, nil
+}
+
+// -- Document APIs --
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+ return NewIndexService(c)
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+ return NewGetService(c)
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MgetService {
+ return NewMgetService(c)
+}
+
+// Mget retrieves multiple documents in one roundtrip.
+func (c *Client) Mget() *MgetService {
+ return NewMgetService(c)
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+ return NewDeleteService(c)
+}
+
+// DeleteByQuery deletes documents matching a query.
+func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService {
+ return NewDeleteByQueryService(c).Index(indices...)
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+ return NewUpdateService(c)
+}
+
+// UpdateByQuery performs an update on a set of documents.
+func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService {
+ return NewUpdateByQueryService(c).Index(indices...)
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+ return NewBulkService(c)
+}
+
+// BulkProcessor allows setting up a concurrent processor of bulk requests.
+func (c *Client) BulkProcessor() *BulkProcessorService {
+ return NewBulkProcessorService(c)
+}
+
+// Reindex copies data from a source index into a destination index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html
+// for details on the Reindex API.
+func (c *Client) Reindex() *ReindexService {
+ return NewReindexService(c)
+}
+
+// TermVectors returns information and statistics on terms in the fields
+// of a particular document.
+func (c *Client) TermVectors(index, typ string) *TermvectorsService {
+ builder := NewTermvectorsService(c)
+ builder = builder.Index(index).Type(typ)
+ return builder
+}
+
+// MultiTermVectors returns information and statistics on terms in the fields
+// of multiple documents.
+func (c *Client) MultiTermVectors() *MultiTermvectorService {
+ return NewMultiTermvectorService(c)
+}
+
+// -- Search APIs --
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+ return NewSearchService(c).Index(indices...)
+}
+
+// Suggest returns a service that provides suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+ return NewSuggestService(c).Index(indices...)
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+ return NewMultiSearchService(c)
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+ return NewCountService(c).Index(indices...)
+}
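PerformRequest is also the escape hatch for endpoints without a typed service. A sketch of calling it directly, mirroring how this patch's own tests ping the root endpoint (assumes a reachable node at the default URL; trailing int arguments, omitted here, would list HTTP status codes to treat as non-errors):

```go
package main

import (
	"encoding/json"
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// GET / returns basic cluster information.
	res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// res.Body holds the raw JSON; decode it into the typed ping result.
	var ping elastic.PingResult
	if err := json.Unmarshal(res.Body, &ping); err != nil {
		log.Fatal(err)
	}
	log.Println("cluster:", ping.ClusterName)
}
```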
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+ return NewExplainService(c).Index(index).Type(typ).Id(id)
+}
+
+// TODO Search Template
+// TODO Search Shards API
+// TODO Search Exists API
+// TODO Validate API
+
+// FieldStats returns statistical information about fields in indices.
+func (c *Client) FieldStats(indices ...string) *FieldStatsService {
+ return NewFieldStatsService(c).Index(indices...)
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+ return NewExistsService(c)
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client.
+func (c *Client) Scroll(indices ...string) *ScrollService {
+ return NewScrollService(c).Index(indices...)
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService {
+ return NewClearScrollService(c).ScrollId(scrollIds...)
+}
+
+// -- Indices APIs --
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *IndicesCreateService {
+ return NewIndicesCreateService(c).Index(name)
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
+ return NewIndicesDeleteService(c).Index(indices)
+}
+
+// IndexExists checks whether an index exists.
+func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
+ return NewIndicesExistsService(c).Index(indices)
+}
+
+// ShrinkIndex returns a service to shrink one index into another.
+func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
+ return NewIndicesShrinkService(c).Source(source).Target(target)
+}
+
+// RolloverIndex rolls an alias over to a new index when the existing index
+// is considered to be too large or too old.
+func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
+ return NewIndicesRolloverService(c).Alias(alias)
+}
+
+// TypeExists checks whether one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+ return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+ return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *IndicesOpenService {
+ return NewIndicesOpenService(c).Index(name)
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *IndicesCloseService {
+ return NewIndicesCloseService(c).Index(name)
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet(indices ...string) *IndicesGetService {
+ return NewIndicesGetService(c).Index(indices...)
+}
+
+// IndexGetSettings retrieves settings of all, one or more indices.
+func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
+ return NewIndicesGetSettingsService(c).Index(indices...)
+}
+
+// IndexPutSettings sets settings for all, one or more indices.
+func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
+ return NewIndicesPutSettingsService(c).Index(indices...)
+}
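Each of these methods returns a service builder that is configured fluently and executed with Do. A hedged sketch of the create-if-missing pattern (the index name is a placeholder; that `IndicesExistsService.Do` yields a bool is an assumption based on this patch's tests treating 404 as a valid outcome for Exists-style services):

```go
package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Exists-style services report HTTP 404 as a false result,
	// not as an error.
	exists, err := client.IndexExists("twitter").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if !exists {
		if _, err := client.CreateIndex("twitter").Do(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```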
+// Forcemerge optimizes one or more indices.
+// It replaces the deprecated Optimize API.
+func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
+ return NewIndicesForcemergeService(c).Index(indices...)
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+ return NewRefreshService(c).Index(indices...)
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush(indices ...string) *IndicesFlushService {
+ return NewIndicesFlushService(c).Index(indices...)
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+ return NewAliasService(c)
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+ return NewAliasesService(c)
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+ return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+ return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+ return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+ return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists checks whether an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+ return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+ return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+ return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *IndicesGetMappingService {
+ return NewIndicesGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *IndicesPutMappingService {
+ return NewIndicesPutMappingService(c)
+}
+
+// -- cat APIs --
+
+// TODO cat aliases
+// TODO cat allocation
+// TODO cat count
+// TODO cat fielddata
+// TODO cat health
+// TODO cat indices
+// TODO cat master
+// TODO cat nodes
+// TODO cat pending tasks
+// TODO cat plugins
+// TODO cat recovery
+// TODO cat thread pool
+// TODO cat shards
+// TODO cat segments
+
+// -- Ingest APIs --
+
+// IngestPutPipeline adds pipelines and updates existing pipelines in
+// the cluster.
+func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService {
+ return NewIngestPutPipelineService(c).Id(id)
+}
+
+// IngestGetPipeline returns pipelines based on ID.
+func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService {
+ return NewIngestGetPipelineService(c).Id(ids...)
+}
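The ingest services follow the same builder shape. A sketch of registering and then fetching a pipeline (the pipeline ID and JSON body are placeholders, and `BodyString` is an assumption about the builder's API rather than something shown in this patch):

```go
package main

import (
	"log"

	"golang.org/x/net/context"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// A simple pipeline using the "set" processor (placeholder definition).
	pipeline := `{"description":"add a field","processors":[{"set":{"field":"added","value":"yes"}}]}`
	if _, err := client.IngestPutPipeline("my-pipeline").BodyString(pipeline).Do(ctx); err != nil {
		log.Fatal(err)
	}

	// Fetch it back by ID.
	if _, err := client.IngestGetPipeline("my-pipeline").Do(ctx); err != nil {
		log.Fatal(err)
	}
}
```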
+// IngestDeletePipeline deletes a pipeline by ID.
+func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService {
+ return NewIngestDeletePipelineService(c).Id(id)
+}
+
+// IngestSimulatePipeline executes a specific pipeline against the set of
+// documents provided in the body of the request.
+func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService {
+ return NewIngestSimulatePipelineService(c)
+}
+
+// -- Cluster APIs --
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+ return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+ return NewClusterStateService(c)
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+ return NewClusterStatsService(c)
+}
+
+// NodesInfo retrieves information about one, several, or all of the
+// cluster's nodes.
+func (c *Client) NodesInfo() *NodesInfoService {
+ return NewNodesInfoService(c)
+}
+
+// NodesStats retrieves statistics about one, several, or all of the
+// cluster's nodes.
+func (c *Client) NodesStats() *NodesStatsService {
+ return NewNodesStatsService(c)
+}
+
+// TasksCancel cancels tasks running on the specified nodes.
+func (c *Client) TasksCancel() *TasksCancelService {
+ return NewTasksCancelService(c)
+}
+
+// TasksList retrieves the list of tasks running on the specified nodes.
+func (c *Client) TasksList() *TasksListService {
+ return NewTasksListService(c)
+}
+
+// TODO Pending cluster tasks
+// TODO Cluster Reroute
+// TODO Cluster Update Settings
+// TODO Nodes Stats
+// TODO Nodes hot_threads
+
+// -- Snapshot and Restore --
+
+// TODO Snapshot Create
+// TODO Snapshot Create Repository
+// TODO Snapshot Delete
+// TODO Snapshot Delete Repository
+// TODO Snapshot Get
+// TODO Snapshot Get Repository
+// TODO Snapshot Restore
+// TODO Snapshot Status
+// TODO Snapshot Verify Repository
+
+// -- Helpers and shortcuts --
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+ res, _, err := c.Ping(url).Do(context.Background())
+ if err != nil {
+ return "", err
+ }
+ return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+ res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for name := range res {
+ names = append(names, name)
+ }
+ return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+//
+// Notice that you need to specify a URL here explicitly.
+func (c *Client) Ping(url string) *PingService {
+ return NewPingService(c).URL(url)
+}
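Together with the WaitFor* helpers defined next, these shortcuts cover common startup checks. A sketch of blocking until the cluster is at least yellow and then logging the server version (the URL is a placeholder; timeouts follow Elasticsearch's duration syntax, e.g. "30s"):

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Returns ErrTimeout if the cluster is not yellow within 30 seconds.
	if err := client.WaitForYellowStatus("30s"); err != nil {
		log.Fatal(err)
	}

	version, err := client.ElasticsearchVersion("http://127.0.0.1:9200") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Elasticsearch version:", version)
}
```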
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits for the specified timeout, e.g. "10s".
+// If the cluster reaches the given status within the timeout, nil is returned.
+// If the request timed out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+ health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
+ if err != nil {
+ return err
+ }
+ if health.TimedOut {
+ return ErrTimeout
+ }
+ return nil
+}
+
+// WaitForGreenStatus waits for the cluster to have the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+ return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to have the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+ return c.WaitForStatus("yellow", timeout)
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/client_test.go b/vendor/gopkg.in/olivere/elastic.v5/client_test.go
new file mode 100644
index 000000000..c9e9ff327
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/client_test.go
@@ -0,0 +1,1025 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net/http"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+func findConn(s string, slice ...*conn) (int, bool) {
+ for i, t := range slice {
+ if s == t.URL() {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// -- NewClient --
+
+func TestClientDefaults(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.healthcheckEnabled != true {
+ t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+ }
+ if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+ t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+ }
+ if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+ t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+ }
+ if client.healthcheckInterval != DefaultHealthcheckInterval {
+ t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+ }
+ if client.snifferEnabled != true {
+ t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+ }
+ if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+ t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+ }
+ if client.snifferTimeout != DefaultSnifferTimeout {
+ t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+ }
+ if client.snifferInterval != DefaultSnifferInterval {
+ t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+ }
+ if client.basicAuth != false {
+ t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+ }
+ if client.basicAuthUsername != "" {
+ t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+ }
+ if client.basicAuthPassword != "" {
+ t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
+ }
+ if client.sendGetBodyAs != "GET" {
+ t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+ }
+}
+
+func TestClientWithoutURL(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Two 
things should happen here: + // 1. The client starts sniffing the cluster on DefaultURL + // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL + if len(client.conns) == 0 { + t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if _, found := findConn(DefaultURL, client.conns...); !found { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithSingleURL(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9200")) + if err != nil { + t.Fatal(err) + } + // Two things should happen here: + // 1. The client starts sniffing the cluster on DefaultURL + // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL + if len(client.conns) == 0 { + t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if _, found := findConn(DefaultURL, client.conns...); !found { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithMultipleURLs(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if client.conns[0].URL() != DefaultURL { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithBasicAuth(t *testing.T) { + client, err := NewClient(SetBasicAuth("user", "secret")) + if err != nil { + t.Fatal(err) + } + if client.basicAuth != true { + t.Errorf("expected basic auth; got: %v", client.basicAuth) + } + if got, want := client.basicAuthUsername, "user"; got != want { + t.Errorf("expected basic auth username %q; got: %q", want, got) + } + if got, want := client.basicAuthPassword, "secret"; got != want { + t.Errorf("expected basic auth password %q; got: %q", want, got) + } +} + +func TestClientSniffSuccess(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } +} + +func TestClientSniffFailure(t *testing.T) { + _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201")) + if err == nil { + t.Fatalf("expected cluster to fail with no nodes found") + } +} + +func TestClientSniffDisabled(t *testing.T) { + client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should not sniff, so it should have two connections. + if len(client.conns) != 2 { + t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns) + } + // Make two requests, so that both connections are being used + for i := 0; i < len(client.conns); i++ { + client.Flush().Do(context.TODO()) + } + // The first connection (127.0.0.1:9200) should now be okay. 
+ if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200")
+ } else {
+ if conn := client.conns[i]; conn.IsDead() {
+ t.Fatal("expected connection to be alive, but it is dead")
+ }
+ }
+ // The second connection (127.0.0.1:9201) should now be marked as dead.
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; !conn.IsDead() {
+ t.Fatal("expected connection to be dead, but it is alive")
+ }
+ }
+}
+
+func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) {
+ client, err := NewClient(SetURL("http://127.0.0.1:9201"),
+ SetSniff(false), SetHealthcheck(false), SetMaxRetries(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We should have a connection.
+ if len(client.conns) != 1 {
+ t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
+ }
+
+ // Make a request, so that the connection is marked as dead.
+ client.Flush().Do(context.TODO())
+
+ // The connection should now be marked as dead.
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; !conn.IsDead() {
+ t.Fatalf("expected connection to be dead, got: %v", conn)
+ }
+ }
+
+ // Now send another request and the connection should be marked as alive again.
+ client.Flush().Do(context.TODO())
+
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; conn.IsDead() {
+ t.Fatalf("expected connection to be alive, got: %v", conn)
+ }
+ }
+}
+
+func TestClientWithRequiredPlugins(t *testing.T) {
+ _, err := NewClient(SetRequiredPlugins("no-such-plugin"))
+ if err == nil {
+ t.Fatal("expected error when creating client")
+ }
+ if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
+ t.Fatalf("expected error %q; got: %q", want, got)
+ }
+}
+
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+ start := time.Now()
+ _, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+ duration := time.Now().Sub(start)
+ if err != ErrNoClient {
+ t.Fatal(err)
+ }
+ if duration < 5*time.Second {
+ t.Fatalf("expected a timeout after at least 5 seconds; got: %v", duration)
+ }
+}
+
+// -- NewSimpleClient --
+
+func TestSimpleClientDefaults(t *testing.T) {
+ client, err := NewSimpleClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.healthcheckEnabled != false {
+ t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
+ }
+ if client.healthcheckTimeoutStartup != off {
+ t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
+ }
+ if client.healthcheckTimeout != off {
+ t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
+ }
+ if client.healthcheckInterval != off {
+ t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
+ }
+ if client.snifferEnabled != false {
+ t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
+ }
+ if client.snifferTimeoutStartup != off {
+ t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
+ }
+ if client.snifferTimeout != off {
+ t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
+ }
+ if client.snifferInterval != off {
+ t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
+ }
+ if client.basicAuth != false {
+ t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+ }
+ if client.basicAuthUsername != "" {
+ t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+ }
+ if client.basicAuthPassword != "" {
+ t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
+ }
+ if client.sendGetBodyAs != "GET" {
+ t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+ }
+}
+
+// -- Start and stop --
+
+func TestClientStartAndStop(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running := client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Stop
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Stop again => no-op
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Start
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Start again => no-op
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+}
+
+func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) {
+ client, err := NewClient(SetSniff(false), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running := client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Stop
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Stop again => no-op
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Start
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Start again => no-op
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+}
+
+// -- Sniffing --
+
+func TestClientSniffNode(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch := make(chan []*conn)
+ go func() { ch <- client.sniffNode(DefaultURL) }()
+
+ select {
+ case nodes := <-ch:
+ if len(nodes) != 1 {
+ t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
+ }
+ pattern := `http:\/\/[\d\.]+:9200`
+ matched, err := regexp.MatchString(pattern, nodes[0].URL())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected no timeout in sniff node")
+ break
+ }
+}
+
+func TestClientSniffOnDefaultURL(t *testing.T) {
+ client, _ := NewClient()
+ if client == nil {
+ t.Fatal("no client returned")
+ }
+
+ ch := make(chan error, 1)
+ go func() {
+ ch <- client.sniff(DefaultSnifferTimeoutStartup)
+ }()
+
+ select {
+ case err := <-ch:
+ if err != nil {
+ t.Fatalf("expected sniff to 
succeed; got: %v", err) + } + if len(client.conns) != 1 { + t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns)) + } + pattern := `http:\/\/[\d\.]+:9200` + matched, err := regexp.MatchString(pattern, client.conns[0].URL()) + if err != nil { + t.Fatal(err) + } + if !matched { + t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL()) + } + case <-time.After(2 * time.Second): + t.Fatal("expected no timeout in sniff") + break + } +} + +func TestClientExtractHostname(t *testing.T) { + tests := []struct { + Scheme string + Address string + Output string + }{ + { + Scheme: "http", + Address: "", + Output: "", + }, + { + Scheme: "https", + Address: "abc", + Output: "", + }, + { + Scheme: "http", + Address: "127.0.0.1:19200", + Output: "http://127.0.0.1:19200", + }, + { + Scheme: "https", + Address: "127.0.0.1:9200", + Output: "https://127.0.0.1:9200", + }, + { + Scheme: "http", + Address: "myelk.local/10.1.0.24:9200", + Output: "http://10.1.0.24:9200", + }, + } + + client, err := NewClient(SetSniff(false), SetHealthcheck(false)) + if err != nil { + t.Fatal(err) + } + for _, test := range tests { + got := client.extractHostname(test.Scheme, test.Address) + if want := test.Output; want != got { + t.Errorf("expected %q; got: %q", want, got) + } + } +} + +// -- Selector -- + +func TestClientSelectConnHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are healthy, so we should get both URLs in round-robin + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsHealthy() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 2nd + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 1st + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnHealthyAndDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is healthy, second is dead + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsDead() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 1st again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #3: Return 1st again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnDeadAndHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is dead, 2nd is healthy + client.conns[0].MarkAsDead() + client.conns[1].MarkAsHealthy() + + // #1: Return 2nd + c, err 
:= client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #2: Return 2nd again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 2nd again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } +} + +func TestClientSelectConnAllDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are dead + client.conns[0].MarkAsDead() + client.conns[1].MarkAsDead() + + // If all connections are dead, next should make them alive again, but + // still return ErrNoClient when it first finds out. + c, err := client.next() + if err != ErrNoClient { + t.Fatal(err) + } + if c != nil { + t.Fatalf("expected no connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } +} + +// -- ElasticsearchVersion -- + +func TestElasticsearchVersion(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + version, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if version == "" { + t.Errorf("expected a version number, got: %q", version) + } +} + +// -- IndexNames -- + +func TestIndexNames(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + names, err := client.IndexNames() + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Fatalf("expected some index names, got: %d", len(names)) + } + var found bool + for _, name := range names { + if name == testIndexName { + found = true + break + } + } + if !found { + t.Fatalf("expected to find index %q; got: %v", testIndexName, found) + } +} + +// -- PerformRequest -- + +func TestPerformRequest(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithSimpleClient(t *testing.T) { + client, err := NewSimpleClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithLogger(t *testing.T) { + var w bytes.Buffer + out := log.New(&w, "LOGGER ", log.LstdFlags) + + client, 
err := NewClient(SetInfoLog(out), SetSniff(false)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := w.String() + pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +func TestPerformRequestWithLoggerAndTracer(t *testing.T) { + var lw bytes.Buffer + lout := log.New(&lw, "LOGGER ", log.LstdFlags) + + var tw bytes.Buffer + tout := log.New(&tw, "TRACER ", log.LstdFlags) + + client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout), SetSniff(false)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + lgot := lw.String() + if lgot == "" { + t.Errorf("expected logger output; got: %q", lgot) + } + + tgot := tw.String() + if tgot == "" { + t.Errorf("expected tracer output; got: %q", tgot) + } +} + +type customLogger struct { + out bytes.Buffer +} + +func (l *customLogger) Printf(format string, v ...interface{}) { + l.out.WriteString(fmt.Sprintf(format, v...) + "\n") +} + +func TestPerformRequestWithCustomLogger(t *testing.T) { + logger := &customLogger{} + + client, err := NewClient(SetInfoLog(logger), SetSniff(false)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := logger.out.String() + pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +// failingTransport will run a fail callback if it sees a given URL path prefix. +type failingTransport struct { + path string // path prefix to look for + fail func(*http.Request) (*http.Response, error) // call when path prefix is found + next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil) +} + +// RoundTrip implements a failing transport. 
+func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
+ return tr.fail(r)
+ }
+ if tr.next != nil {
+ return tr.next.RoundTrip(r)
+ }
+ return http.DefaultTransport.RoundTrip(r)
+}
+
+func TestPerformRequestRetryOnHttpError(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ // Connection should be marked as dead after it failed
+ if numFailedReqs != 5 {
+ t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+ }
+}
+
+func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ return &http.Response{Request: r, StatusCode: 500}, nil
+ }
+
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res == nil {
+ t.Fatal("expected response, got nil")
+ }
+ if want, got := 500, res.StatusCode; want != got {
+ t.Fatalf("expected status code = %d, got %d", want, got)
+ }
+ // Retry should not have triggered additional requests because
+ // a valid HTTP response was received, even though its status
+ // code indicated an error.
+ if numFailedReqs != 1 {
+ t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
+ }
+}
+
+// failingBody will return an error when json.Marshal is called on it.
+type failingBody struct{}
+
+// MarshalJSON implements the json.Marshaler interface and always returns an error.
+func (fb failingBody) MarshalJSON() ([]byte, error) {
+ return nil, errors.New("failing to marshal")
+}
+
+func TestPerformRequestWithSetBodyError(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, failingBody{})
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+}
+
+// sleepingTransport will sleep before doing a request.
+type sleepingTransport struct {
+ timeout time.Duration
+}
+
+// RoundTrip implements a "sleepy" transport.
+func (tr *sleepingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + time.Sleep(tr.timeout) + return http.DefaultTransport.RoundTrip(r) +} + +func TestPerformRequestWithCancel(t *testing.T) { + tr := &sleepingTransport{timeout: 3 * time.Second} + httpClient := &http.Client{Transport: tr} + + client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0)) + if err != nil { + t.Fatal(err) + } + + type result struct { + res *Response + err error + } + ctx, cancel := context.WithCancel(context.Background()) + + resc := make(chan result, 1) + go func() { + res, err := client.PerformRequest(ctx, "GET", "/", nil, nil) + resc <- result{res: res, err: err} + }() + select { + case <-time.After(1 * time.Second): + cancel() + case res := <-resc: + t.Fatalf("expected response before cancel, got %v", res) + case <-ctx.Done(): + t.Fatalf("expected no early termination, got ctx.Done(): %v", ctx.Err()) + } + err = ctx.Err() + if err != context.Canceled { + t.Fatalf("expected error context.Canceled, got: %v", err) + } +} + +func TestPerformRequestWithTimeout(t *testing.T) { + tr := &sleepingTransport{timeout: 3 * time.Second} + httpClient := &http.Client{Transport: tr} + + client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0)) + if err != nil { + t.Fatal(err) + } + + type result struct { + res *Response + err error + } + ctx, _ := context.WithTimeout(context.Background(), 1*time.Second) + + resc := make(chan result, 1) + go func() { + res, err := client.PerformRequest(ctx, "GET", "/", nil, nil) + resc <- result{res: res, err: err} + }() + select { + case res := <-resc: + t.Fatalf("expected timeout before response, got %v", res) + case <-ctx.Done(): + err := ctx.Err() + if err != context.DeadlineExceeded { + t.Fatalf("expected error context.DeadlineExceeded, got: %v", err) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile new file mode 100644 index 000000000..cc6261db5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile @@ -0,0 +1,16 @@ +.PHONY: build run-omega-cluster-test + +default: build + +build: + go build cluster-test.go + +run-omega-cluster-test: + go run -race cluster-test.go \ + -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \ + -n=5 \ + -retries=5 \ + -sniff=true -sniffer=10s \ + -healthcheck=true -healthchecker=5s \ + -errorlog=errors.log + diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md new file mode 100644 index 000000000..f10748cc2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md @@ -0,0 +1,63 @@ +# Cluster Test + +This directory contains a program you can use to test a cluster. + +Here's how: + +First, install a cluster of Elasticsearch nodes. You can install them on +different computers, or start several nodes on a single machine. + +Build cluster-test by `go build cluster-test.go` (or build with `make`). + +Run `./cluster-test -h` to get a list of flags: + +```sh +$ ./cluster-test -h +Usage of ./cluster-test: + -errorlog="": error log file + -healthcheck=true: enable or disable healthchecks + -healthchecker=1m0s: healthcheck interval + -index="twitter": name of ES index to use + -infolog="": info log file + -n=5: number of goroutines that run searches + -nodes="": comma-separated list of ES URLs (e.g. 
'http://192.168.2.10:9200,http://192.168.2.11:9200') + -retries=0: number of retries + -sniff=true: enable or disable sniffer + -sniffer=15m0s: sniffer interval + -tracelog="": trace log file +``` + +Example: + +```sh +$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log +``` + +The above example will create an index and start some search jobs on the +cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201, +and http://127.0.0.1:9202. + +* It will create an index called `twitter` on the cluster (`-index=twitter`) +* It will run 5 search jobs in parallel (`-n=5`). +* It will retry failed requests 5 times (`-retries=5`). +* It will sniff the cluster periodically (`-sniff=true`). +* It will sniff the cluster every 10 seconds (`-sniffer=10s`). +* It will perform health checks periodically (`-healthcheck=true`). +* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`). +* It will write an error log file (`-errorlog=error.log`). + +If you want to test Elastic with nodes going up and down, you can use a +chaos monkey script like this and run it on the nodes of your cluster: + +```sh +#!/bin/bash +while true +do + echo "Starting ES node" + elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid + sleep `jot -r 1 10 300` # wait for 10-300s + echo "Stopping ES node" + kill -TERM `cat es.pid` + sleep `jot -r 1 10 60` # wait for 10-60s +done +``` diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go new file mode 100644 index 000000000..112a60bad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go @@ -0,0 +1,362 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package main + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "math/rand" + "os" + "runtime" + "strings" + "sync/atomic" + "time" + + "golang.org/x/net/context" + + elastic "gopkg.in/olivere/elastic.v5" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +var ( + nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 
'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+	n             = flag.Int("n", 5, "number of goroutines that run searches")
+	index         = flag.String("index", "twitter", "name of ES index to use")
+	errorlogfile  = flag.String("errorlog", "", "error log file")
+	infologfile   = flag.String("infolog", "", "info log file")
+	tracelogfile  = flag.String("tracelog", "", "trace log file")
+	retries       = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries")
+	sniff         = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+	sniffer       = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+	healthcheck   = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+	healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+)
+
+func main() {
+	flag.Parse()
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	if *nodes == "" {
+		log.Fatal("no nodes specified")
+	}
+	urls := strings.Split(*nodes, ",")
+
+	testcase, err := NewTestCase(*index, urls)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	testcase.SetErrorLogFile(*errorlogfile)
+	testcase.SetInfoLogFile(*infologfile)
+	testcase.SetTraceLogFile(*tracelogfile)
+	testcase.SetMaxRetries(*retries)
+	testcase.SetHealthcheck(*healthcheck)
+	testcase.SetHealthcheckInterval(*healthchecker)
+	testcase.SetSniff(*sniff)
+	testcase.SetSnifferInterval(*sniffer)
+
+	if err := testcase.Run(*n); err != nil {
+		log.Fatal(err)
+	}
+
+	select {}
+}
+
+type RunInfo struct {
+	Success bool
+}
+
+type TestCase struct {
+	nodes               []string
+	client              *elastic.Client
+	runs                int64
+	failures            int64
+	runCh               chan RunInfo
+	index               string
+	errorlogfile        string
+	infologfile         string
+	tracelogfile        string
+	maxRetries          int
+	healthcheck         bool
+	healthcheckInterval time.Duration
+	sniff               bool
+	snifferInterval     time.Duration
+}
+
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+	if index == "" {
+		return nil, errors.New("no index name specified")
+	}
+
+	return &TestCase{
+		index: index,
+		nodes: nodes,
+		runCh: make(chan RunInfo),
+	}, nil
+}
+
+func (t *TestCase) SetIndex(name string) {
+	t.index = name
+}
+
+func (t *TestCase) SetErrorLogFile(name string) {
+	t.errorlogfile = name
+}
+
+func (t *TestCase) SetInfoLogFile(name string) {
+	t.infologfile = name
+}
+
+func (t *TestCase) SetTraceLogFile(name string) {
+	t.tracelogfile = name
+}
+
+func (t *TestCase) SetMaxRetries(n int) {
+	t.maxRetries = n
+}
+
+func (t *TestCase) SetSniff(enabled bool) {
+	t.sniff = enabled
+}
+
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+	t.snifferInterval = d
+}
+
+func (t *TestCase) SetHealthcheck(enabled bool) {
+	t.healthcheck = enabled
+}
+
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+	t.healthcheckInterval = d
+}
+
+func (t *TestCase) Run(n int) error {
+	if err := t.setup(); err != nil {
+		return err
+	}
+
+	// Start n search goroutines, matching the documented -n flag.
+	for i := 0; i < n; i++ {
+		go t.search()
+	}
+
+	go t.monitor()
+
+	return nil
+}
+
+func (t *TestCase) monitor() {
+	print := func() {
+		fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
+	}
+
+	for {
+		select {
+		case run := <-t.runCh:
+			atomic.AddInt64(&t.runs, 1)
+			if !run.Success {
+				atomic.AddInt64(&t.failures, 1)
+				fmt.Println()
+			}
+			print()
+		case <-time.After(5 * time.Second):
+			// Print stats after some inactivity
+			print()
+		}
+	}
+}
+
+func (t *TestCase) setup() error {
+	var errorlogger *log.Logger
+	if t.errorlogfile != "" {
+		f, err :=
os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) + } + + var infologger *log.Logger + if t.infologfile != "" { + f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + infologger = log.New(f, "", log.LstdFlags) + } + + // Trace request and response details like this + var tracelogger *log.Logger + if t.tracelogfile != "" { + f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + tracelogger = log.New(f, "", log.LstdFlags) + } + + client, err := elastic.NewClient( + elastic.SetURL(t.nodes...), + elastic.SetErrorLog(errorlogger), + elastic.SetInfoLog(infologger), + elastic.SetTraceLog(tracelogger), + elastic.SetMaxRetries(t.maxRetries), + elastic.SetSniff(t.sniff), + elastic.SetSnifferInterval(t.snifferInterval), + elastic.SetHealthcheck(t.healthcheck), + elastic.SetHealthcheckInterval(t.healthcheckInterval)) + if err != nil { + // Handle error + return err + } + t.client = client + + ctx := context.Background() + + // Use the IndexExists service to check if a specified index exists. + exists, err := t.client.IndexExists(t.index).Do(ctx) + if err != nil { + return err + } + if exists { + deleteIndex, err := t.client.DeleteIndex(t.index).Do(ctx) + if err != nil { + return err + } + if !deleteIndex.Acknowledged { + return errors.New("delete index not acknowledged") + } + } + + // Create a new index. + createIndex, err := t.client.CreateIndex(t.index).Do(ctx) + if err != nil { + return err + } + if !createIndex.Acknowledged { + return errors.New("create index not acknowledged") + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do(ctx) + if err != nil { + return err + } + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do(ctx) + if err != nil { + return err + } + + // Flush to make sure the documents got written. + _, err = t.client.Flush().Index(t.index).Do(ctx) + if err != nil { + return err + } + + return nil +} + +func (t *TestCase) search() { + ctx := context.Background() + + // Loop forever to check for connection issues + for { + // Get tweet with specified ID + get1, err := t.client.Get(). + Index(t.index). + Type("tweet"). + Id("1"). + Do(ctx) + if err != nil { + //failf("Get failed: %v", err) + t.runCh <- RunInfo{Success: false} + continue + } + if !get1.Found { + //log.Printf("Document %s not found\n", "1") + //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + t.runCh <- RunInfo{Success: false} + continue + } + + // Search with a term query + searchResult, err := t.client.Search(). + Index(t.index). // search in index t.index + Query(elastic.NewTermQuery("user", "olivere")). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). 
// pretty print request and response JSON + Do(ctx) // execute + if err != nil { + //failf("Search failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits.TotalHits > 0 { + //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var tweet Tweet + err := json.Unmarshal(*hit.Source, &tweet) + if err != nil { + // Deserialization failed + //failf("Deserialize failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // Work with tweet + //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + //fmt.Print("Found no tweets\n") + } + + t.runCh <- RunInfo{Success: true} + + // Sleep some time + time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go new file mode 100644 index 000000000..14694c714 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go @@ -0,0 +1,245 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterHealthService allows to get a very simple status on the health of the cluster. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html +// for details. +type ClusterHealthService struct { + client *Client + pretty bool + indices []string + level string + local *bool + masterTimeout string + timeout string + waitForActiveShards *int + waitForNodes string + waitForRelocatingShards *int + waitForStatus string +} + +// NewClusterHealthService creates a new ClusterHealthService. +func NewClusterHealthService(client *Client) *ClusterHealthService { + return &ClusterHealthService{ + client: client, + indices: make([]string, 0), + } +} + +// Index limits the information returned to specific indices. +func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { + s.indices = append(s.indices, indices...) + return s +} + +// Level specifies the level of detail for returned information. +func (s *ClusterHealthService) Level(level string) *ClusterHealthService { + s.level = level + return s +} + +// Local indicates whether to return local information. If it is true, +// we do not retrieve the state from master node (default: false). +func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { + s.local = &local + return s +} + +// MasterTimeout specifies an explicit operation timeout for connection to master node. +func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. 
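+// The value is an Elasticsearch duration string such as "1s" or "10s".
+// A short sketch, assuming an initialized *Client named client and a
+// context ctx (the values mirror those used in the tests for this service):
+//
+//	health, err := client.ClusterHealth().WaitForYellowStatus().Timeout("1s").Do(ctx)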
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+	s.waitForActiveShards = &waitForActiveShards
+	return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+	s.waitForNodes = waitForNodes
+	return s
+}
+
+// WaitForRelocatingShards can be used to wait until the cluster has at most
+// the specified number of relocating shards (use 0 to wait for all shard
+// relocations to finish).
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+	s.waitForRelocatingShards = &waitForRelocatingShards
+	return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+	s.waitForStatus = waitForStatus
+	return s
+}
+
+// WaitForGreenStatus will wait for the "green" state.
+func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
+	return s.WaitForStatus("green")
+}
+
+// WaitForYellowStatus will wait for the "yellow" state.
+func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
+	return s.WaitForStatus("yellow")
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+	if len(s.indices) > 0 {
+		path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+			"index": strings.Join(s.indices, ","),
+		})
+	} else {
+		path = "/_cluster/health"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.waitForActiveShards != nil {
+		params.Set("wait_for_active_shards", fmt.Sprintf("%v", *s.waitForActiveShards))
+	}
+	if s.waitForNodes != "" {
+		params.Set("wait_for_nodes", s.waitForNodes)
+	}
+	if s.waitForRelocatingShards != nil {
+		params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", *s.waitForRelocatingShards))
+	}
+	if s.waitForStatus != "" {
+		params.Set("wait_for_status", s.waitForStatus)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
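+// A minimal usage sketch, assuming an initialized *Client named client
+// (the call chain mirrors the tests for this service):
+//
+//	ctx := context.Background()
+//	res, err := client.ClusterHealth().Index("twitter").Level("shards").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("cluster %q has status %q\n", res.ClusterName, res.Status)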
+func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterHealthResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterHealthResponse is the response of ClusterHealthService.Do. +type ClusterHealthResponse struct { + ClusterName string `json:"cluster_name"` + Status string `json:"status"` + TimedOut bool `json:"timed_out"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` + ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + + // Validation failures -> index name -> array of validation failures + ValidationFailures []map[string][]string `json:"validation_failures"` + + // Index name -> index health + Indices map[string]*ClusterIndexHealth `json:"indices"` +} + +// ClusterIndexHealth will be returned as part of ClusterHealthResponse. +type ClusterIndexHealth struct { + Status string `json:"status"` + NumberOfShards int `json:"number_of_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + // Validation failures + ValidationFailures []string `json:"validation_failures"` + // Shards by id, e.g. "0" or "1" + Shards map[string]*ClusterShardHealth `json:"shards"` +} + +// ClusterShardHealth will be returned as part of ClusterHealthResponse. +type ClusterShardHealth struct { + Status string `json:"status"` + PrimaryActive bool `json:"primary_active"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go new file mode 100644 index 000000000..d98706415 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" + + "golang.org/x/net/context" +) + +func TestClusterHealth(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster health + res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.Status != "green" && res.Status != "red" && res.Status != "yellow" { + t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status) + } +} + +func TestClusterHealthURLs(t *testing.T) { + tests := []struct { + Service *ClusterHealthService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterHealthService{ + indices: []string{}, + }, + ExpectedPath: "/_cluster/health", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + }, + ExpectedPath: "/_cluster/health/twitter", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter", "gplus"}, + }, + ExpectedPath: "/_cluster/health/twitter%2Cgplus", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + waitForStatus: "yellow", + }, + ExpectedPath: "/_cluster/health/twitter", + ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} + +func TestClusterHealthWaitForStatus(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + // Ensure preconditions are met: A green cluster. + health, err := client.ClusterHealth().Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if got, want := health.Status, "green"; got != want { + t.Skipf("precondition failed: expected cluster to be %q, not %q", want, got) + } + + // Cluster health on an index that does not exist should never get to yellow + health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do(context.TODO()) + if err == nil { + t.Fatalf("expected timeout error; got: %v", err) + } + if !IsTimeout(err) { + t.Fatalf("expected timeout error; got: %v", err) + } + if health != nil { + t.Fatalf("expected no response; got: %v", health) + } + + // Cluster wide health + health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if health.TimedOut != false { + t.Fatalf("expected no timeout; got: %v "+ + "(does your local cluster contain unassigned shards?)", health.TimedOut) + } + if health.Status != "green" { + t.Fatalf("expected health = %q; got: %q", "green", health.Status) + } + + // Cluster wide health via shortcut on client + err = client.WaitForGreenStatus("10s") + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go new file mode 100644 index 000000000..1c287890e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go @@ -0,0 +1,285 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterStateService allows to get a comprehensive state information of the whole cluster. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html +// for details. +type ClusterStateService struct { + client *Client + pretty bool + indices []string + metrics []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + local *bool + masterTimeout string +} + +// NewClusterStateService creates a new ClusterStateService. +func NewClusterStateService(client *Client) *ClusterStateService { + return &ClusterStateService{ + client: client, + indices: make([]string, 0), + metrics: make([]string, 0), + } +} + +// Index is a list of index names. Use _all or an empty string to +// perform the operation on all indices. +func (s *ClusterStateService) Index(indices ...string) *ClusterStateService { + s.indices = append(s.indices, indices...) + return s +} + +// Metric limits the information returned to the specified metric. +// It can be one of: version, master_node, nodes, routing_table, metadata, +// blocks, or customs. +func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService { + s.metrics = append(s.metrics, metrics...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings, when set, returns settings in flat format (default: false). +func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Local indicates whether to return local information. When set, it does not +// retrieve the state from master node (default: false). +func (s *ClusterStateService) Local(local bool) *ClusterStateService { + s.local = &local + return s +} + +// MasterTimeout specifies timeout for connection to master. +func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
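+// Both the metrics and the indices default to "_all", so the resulting
+// path always has the shape /_cluster/state/{metrics}/{indices}, e.g.
+// (expected values taken from the tests for this service):
+//
+//	/_cluster/state/_all/_all
+//	/_cluster/state/nodes/twitter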
+func (s *ClusterStateService) buildURL() (string, url.Values, error) { + // Build URL + metrics := strings.Join(s.metrics, ",") + if metrics == "" { + metrics = "_all" + } + indices := strings.Join(s.indices, ",") + if indices == "" { + indices = "_all" + } + path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ + "metrics": metrics, + "indices": indices, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStateResponse is the response of ClusterStateService.Do. 
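+// A minimal usage sketch, assuming an initialized *Client named client:
+//
+//	state, err := client.ClusterState().Metric("metadata").Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(state.ClusterName, state.StateUUID)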
+type ClusterStateResponse struct { + ClusterName string `json:"cluster_name"` + Version int64 `json:"version"` + StateUUID string `json:"state_uuid"` + MasterNode string `json:"master_node"` + Blocks map[string]*clusterBlocks `json:"blocks"` + Nodes map[string]*discoveryNode `json:"nodes"` + Metadata *clusterStateMetadata `json:"metadata"` + RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"` + RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type clusterBlocks struct { + Global map[string]*clusterBlock `json:"global"` // id -> cluster block + Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block +} + +type clusterBlock struct { + Description string `json:"description"` + Retryable bool `json:"retryable"` + DisableStatePersistence bool `json:"disable_state_persistence"` + Levels []string `json:"levels"` +} + +type clusterStateMetadata struct { + ClusterUUID string `json:"cluster_uuid"` + Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata + Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data + RoutingTable struct { + Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table + } `json:"routing_table"` + RoutingNodes struct { + Unassigned []*shardRouting `json:"unassigned"` + Nodes []*shardRouting `json:"nodes"` + } `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type discoveryNode struct { + Name string `json:"name"` // server name, e.g. "es1" + TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] + Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } +} + +type clusterStateRoutingTable struct { + Indices map[string]interface{} `json:"indices"` +} + +type clusterStateRoutingNode struct { + Unassigned []*shardRouting `json:"unassigned"` + // Node Id -> shardRouting + Nodes map[string][]*shardRouting `json:"nodes"` +} + +type indexTemplateMetaData struct { + Template string `json:"template"` // e.g. "store-*" + Order int `json:"order"` + Settings map[string]interface{} `json:"settings"` // index settings + Mappings map[string]interface{} `json:"mappings"` // type name -> mapping +} + +type indexMetaData struct { + State string `json:"state"` + Settings map[string]interface{} `json:"settings"` + Mappings map[string]interface{} `json:"mappings"` + Aliases []string `json:"aliases"` // e.g. 
[ "alias1", "alias2" ] +} + +type indexRoutingTable struct { + Shards map[string]*shardRouting `json:"shards"` +} + +type shardRouting struct { + State string `json:"state"` + Primary bool `json:"primary"` + Node string `json:"node"` + RelocatingNode string `json:"relocating_node"` + Shard int `json:"shard"` + Index string `json:"index"` + Version int64 `json:"state"` + RestoreSource *RestoreSource `json:"restore_source"` + AllocationId *allocationId `json:"allocation_id"` + UnassignedInfo *unassignedInfo `json:"unassigned_info"` +} + +type RestoreSource struct { + Repository string `json:"repository"` + Snapshot string `json:"snapshot"` + Version string `json:"version"` + Index string `json:"index"` +} + +type allocationId struct { + Id string `json:"id"` + RelocationId string `json:"relocation_id"` +} + +type unassignedInfo struct { + Reason string `json:"reason"` + At string `json:"at"` + Details string `json:"details"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go new file mode 100644 index 000000000..63fd601b7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go @@ -0,0 +1,94 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "testing" + + "golang.org/x/net/context" +) + +func TestClusterState(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster state + res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } +} + +func TestClusterStateURLs(t *testing.T) { + tests := []struct { + Service *ClusterStateService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter", "gplus"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus", + }, + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + masterTimeout: "1s", + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + ExpectedParams: url.Values{"master_timeout": []string{"1s"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go 
b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go new file mode 100644 index 000000000..82402d9d6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go @@ -0,0 +1,350 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. +type ClusterStatsService struct { + client *Client + pretty bool + nodeId []string + flatSettings *bool + human *bool +} + +// NewClusterStatsService creates a new ClusterStatsService. +func NewClusterStatsService(client *Client) *ClusterStatsService { + return &ClusterStatsService{ + client: client, + nodeId: make([]string, 0), + } +} + +// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { + s.nodeId = nodeId + return s +} + +// FlatSettings is documented as: Return settings in flat format (default: false). +func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { + s.flatSettings = &flatSettings + return s +} + +// Human is documented as: Whether to return time and byte values in human-readable format.. +func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClusterStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.nodeId) > 0 { + path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + }) + if err != nil { + return "", url.Values{}, err + } + } else { + path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) + if err != nil { + return "", url.Values{}, err + } + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStatsService) Validate() error { + return nil +} + +// Do executes the operation. 
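+// A minimal usage sketch, assuming an initialized *Client named client:
+//
+//	stats, err := client.ClusterStats().Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("%d nodes, cluster status %q\n", stats.Nodes.Count.Total, stats.Status)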
+func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStatsResponse is the response of ClusterStatsService.Do. +type ClusterStatsResponse struct { + Timestamp int64 `json:"timestamp"` + ClusterName string `json:"cluster_name"` + ClusterUUID string `json:"uuid"` + Status string `json:"status"` + Indices *ClusterStatsIndices `json:"indices"` + Nodes *ClusterStatsNodes `json:"nodes"` +} + +type ClusterStatsIndices struct { + Count int `json:"count"` + Shards *ClusterStatsIndicesShards `json:"shards"` + Docs *ClusterStatsIndicesDocs `json:"docs"` + Store *ClusterStatsIndicesStore `json:"store"` + FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` + FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` + IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` + Completion *ClusterStatsIndicesCompletion `json:"completion"` + Segments *ClusterStatsIndicesSegments `json:"segments"` + Percolate *ClusterStatsIndicesPercolate `json:"percolate"` +} + +type ClusterStatsIndicesShards struct { + Total int `json:"total"` + Primaries int `json:"primaries"` + Replication float64 `json:"replication"` + Index *ClusterStatsIndicesShardsIndex `json:"index"` +} + +type ClusterStatsIndicesShardsIndex struct { + Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` + Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` + Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` +} + +type ClusterStatsIndicesShardsIndexIntMinMax struct { + Min int `json:"min"` + Max int `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesShardsIndexFloat64MinMax struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesDocs struct { + Count int `json:"count"` + Deleted int `json:"deleted"` +} + +type ClusterStatsIndicesStore struct { + Size string `json:"size"` // e.g. "5.3gb" + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` // e.g. "0s" + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type ClusterStatsIndicesFieldData struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesFilterCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} + +type ClusterStatsIndicesIdCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +type ClusterStatsIndicesCompletion struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` // e.g. 
"61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesSegments struct { + Count int64 `json:"count"` + Memory string `json:"memory"` // e.g. "61.3kb" + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb" + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" + FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type ClusterStatsIndicesPercolate struct { + Total int64 `json:"total"` + // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems + Time string `json:"get_time"` // e.g. "1s" + TimeInBytes int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` + Queries int64 `json:"queries"` +} + +// --- + +type ClusterStatsNodes struct { + Count *ClusterStatsNodesCount `json:"count"` + Versions []string `json:"versions"` + OS *ClusterStatsNodesOsStats `json:"os"` + Process *ClusterStatsNodesProcessStats `json:"process"` + JVM *ClusterStatsNodesJvmStats `json:"jvm"` + FS *ClusterStatsNodesFsStats `json:"fs"` + Plugins []*ClusterStatsNodesPlugin `json:"plugins"` +} + +type ClusterStatsNodesCount struct { + Total int `json:"total"` + MasterOnly int `json:"master_only"` + DataOnly int `json:"data_only"` + MasterData int `json:"master_data"` + Client int `json:"client"` +} + +type ClusterStatsNodesOsStats struct { + AvailableProcessors int `json:"available_processors"` + Mem *ClusterStatsNodesOsStatsMem `json:"mem"` + CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` +} + +type ClusterStatsNodesOsStatsMem struct { + Total string `json:"total"` // e.g. "16gb" + TotalInBytes int64 `json:"total_in_bytes"` +} + +type ClusterStatsNodesOsStatsCPU struct { + Vendor string `json:"vendor"` + Model string `json:"model"` + MHz int `json:"mhz"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + CoresPerSocket int `json:"cores_per_socket"` + CacheSize string `json:"cache_size"` // e.g. "256b" + CacheSizeInBytes int64 `json:"cache_size_in_bytes"` + Count int `json:"count"` +} + +type ClusterStatsNodesProcessStats struct { + CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` + OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` +} + +type ClusterStatsNodesProcessStatsCPU struct { + Percent float64 `json:"percent"` +} + +type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { + Min int64 `json:"min"` + Max int64 `json:"max"` + Avg int64 `json:"avg"` +} + +type ClusterStatsNodesJvmStats struct { + MaxUptime string `json:"max_uptime"` // e.g. "5h" + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` + Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` + Threads int64 `json:"threads"` +} + +type ClusterStatsNodesJvmStatsVersion struct { + Version string `json:"version"` // e.g. "1.8.0_45" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. 
"25.45-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + Count int `json:"count"` +} + +type ClusterStatsNodesJvmStatsMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` +} + +type ClusterStatsNodesFsStats struct { + Path string `json:"path"` + Mount string `json:"mount"` + Dev string `json:"dev"` + Total string `json:"total"` // e.g. "930.7gb"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` // e.g. "930.7gb"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` // e.g. "930.7gb"` + AvailableInBytes int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskIOOp int64 `json:"disk_io_op"` + DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` + DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` + DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` + DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` + DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` + DiskQueue string `json:"disk_queue"` + DiskServiceTime string `json:"disk_service_time"` +} + +type ClusterStatsNodesPlugin struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + URL string `json:"url"` + JVM bool `json:"jvm"` + Site bool `json:"site"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go new file mode 100644 index 000000000..c044b7c0a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go @@ -0,0 +1,93 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" + + "golang.org/x/net/context" +) + +func TestClusterStats(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster stats + res, err := client.ClusterStats().Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } + if res.Nodes == nil { + t.Fatalf("expected nodes; got: %v", res.Nodes) + } + if res.Nodes.Count == nil { + t.Fatalf("expected nodes count; got: %v", res.Nodes.Count) + } +} + +func TestClusterStatsURLs(t *testing.T) { + fFlag := false + tFlag := true + + tests := []struct { + Service *ClusterStatsService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStatsService{ + nodeId: []string{}, + }, + ExpectedPath: "/_cluster/stats", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1", "node2"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{}, + flatSettings: &tFlag, + }, + ExpectedPath: "/_cluster/stats", + ExpectedParams: url.Values{"flat_settings": []string{"true"}}, + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + flatSettings: &fFlag, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + ExpectedParams: url.Values{"flat_settings": []string{"false"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml b/vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml new file mode 100644 index 000000000..9923cfe4f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml @@ -0,0 +1,15 @@ +# bootstrap.ignore_system_bootstrap_checks: true + +discovery.zen.minimum_master_nodes: 1 + +network.host: +- _local_ +- _site_ + +network.publish_host: _local_ + + +# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html +script.inline: true +script.stored: true +script.file: true diff --git a/vendor/gopkg.in/olivere/elastic.v5/config/jvm.options b/vendor/gopkg.in/olivere/elastic.v5/config/jvm.options new file mode 100644 index 000000000..d97fbc9ec --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/config/jvm.options @@ -0,0 +1,100 @@ +## JVM configuration + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## You should always set the min and max JVM heap +## size to the same value. 
For example, to set +## the heap to 4 GB, set: +## +## -Xms4g +## -Xmx4g +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +## for more information +## +################################################################ + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space + +-Xms2g +-Xmx2g + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +-XX:+UseConcMarkSweepGC +-XX:CMSInitiatingOccupancyFraction=75 +-XX:+UseCMSInitiatingOccupancyOnly + +## optimizations + +# disable calls to System#gc +-XX:+DisableExplicitGC + +# pre-touch memory pages used by the JVM during initialization +-XX:+AlwaysPreTouch + +## basic + +# force the server VM +-server + +# set to headless, just in case +-Djava.awt.headless=true + +# ensure UTF-8 encoding by default (e.g. filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +-Djna.nosys=true + +# flags to keep Netty from being unsafe +-Dio.netty.noUnsafe=true +-Dio.netty.noKeySetOptimization=true + +# log4j 2 +-Dlog4j.shutdownHookEnabled=false +-Dlog4j2.disable.jmx=true +-Dlog4j.skipJansi=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${heap.dump.path} + +## GC logging + +#-XX:+PrintGCDetails +#-XX:+PrintGCTimeStamps +#-XX:+PrintGCDateStamps +#-XX:+PrintClassHistogram +#-XX:+PrintTenuringDistribution +#-XX:+PrintGCApplicationStoppedTime + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${loggc} + +# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON. +# If documents were already indexed with unquoted fields in a previous version +# of Elasticsearch, some operations may throw errors. +# +# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided +# only for migration purposes. 
+#-Delasticsearch.json.allow_unquoted_field_names=true diff --git a/vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties b/vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties new file mode 100644 index 000000000..9a3147f5a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties @@ -0,0 +1,74 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n +appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log 
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/vendor/gopkg.in/olivere/elastic.v5/config/scripts/.gitkeep b/vendor/gopkg.in/olivere/elastic.v5/config/scripts/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/gopkg.in/olivere/elastic.v5/connection.go b/vendor/gopkg.in/olivere/elastic.v5/connection.go new file mode 100644 index 000000000..0f27a8756 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/connection.go @@ -0,0 +1,90 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "sync" + "time" +) + +// conn represents a single connection to a node in a cluster. +type conn struct { + sync.RWMutex + nodeID string // node ID + url string + failures int + dead bool + deadSince *time.Time +} + +// newConn creates a new connection to the given URL. +func newConn(nodeID, url string) *conn { + c := &conn{ + nodeID: nodeID, + url: url, + } + return c +} + +// String returns a representation of the connection status. +func (c *conn) String() string { + c.RLock() + defer c.RUnlock() + return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) +} + +// NodeID returns the ID of the node of this connection. +func (c *conn) NodeID() string { + c.RLock() + defer c.RUnlock() + return c.nodeID +} + +// URL returns the URL of this connection. +func (c *conn) URL() string { + c.RLock() + defer c.RUnlock() + return c.url +} + +// IsDead returns true if this connection is marked as dead, i.e. a previous +// request to the URL has been unsuccessful. +func (c *conn) IsDead() bool { + c.RLock() + defer c.RUnlock() + return c.dead +} + +// MarkAsDead marks this connection as dead, increments the failures +// counter and stores the current time in dead since. +func (c *conn) MarkAsDead() { + c.Lock() + c.dead = true + if c.deadSince == nil { + utcNow := time.Now().UTC() + c.deadSince = &utcNow + } + c.failures += 1 + c.Unlock() +} + +// MarkAsAlive marks this connection as eligible to be returned from the +// pool of connections by the selector. +func (c *conn) MarkAsAlive() { + c.Lock() + c.dead = false + c.Unlock() +} + +// MarkAsHealthy marks this connection as healthy, i.e. a request has been +// successfully performed with it. 
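+// Together with MarkAsDead and MarkAsAlive this forms the connection's
+// state machine: a failed request marks the conn dead (recording deadSince
+// once and incrementing failures), the selector may mark it alive again,
+// and a successful request resets all failure state via MarkAsHealthy.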
+func (c *conn) MarkAsHealthy() { + c.Lock() + c.dead = false + c.deadSince = nil + c.failures = 0 + c.Unlock() +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/count.go b/vendor/gopkg.in/olivere/elastic.v5/count.go new file mode 100644 index 000000000..459ea0bff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/count.go @@ -0,0 +1,311 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// CountService is a convenient service for determining the +// number of documents in an index. Use SearchService with +// a SearchType of count for counting with queries etc. +type CountService struct { + client *Client + pretty bool + index []string + typ []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + defaultOperator string + df string + expandWildcards string + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + minScore interface{} + preference string + q string + query Query + routing string + bodyJson interface{} + bodyString string +} + +// NewCountService creates a new CountService. +func NewCountService(client *Client) *CountService { + return &CountService{ + client: client, + } +} + +// Index sets the names of the indices to restrict the results. +func (s *CountService) Index(index ...string) *CountService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Type sets the types to use to restrict the results. +func (s *CountService) Type(typ ...string) *CountService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes "_all" string +// or when no indices have been specified). +func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *CountService) Analyzer(analyzer string) *CountService { + s.analyzer = analyzer + return s +} + +// DefaultOperator specifies the default operator for query string query (AND or OR). +func (s *CountService) DefaultOperator(defaultOperator string) *CountService { + s.defaultOperator = defaultOperator + return s +} + +// Df specifies the field to use as default where no field prefix is given +// in the query string. +func (s *CountService) Df(df string) *CountService { + s.df = df + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
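+//
+// Editor's note, not part of the upstream source: as with the other optional
+// flags on this service, the field is a *bool rather than a bool so that
+// buildURL can tell "unset" apart from "explicitly false", and only emits the
+// ignore_unavailable parameter when the option was actually set.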
+func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures (such as +// providing text to a numeric field) should be ignored. +func (s *CountService) Lenient(lenient bool) *CountService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// MinScore restricts the result to documents with at least the given +// `_score` value. +func (s *CountService) MinScore(minScore interface{}) *CountService { + s.minScore = minScore + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *CountService) Preference(preference string) *CountService { + s.preference = preference + return s +} + +// Q specifies the query in the Lucene query string syntax. You can also use +// Query to pass a Query struct. +func (s *CountService) Q(q string) *CountService { + s.q = q + return s +} + +// Query specifies the query to pass. You can also pass a query string with Q. +func (s *CountService) Query(query Query) *CountService { + s.query = query + return s +} + +// Routing specifies the routing value. +func (s *CountService) Routing(routing string) *CountService { + s.routing = routing + return s +} + +// Pretty indicates that the JSON response should be indented and human readable. +func (s *CountService) Pretty(pretty bool) *CountService { + s.pretty = pretty + return s +} + +// BodyJson specifies the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *CountService) BodyJson(body interface{}) *CountService { + s.bodyJson = body + return s +} + +// BodyString specifies a query to restrict the results specified with +// the Query DSL (optional). +func (s *CountService) BodyString(body string) *CountService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation.
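+//
+// Editor's note, not part of the upstream source: following the branches
+// below, the generated paths take one of these shapes (index and type names
+// are placeholders):
+//
+//   /twitter/tweet/_count   (index and type set)
+//   /twitter/_count         (index only)
+//   /_all/tweet/_count      (type only)
+//   /_all/_count            (neither set)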
+func (s *CountService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_count" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.minScore != nil { + params.Set("min_score", fmt.Sprintf("%v", s.minScore)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *CountService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *CountService) Do(ctx context.Context) (int64, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return 0, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return 0, err + } + + // Setup HTTP request body + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return 0, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } else if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return 0, err + } + + // Return result + ret := new(CountResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return 0, err + } + if ret != nil { + return ret.Count, nil + } + + return int64(0), nil +} + +// CountResponse is the response of using the Count API. +type CountResponse struct { + Count int64 `json:"count"` + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/count_test.go b/vendor/gopkg.in/olivere/elastic.v5/count_test.go new file mode 100644 index 000000000..c4703343e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/count_test.go @@ -0,0 +1,128 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestCountURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all/_count", + }, + { + []string{}, + []string{"tweet"}, + "/_all/tweet/_count", + }, + { + []string{"twitter-*"}, + []string{"tweet", "follower"}, + "/twitter-%2A/tweet%2Cfollower/_count", + }, + { + []string{"twitter-2014", "twitter-2015"}, + []string{"tweet", "follower"}, + "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count", + }, + } + + for _, test := range tests { + path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestCount(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("tweet").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("gezwitscher").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 0 { + t.Errorf("expected Count = %d; got %d", 0, count) + } + + // Count with query + query := NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Query(query).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Count with query and type + query = NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Type("tweet").Query(query).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/decoder.go b/vendor/gopkg.in/olivere/elastic.v5/decoder.go new file mode 100644 index 000000000..9cd2cf720 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/decoder.go @@ -0,0 +1,26 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// Decoder is used to decode responses from Elasticsearch. 
+// Users of elastic can implement their own decoder for advanced purposes +// and set it per Client (see SetDecoder). If none is specified, +// DefaultDecoder is used. +type Decoder interface { + Decode(data []byte, v interface{}) error +} + +// DefaultDecoder uses json.Unmarshal from the Go standard library +// to decode JSON data. +type DefaultDecoder struct{} + +// Decode decodes with json.Unmarshal from the Go standard library. +func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go b/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go new file mode 100644 index 000000000..507cae819 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go @@ -0,0 +1,51 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "sync/atomic" + "testing" + + "golang.org/x/net/context" +) + +type decoder struct { + dec json.Decoder + + N int64 +} + +func (d *decoder) Decode(data []byte, v interface{}) error { + atomic.AddInt64(&d.N, 1) + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} + +func TestDecoder(t *testing.T) { + dec := &decoder{} + client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0)) + + tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + if dec.N == 0 { + t.Errorf("expected at least 1 call of decoder; got: %d", dec.N) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete.go b/vendor/gopkg.in/olivere/elastic.v5/delete.go new file mode 100644 index 000000000..c49c9f5d5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete.go @@ -0,0 +1,209 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteService deletes a typed JSON document from a specified +// index based on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-delete.html +// for details. +type DeleteService struct { + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + waitForActiveShards string + parent string + refresh string +} + +// NewDeleteService creates a new DeleteService. +func NewDeleteService(client *Client) *DeleteService { + return &DeleteService{ + client: client, + } +} + +// Type is the type of the document. +func (s *DeleteService) Type(typ string) *DeleteService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *DeleteService) Id(id string) *DeleteService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *DeleteService) Index(index string) *DeleteService { + s.index = index + return s +} + +// Routing is a specific routing value.
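+//
+// Editor's note, not part of the upstream source: a document indexed with a
+// custom routing value must be deleted with the same value, otherwise the
+// delete will not reach the shard holding it; buildURL below forwards the
+// value as the "routing" query string parameter.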
+func (s *DeleteService) Routing(routing string) *DeleteService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *DeleteService) Timeout(timeout string) *DeleteService { + s.timeout = timeout + return s +} + +// Version is an explicit version number for concurrency control. +func (s *DeleteService) Version(version interface{}) *DeleteService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *DeleteService) VersionType(versionType string) *DeleteService { + s.versionType = versionType + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the delete operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Parent is the ID of parent document. +func (s *DeleteService) Parent(parent string) *DeleteService { + s.parent = parent + return s +} + +// Refresh the index after performing the operation. +func (s *DeleteService) Refresh(refresh string) *DeleteService { + s.refresh = refresh + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *DeleteService) Pretty(pretty bool) *DeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
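+//
+// Editor's note, not part of the upstream source: a minimal usage sketch,
+// assuming a client and a document with id "1" in index "twitter", type
+// "tweet":
+//
+//   res, err := client.Delete().
+//       Index("twitter").Type("tweet").Id("1").
+//       Do(context.TODO())
+//   if err != nil {
+//       // Use IsNotFound(err) to detect a missing document.
+//       return err
+//   }
+//   fmt.Printf("found=%v version=%d\n", res.Found, res.Version)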
+func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete request. + +// DeleteResponse is the outcome of running DeleteService.Do. +type DeleteResponse struct { + // TODO _shards { total, failed, successful } + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int64 `json:"_version"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go new file mode 100644 index 000000000..a8d8028ab --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go @@ -0,0 +1,649 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteByQueryService deletes documents that match a query. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. +type DeleteByQueryService struct { + client *Client + index []string + typ []string + query Query + body interface{} + xSource []string + xSourceExclude []string + xSourceInclude []string + analyzer string + analyzeWildcard *bool + allowNoIndices *bool + conflicts string + defaultOperator string + df string + docvalueFields []string + expandWildcards string + explain *bool + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + preference string + q string + refresh string + requestCache *bool + requestsPerSecond *int + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + storedFields []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + waitForActiveShards string + waitForCompletion *bool + pretty bool +} + +// NewDeleteByQueryService creates a new DeleteByQueryService. +// You typically use the client's DeleteByQuery to get a reference to +// the service. +func NewDeleteByQueryService(client *Client) *DeleteByQueryService { + builder := &DeleteByQueryService{ + client: client, + } + return builder +} + +// Index sets the indices on which to perform the delete operation. +func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService { + s.index = append(s.index, index...) + return s +} + +// Type limits the delete operation to the given types. +func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService { + s.xSource = append(s.xSource, xSource...) 
+ return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { + s.analyzer = analyzer + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict proceeds with the request despite version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService { + s.conflicts = "proceed" + return s +} + +// DefaultOperator for query string query (AND or OR). +func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { + s.defaultOperator = defaultOperator + return s +} + +// DF is the field to use as default where no field prefix is given in the query string. +func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DefaultField is the field to use as default where no field prefix is given in the query string. +// It is an alias to the DF func. +func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService { + s.docvalueFields = docvalueFields + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Explain specifies whether to return detailed information about score +// computation as part of a hit. +func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService { + s.explain = &explain + return s +} + +// From is the starting offset (default: 0).
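+//
+// Editor's note, not part of the upstream source: From and Size mirror the
+// search API's pagination parameters and are serialized by buildURL below as
+// the "from" and "size" query string values.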
+func (s *DeleteByQueryService) From(from int) *DeleteByQueryService { + s.from = &from + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { + s.ignoreUnavailable = &ignore + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService { + s.preference = preference + return s +} + +// Q specifies the query in Lucene query string syntax. You can also use +// Query to programmatically specify the query. +func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { + s.q = query + return s +} + +// QueryString is an alias to Q. Notice that you can also use Query to +// programmatically set the query. +func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { + s.q = query + return s +} + +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Refresh indicates whether the affected indexes should be refreshed. +func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService { + s.refresh = refresh + return s +} + +// RequestCache specifies whether the request cache should be used for this +// request; defaults to the index-level setting. +func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService { + s.requestCache = &requestCache + return s +} + +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means no throttle, as does "unlimited", which is the only non-float value accepted. +func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Routing is a list of specific routing values. +func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService { + s.routing = append(s.routing, routing...) + return s +} + +// Scroll specifies how long a consistent view of the index should be maintained +// for scrolled search. +func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService { + s.scroll = scroll + return s +} + +// ScrollSize is the size on the scroll request powering the delete_by_query. +func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService { + s.scrollSize = &scrollSize + return s +} + +// SearchTimeout defines an explicit timeout for each search request. +// Defaults to no timeout. +func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService { + s.searchTimeout = searchTimeout + return s +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch".
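+//
+// Editor's note, not part of the upstream source: the value is passed through
+// unvalidated and serialized by buildURL below as the "search_type" query
+// string parameter.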
+func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService { + s.searchType = searchType + return s +} + +// Size represents the number of hits to return (default: 10). +func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService { + s.size = &size + return s +} + +// Sort is a list of <field>:<direction> pairs. +func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService { + s.sort = append(s.sort, sort...) + return s +} + +// SortByField adds a sort order. +func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService { + if ascending { + s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) + } else { + s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) + } + return s +} + +// Stats specifies specific tag(s) of the request for logging and statistical purposes. +func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService { + s.stats = append(s.stats, stats...) + return s +} + +// StoredFields specifies the list of stored fields to return as part of a hit. +func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService { + s.storedFields = storedFields + return s +} + +// SuggestField specifies which field to use for suggestions. +func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in response. +func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. +func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { + s.timeout = timeout + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService { + s.trackScores = &trackScores + return s +} + +// Version specifies whether to return document version as part of a hit. +func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService { + s.version = &version + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the delete by query operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1). +func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indents the JSON output from Elasticsearch. +func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Body specifies the body of the request. It overrides data being specified via SearchService. +func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService { + s.body = body + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.size != nil 
{ + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.trackScores != nil { + params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the delete-by-query operation. +func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Set body if there is a query set + var body interface{} + if s.body != nil { + body = s.body + } else if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + body = map[string]interface{}{ + "query": src, + } + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// BulkIndexByScrollResponse is the outcome of executing Do with +// DeleteByQueryService and UpdateByQueryService. 
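+//
+// Editor's note, not part of the upstream source: a typical way to consume
+// it, assuming an existing client, an index named "twitter" and a query q:
+//
+//   res, err := client.DeleteByQuery().
+//       Index("twitter").
+//       Query(q).
+//       Do(context.TODO())
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Printf("deleted %d documents in %d batches\n", res.Deleted, res.Batches)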
+type BulkIndexByScrollResponse struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Total int64 `json:"total"` + Updated int64 `json:"updated"` + Created int64 `json:"created"` + Deleted int64 `json:"deleted"` + Batches int64 `json:"batches"` + VersionConflicts int64 `json:"version_conflicts"` + Noops int64 `json:"noops"` + Retries struct { + Bulk int64 `json:"bulk"` + Search int64 `json:"search"` + } `json:"retries"` + Throttled string `json:"throttled"` + ThrottledMillis int64 `json:"throttled_millis"` + RequestsPerSecond float64 `json:"requests_per_second"` + Canceled string `json:"canceled"` + ThrottledUntil string `json:"throttled_until"` + ThrottledUntilMillis int64 `json:"throttled_until_millis"` + Failures []bulkIndexByScrollResponseFailure `json:"failures"` +} + +type bulkIndexByScrollResponseFailure struct { + Index string `json:"index,omitempty"` + Type string `json:"type,omitempty"` + Id string `json:"id,omitempty"` + Status int `json:"status,omitempty"` + Shard int `json:"shard,omitempty"` + Node int `json:"node,omitempty"` + // TODO "cause" contains exception details + // TODO "reason" contains exception details +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go new file mode 100644 index 000000000..b829a7e09 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go @@ -0,0 +1,147 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestDeleteByQueryBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectErr bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "/index1/_delete_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{}, + "/index1%2Cindex2/_delete_by_query", + false, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1/_delete_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2/_delete_by_query", + false, + }, + } + + for i, test := range tests { + builder := client.DeleteByQuery().Index(test.Indices...).Type(test.Types...)
+ err := builder.Validate() + if err != nil { + if !test.ExpectErr { + t.Errorf("case #%d: %v", i+1, err) + continue + } + } else { + // err == nil + if test.ExpectErr { + t.Errorf("case #%d: expected error", i+1) + continue + } + path, _, _ := builder.buildURL() + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} + +func TestDeleteByQuery(t *testing.T) { + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Fatalf("expected count = %d; got: %d", 3, count) + } + + // Delete all documents by sandrae + q := NewTermQuery("user", "sandrae") + res, err := client.DeleteByQuery(). + Index(testIndexName). + Type("tweet"). + Query(q). + Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected response != nil; got: %v", res) + } + + // Flush and check count + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatalf("expected Count = %d; got: %d", 2, count) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/delete_template.go new file mode 100644 index 000000000..a7ec9844e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_template.go @@ -0,0 +1,110 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteTemplateService deletes a search template. More information can +// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type DeleteTemplateService struct { + client *Client + pretty bool + id string + version *int + versionType string +} + +// NewDeleteTemplateService creates a new DeleteTemplateService. +func NewDeleteTemplateService(client *Client) *DeleteTemplateService { + return &DeleteTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { + s.id = id + return s +} + +// Version an explicit version number for concurrency control. 
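+//
+// Editor's note, not part of the upstream source: buildURL below serializes
+// this as the "version" query string parameter, next to "version_type" when
+// VersionType is also set.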
+func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { + s.version = &version + return s +} + +// VersionType specifies a version type. +func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(AcknowledgedResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go new file mode 100644 index 000000000..c0fe8f0cd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestDeleteTemplateValidate(t *testing.T) { + client := setupTestClient(t) + + // No template id -> fail with error + res, err := NewDeleteTemplateService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_test.go b/vendor/gopkg.in/olivere/elastic.v5/delete_test.go new file mode 100644 index 000000000..fd95f49d2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_test.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestDelete(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Delete document 1 + res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Delete non existent document 99 + res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh("true").Do(context.TODO()) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got %v", err) + } + if res != nil { + t.Fatalf("expected no response; got: %v", res) + } + + count, err = client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } +} + +func TestDeleteValidate(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // No index name -> fail with error + res, err := NewDeleteService(client).Type("tweet").Id("1").Do(context.TODO()) + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No type -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do(context.TODO()) + if err == nil { + t.Fatalf("expected Delete to fail without type") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No id -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do(context.TODO()) + if err == nil { + t.Fatalf("expected Delete to fail without id") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/doc.go b/vendor/gopkg.in/olivere/elastic.v5/doc.go new file mode 100644 index 000000000..38e9980f7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/doc.go @@ -0,0 +1,51 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +/* +Package elastic provides an interface to the Elasticsearch server +(http://www.elasticsearch.org/). + +The first thing you do is to create a Client. If you have Elasticsearch +installed and running with its default settings +(i.e. available at http://127.0.0.1:9200), all you need to do is: + + client, err := elastic.NewClient() + if err != nil { + // Handle error + } + +If your Elasticsearch server is running on a different IP and/or port, +just provide a URL to NewClient: + + // Create a client and connect to http://192.168.2.10:9201 + client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) + if err != nil { + // Handle error + } + +You can pass many more configuration parameters to NewClient. Review the +documentation of NewClient for more information. + +If no Elasticsearch server is available, services will fail when creating +a new request and will return ErrNoClient. + +A Client provides services. The services usually come with a variety of +methods to prepare the query and a Do function to execute it against the +Elasticsearch REST interface and return a response. Here is an example +of the IndexExists service that checks if a given index already exists. + + exists, err := client.IndexExists("twitter").Do(context.Background()) + if err != nil { + // Handle error + } + if !exists { + // Index does not exist yet. + } + +Look up the documentation for Client to get an idea of the services provided +and what kinds of responses you get when executing the Do function of a service. +Also see the wiki on GitHub for more details. + +*/ +package elastic diff --git a/vendor/gopkg.in/olivere/elastic.v5/errors.go b/vendor/gopkg.in/olivere/elastic.v5/errors.go new file mode 100644 index 000000000..009123531 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/errors.go @@ -0,0 +1,141 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// checkResponse will return an error if the request/response indicates +// an error returned from Elasticsearch. +// +// HTTP status codes in the range [200..299] are considered successful. +// All other status codes are considered errors unless they are listed in +// ignoreErrors. This is necessary because for some services, HTTP status 404 +// is a valid response from Elasticsearch (e.g. the Exists service). +// +// The func tries to parse error details as returned from Elasticsearch +// and encapsulates them in type elastic.Error. +func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error { + // 200-299 are valid status codes + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + // Ignore certain errors? + for _, code := range ignoreErrors { + if code == res.StatusCode { + return nil + } + } + return createResponseError(res) +} + +// createResponseError creates an Error structure from the HTTP response, +// its status code and the error information sent by Elasticsearch.
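+//
+// Editor's note, not part of the upstream source: the body it attempts to
+// decode has roughly the shape used by the fixture in errors_test.go:
+//
+//   {"error":{"root_cause":[...],"type":"...","reason":"..."},"status":404}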
+func createResponseError(res *http.Response) error { + if res.Body == nil { + return &Error{Status: res.StatusCode} + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return &Error{Status: res.StatusCode} + } + errReply := new(Error) + err = json.Unmarshal(data, errReply) + if err != nil { + return &Error{Status: res.StatusCode} + } + if errReply != nil { + if errReply.Status == 0 { + errReply.Status = res.StatusCode + } + return errReply + } + return &Error{Status: res.StatusCode} +} + +// Error encapsulates error details as returned from Elasticsearch. +type Error struct { + Status int `json:"status"` + Details *ErrorDetails `json:"error,omitempty"` +} + +// ErrorDetails encapsulate error details from Elasticsearch. +// It is used in e.g. elastic.Error and elastic.BulkResponseItem. +type ErrorDetails struct { + Type string `json:"type"` + Reason string `json:"reason"` + ResourceType string `json:"resource.type,omitempty"` + ResourceId string `json:"resource.id,omitempty"` + Index string `json:"index,omitempty"` + Phase string `json:"phase,omitempty"` + Grouped bool `json:"grouped,omitempty"` + CausedBy map[string]interface{} `json:"caused_by,omitempty"` + RootCause []*ErrorDetails `json:"root_cause,omitempty"` + FailedShards []map[string]interface{} `json:"failed_shards,omitempty"` +} + +// Error returns a string representation of the error. +func (e *Error) Error() string { + if e.Details != nil && e.Details.Reason != "" { + return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) + } else { + return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) + } +} + +// IsNotFound returns true if the given error indicates that Elasticsearch +// returned HTTP status 404. The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsNotFound(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusNotFound + case *Error: + return e.Status == http.StatusNotFound + case Error: + return e.Status == http.StatusNotFound + case int: + return e == http.StatusNotFound + } + return false +} + +// IsTimeout returns true if the given error indicates that Elasticsearch +// returned HTTP status 408. The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsTimeout(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusRequestTimeout + case *Error: + return e.Status == http.StatusRequestTimeout + case Error: + return e.Status == http.StatusRequestTimeout + case int: + return e == http.StatusRequestTimeout + } + return false +} + +// -- General errors -- + +// shardsInfo represents information from a shard. +type shardsInfo struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +// shardOperationFailure represents a shard failure. 
+type shardOperationFailure struct { + Shard int `json:"shard"` + Index string `json:"index"` + Status string `json:"status"` + // "reason" +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/errors_test.go b/vendor/gopkg.in/olivere/elastic.v5/errors_test.go new file mode 100644 index 000000000..c33dc2d6d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/errors_test.go @@ -0,0 +1,202 @@ +package elastic + +import ( + "bufio" + "fmt" + "net/http" + "strings" + "testing" +) + +func TestResponseError(t *testing.T) { + raw := "HTTP/1.1 404 Not Found\r\n" + + "\r\n" + + `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n" + r := bufio.NewReader(strings.NewReader(raw)) + + req, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.ReadResponse(r, nil) + if err != nil { + t.Fatal(err) + } + err = checkResponse(req, resp) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + + // Check for correct error message + expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode)) + got := err.Error() + if got != expected { + t.Fatalf("expected %q; got: %q", expected, got) + } + + // Check that error is of type *elastic.Error, which contains additional information + e, ok := err.(*Error) + if !ok { + t.Fatal("expected error to be of type *elastic.Error") + } + if e.Status != resp.StatusCode { + t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status) + } + if e.Details == nil { + t.Fatalf("expected error details; got: %v", e.Details) + } + if got, want := e.Details.Index, "elastic-test"; got != want { + t.Fatalf("expected error details index %q; got: %q", want, got) + } + if got, want := e.Details.Type, "index_missing_exception"; got != want { + t.Fatalf("expected error details type %q; got: %q", want, got) + } + if got, want := e.Details.Reason, "no such index"; got != want { + t.Fatalf("expected error details reason %q; got: %q", want, got) + } + if got, want := len(e.Details.RootCause), 1; got != want { + t.Fatalf("expected %d error details root causes; got: %d", want, got) + } + + if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want { + t.Fatalf("expected root cause index %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want { + t.Fatalf("expected root cause type %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want { + t.Fatalf("expected root cause reason %q; got: %q", want, got) + } +} + +func TestResponseErrorHTML(t *testing.T) { + raw := "HTTP/1.1 413 Request Entity Too Large\r\n" + + "\r\n" + + ` +413 Request Entity Too Large + +

+<body bgcolor="white">
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>nginx/1.6.2</center>
+</body>
+</html>
+ +` + "\r\n" + r := bufio.NewReader(strings.NewReader(raw)) + + req, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.ReadResponse(r, nil) + if err != nil { + t.Fatal(err) + } + err = checkResponse(req, resp) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + + // Check for correct error message + expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge)) + got := err.Error() + if got != expected { + t.Fatalf("expected %q; got: %q", expected, got) + } +} + +func TestResponseErrorWithIgnore(t *testing.T) { + raw := "HTTP/1.1 404 Not Found\r\n" + + "\r\n" + + `{"some":"response"}` + "\r\n" + r := bufio.NewReader(strings.NewReader(raw)) + + req, err := http.NewRequest("HEAD", "/", nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.ReadResponse(r, nil) + if err != nil { + t.Fatal(err) + } + err = checkResponse(req, resp) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + err = checkResponse(req, resp, 404) // ignore 404 errors + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } +} + +func TestIsNotFound(t *testing.T) { + if got, want := IsNotFound(nil), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(""), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(200), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(404), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsNotFound(&Error{Status: 404}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(&Error{Status: 200}), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsNotFound(Error{Status: 404}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(Error{Status: 200}), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestIsTimeout(t *testing.T) { + if got, want := IsTimeout(nil), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(""), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(200), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(408), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsTimeout(&Error{Status: 408}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(&Error{Status: 200}), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsTimeout(Error{Status: 408}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(Error{Status: 200}), false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got != 
want { + t.Errorf("expected %v; got: %v", want, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/example_test.go b/vendor/gopkg.in/olivere/elastic.v5/example_test.go new file mode 100644 index 000000000..540b9bccf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/example_test.go @@ -0,0 +1,549 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic_test + +import ( + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "time" + + "golang.org/x/net/context" + + elastic "gopkg.in/olivere/elastic.v5" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +func Example() { + errorlog := log.New(os.Stdout, "APP ", log.LstdFlags) + + // Obtain a client. You can also provide your own HTTP client here. + client, err := elastic.NewClient(elastic.SetErrorLog(errorlog)) + if err != nil { + // Handle error + panic(err) + } + + // Trace request and response details like this + //client.SetTracer(log.New(os.Stdout, "", 0)) + + // Ping the Elasticsearch server to get e.g. the version number + info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number) + + // Getting the ES version number is quite common, so there's a shortcut + esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200") + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch version %s", esversion) + + // Use the IndexExists service to check if a specified index exists. + exists, err := client.IndexExists("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if !exists { + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + put1, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type) + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + put2, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type) + + // Get tweet with specified ID + get1, err := client.Get(). + Index("twitter"). + Type("tweet"). + Id("1"). + Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if get1.Found { + fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + } + + // Flush to make sure the documents got written. 
+ _, err = client.Flush().Index("twitter").Do(context.Background()) + if err != nil { + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do(context.Background()) // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Each is a convenience function that iterates over hits in a search result. + // It makes sure you don't need to check for nil values in the response. + // However, it ignores errors in serialization. If you want full control + // over iterating the hits, see below. + var ttyp Tweet + for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + t := item.(Tweet) + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + // TotalHits is another convenience function that works even when something goes wrong. + fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + + // Here's how you iterate through results with full control over each step. + if searchResult.Hits.TotalHits > 0 { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } + + // Update a tweet by the update API of Elasticsearch. + // We just increment the number of retweets. + script := elastic.NewScript("ctx._source.retweets += num").Param("num", 1) + update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script(script). + Upsert(map[string]interface{}{"retweets": 0}). + Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version) + + // ... + + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleClient_NewClient_default() { + // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200. + client, err := elastic.NewClient() + if err != nil { + // Handle error + fmt.Printf("connection failed: %v\n", err) + } else { + fmt.Println("connected") + } + _ = client + // Output: + // connected +} + +func ExampleClient_NewClient_cluster() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. + client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200")) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleClient_NewClient_manyOptions() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer. 
+ // Set the healthcheck interval to 10s. When requests fail, + // retry 5 times. Print error messages to os.Stderr and informational + // messages to os.Stdout. + client, err := elastic.NewClient( + elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"), + elastic.SetSniff(false), + elastic.SetHealthcheckInterval(10*time.Second), + elastic.SetMaxRetries(5), + elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)), + elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags))) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleIndexExistsService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Use the IndexExists service to check if the index "twitter" exists. + exists, err := client.IndexExists("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if exists { + // ... + } +} + +func ExampleCreateIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleDeleteIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background()) + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleSearchService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do(context.Background()) // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits.TotalHits > 0 { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } +} + +func ExampleAggregations() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year). 
+ timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc() + histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year") + timeline = timeline.SubAggregation("history", histogram) + + // Search with a term query + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(elastic.NewMatchAllQuery()). // return all results, but ... + SearchType("count"). // ... do not return hits, just the count + Aggregation("timeline", timeline). // add our aggregation to the query + Pretty(true). // pretty print request and response JSON + Do(context.Background()) // execute + if err != nil { + // Handle error + panic(err) + } + + // Access "timeline" aggregate in search result. + agg, found := searchResult.Aggregations.Terms("timeline") + if !found { + log.Fatalf("we should have a terms aggregation called %q", "timeline") + } + for _, userBucket := range agg.Buckets { + // Every bucket should have the user field as key. + user := userBucket.Key + + // The sub-aggregation history should have the number of tweets per year. + histogram, found := userBucket.DateHistogram("history") + if found { + for _, year := range histogram.Buckets { + fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString) + } + } + } +} + +func ExampleSearchResult() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Do a search + searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do(context.Background()) + if err != nil { + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Each is a utility function that iterates over hits in a search result. + // It makes sure you don't need to check for nil values in the response. + // However, it ignores errors in serialization. If you want full control + // over iterating the hits, see below. + var ttyp Tweet + for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + t := item.(Tweet) + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + + // Here's how you iterate hits with full control. + if searchResult.Hits.TotalHits > 0 { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } +} + +func ExamplePutTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Create search template + tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` + + // Create template + resp, err := client.PutTemplate(). + Id("my-search-template"). // Name of the template + BodyString(tmpl). 
// Search template itself + Do(context.Background()) // Execute + if err != nil { + panic(err) + } + if resp.Acknowledged { + fmt.Println("search template creation acknowledged") + } +} + +func ExampleGetTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get template stored under "my-search-template" + resp, err := client.GetTemplate().Id("my-search-template").Do(context.Background()) + if err != nil { + panic(err) + } + fmt.Printf("search template is: %q\n", resp.Template) +} + +func ExampleDeleteTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Delete template + resp, err := client.DeleteTemplate().Id("my-search-template").Do(context.Background()) + if err != nil { + panic(err) + } + if resp != nil && resp.Acknowledged { + fmt.Println("template deleted") + } +} + +func ExampleClusterHealthService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster health + res, err := client.ClusterHealth().Index("twitter").Do(context.Background()) + if err != nil { + panic(err) + } + if res == nil { + panic(err) + } + fmt.Printf("Cluster status is %q\n", res.Status) +} + +func ExampleClusterHealthService_WaitForGreen() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Wait for status green + res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do(context.Background()) + if err != nil { + panic(err) + } + if res.TimedOut { + fmt.Printf("time out waiting for cluster status %q\n", "green") + } else { + fmt.Printf("cluster status is %q\n", res.Status) + } +} + +func ExampleClusterStateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster state + res, err := client.ClusterState().Metric("version").Do(context.Background()) + if err != nil { + panic(err) + } + fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/exists.go b/vendor/gopkg.in/olivere/elastic.v5/exists.go new file mode 100644 index 000000000..c193197b9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/exists.go @@ -0,0 +1,177 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ExistsService checks for the existence of a document using HEAD. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type ExistsService struct { + client *Client + pretty bool + id string + index string + typ string + preference string + realtime *bool + refresh string + routing string + parent string +} + +// NewExistsService creates a new ExistsService. +func NewExistsService(client *Client) *ExistsService { + return &ExistsService{ + client: client, + } +} + +// Id is the document ID. +func (s *ExistsService) Id(id string) *ExistsService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExistsService) Index(index string) *ExistsService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). 
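Before the remaining setters, a minimal usage sketch of this service; the index, type and id are illustrative, and the client is assumed to reach a local node:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		// Issues HEAD /twitter/tweet/1; 200 maps to true, 404 to false.
		found, err := client.Exists().Index("twitter").Type("tweet").Id("1").Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("document exists:", found)
	}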
+func (s *ExistsService) Type(typ string) *ExistsService { + s.typ = typ + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExistsService) Preference(preference string) *ExistsService { + s.preference = preference + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *ExistsService) Realtime(realtime bool) *ExistsService { + s.realtime = &realtime + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *ExistsService) Refresh(refresh string) *ExistsService { + s.refresh = refresh + return s +} + +// Routing is a specific routing value. +func (s *ExistsService) Routing(routing string) *ExistsService { + s.routing = routing + return s +} + +// Parent is the ID of the parent document. +func (s *ExistsService) Parent(parent string) *ExistsService { + s.parent = parent + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExistsService) Pretty(pretty bool) *ExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExistsService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/exists_test.go b/vendor/gopkg.in/olivere/elastic.v5/exists_test.go new file mode 100644 index 000000000..3f6d52bc6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/exists_test.go @@ -0,0 +1,54 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
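For intuition, the Do method above behaves roughly like a hand-rolled HEAD request; a sketch using only net/http, with an illustrative node URL and document path:

	package main

	import (
		"fmt"
		"log"
		"net/http"
	)

	func main() {
		res, err := http.Head("http://127.0.0.1:9200/twitter/tweet/1")
		if err != nil {
			log.Fatal(err)
		}
		defer res.Body.Close()
		// Mirror the status switch in ExistsService.Do.
		switch res.StatusCode {
		case http.StatusOK:
			fmt.Println("document exists")
		case http.StatusNotFound:
			fmt.Println("document does not exist")
		default:
			fmt.Printf("unexpected HTTP status %d\n", res.StatusCode)
		}
	}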
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestExists(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+	exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Fatal("expected document to exist")
+	}
+}
+
+func TestExistsValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	// No index -> fail with error
+	res, err := NewExistsService(client).Type("tweet").Id("1").Do(context.TODO())
+	if err == nil {
+		t.Fatalf("expected Exists to fail without index name")
+	}
+	if res != false {
+		t.Fatalf("expected result to be false; got: %v", res)
+	}
+
+	// No type -> fail with error
+	res, err = NewExistsService(client).Index(testIndexName).Id("1").Do(context.TODO())
+	if err == nil {
+		t.Fatalf("expected Exists to fail without type")
+	}
+	if res != false {
+		t.Fatalf("expected result to be false; got: %v", res)
+	}
+
+	// No id -> fail with error
+	res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do(context.TODO())
+	if err == nil {
+		t.Fatalf("expected Exists to fail without id")
+	}
+	if res != false {
+		t.Fatalf("expected result to be false; got: %v", res)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/explain.go b/vendor/gopkg.in/olivere/elastic.v5/explain.go
new file mode 100644
index 000000000..39e252ee4
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/explain.go
@@ -0,0 +1,322 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+	client                 *Client
+	pretty                 bool
+	id                     string
+	index                  string
+	typ                    string
+	q                      string
+	routing                string
+	lenient                *bool
+	analyzer               string
+	df                     string
+	fields                 []string
+	lowercaseExpandedTerms *bool
+	xSourceInclude         []string
+	analyzeWildcard        *bool
+	parent                 string
+	preference             string
+	xSource                []string
+	defaultOperator        string
+	xSourceExclude         []string
+	source                 string
+	bodyJson               interface{}
+	bodyString             string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+	return &ExplainService{
+		client:         client,
+		xSource:        make([]string, 0),
+		xSourceExclude: make([]string, 0),
+		fields:         make([]string, 0),
+		xSourceInclude: make([]string, 0),
+	}
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+	s.id = id
+	return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+	s.index = index
+	return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+	s.typ = typ
+	return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+	s.source = source
+	return s
+}
+
+// XSourceExclude is a list of fields to exclude from the returned _source field.
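A minimal sketch of asking Elasticsearch why a document matches a query, modeled on the test further down in this patch; names and values are illustrative:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		expl, err := client.Explain("twitter", "tweet", "1").
			Query(elastic.NewTermQuery("user", "olivere")).
			Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("matched:", expl.Matched)
	}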
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *ExplainService) Lenient(lenient bool) *ExplainService { + s.lenient = &lenient + return s +} + +// Query in the Lucene query string syntax. +func (s *ExplainService) Q(q string) *ExplainService { + s.q = q + return s +} + +// Routing sets a specific routing value. +func (s *ExplainService) Routing(routing string) *ExplainService { + s.routing = routing + return s +} + +// AnalyzeWildcard specifies whether wildcards and prefix queries +// in the query string query should be analyzed (default: false). +func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer is the analyzer for the query string query. +func (s *ExplainService) Analyzer(analyzer string) *ExplainService { + s.analyzer = analyzer + return s +} + +// Df is the default field for query string query (default: _all). +func (s *ExplainService) Df(df string) *ExplainService { + s.df = df + return s +} + +// Fields is a list of fields to return in the response. +func (s *ExplainService) Fields(fields ...string) *ExplainService { + s.fields = append(s.fields, fields...) + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// XSourceInclude is a list of fields to extract and return from the _source field. +func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). +func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { + s.defaultOperator = defaultOperator + return s +} + +// Parent is the ID of the parent document. +func (s *ExplainService) Parent(parent string) *ExplainService { + s.parent = parent + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExplainService) Preference(preference string) *ExplainService { + s.preference = preference + return s +} + +// XSource is true or false to return the _source field or not, or a list of fields to return. +func (s *ExplainService) XSource(xSource ...string) *ExplainService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExplainService) Pretty(pretty bool) *ExplainService { + s.pretty = pretty + return s +} + +// Query sets a query definition using the Query DSL. +func (s *ExplainService) Query(query Query) *ExplainService { + src, err := query.Source() + if err != nil { + // Do nothing in case of an error + return s + } + body := make(map[string]interface{}) + body["query"] = src + s.bodyJson = body + return s +} + +// BodyJson sets the query definition using the Query DSL. +func (s *ExplainService) BodyJson(body interface{}) *ExplainService { + s.bodyJson = body + return s +} + +// BodyString sets the query definition using the Query DSL as a string. 
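The query can also be supplied without the DSL helpers. A sketch of the two alternative entry points shown above, Q (Lucene query-string syntax) and BodyString (raw Query DSL); all values are illustrative:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		ctx := context.Background()

		// 1) Lucene query-string syntax, sent as the q URL parameter.
		byQ, err := client.Explain("twitter", "tweet", "1").Q("user:olivere").Do(ctx)
		if err != nil {
			log.Fatal(err)
		}

		// 2) Raw Query DSL in the request body.
		byBody, err := client.Explain("twitter", "tweet", "1").
			BodyString(`{"query":{"term":{"user":"olivere"}}}`).
			Do(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(byQ.Matched, byBody.Matched)
	}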
+func (s *ExplainService) BodyString(body string) *ExplainService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ExplainService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.source != "" { + params.Set("source", s.source) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.df != "" { + params.Set("df", s.df) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExplainService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ExplainResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ExplainResponse is the response of ExplainService.Do. +type ExplainResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Matched bool `json:"matched"` + Explanation map[string]interface{} `json:"explanation"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/explain_test.go b/vendor/gopkg.in/olivere/elastic.v5/explain_test.go new file mode 100644 index 000000000..3bae94b26 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/explain_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestExplain(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh("true"). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Explain + query := NewTermQuery("user", "olivere") + expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if expl == nil { + t.Fatal("expected to return an explanation") + } + if !expl.Matched { + t.Errorf("expected matched to be %v; got: %v", true, expl.Matched) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go new file mode 100644 index 000000000..59a453c9e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go @@ -0,0 +1,74 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "strings" +) + +type FetchSourceContext struct { + fetchSource bool + transformSource bool + includes []string + excludes []string +} + +func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { + return &FetchSourceContext{ + fetchSource: fetchSource, + includes: make([]string, 0), + excludes: make([]string, 0), + } +} + +func (fsc *FetchSourceContext) FetchSource() bool { + return fsc.fetchSource +} + +func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { + fsc.fetchSource = fetchSource +} + +func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { + fsc.includes = append(fsc.includes, includes...) + return fsc +} + +func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { + fsc.excludes = append(fsc.excludes, excludes...) + return fsc +} + +func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { + fsc.transformSource = transformSource + return fsc +} + +func (fsc *FetchSourceContext) Source() (interface{}, error) { + if !fsc.fetchSource { + return false, nil + } + return map[string]interface{}{ + "includes": fsc.includes, + "excludes": fsc.excludes, + }, nil +} + +// Query returns the parameters in a form suitable for a URL query string. +func (fsc *FetchSourceContext) Query() url.Values { + params := url.Values{} + if !fsc.fetchSource { + params.Add("_source", "false") + return params + } + if len(fsc.includes) > 0 { + params.Add("_source_include", strings.Join(fsc.includes, ",")) + } + if len(fsc.excludes) > 0 { + params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) + } + return params +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go new file mode 100644 index 000000000..8c8dd47a5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go @@ -0,0 +1,125 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFetchSourceContextNoFetchSource(t *testing.T) { + builder := NewFetchSourceContext(false) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSource(t *testing.T) { + builder := NewFetchSourceContext(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":[]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":["c"],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextQueryDefaults(t *testing.T) { + builder := NewFetchSourceContext(true) + values := builder.Query() + got := values.Encode() + expected := "" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryNoFetchSource(t *testing.T) { + builder := NewFetchSourceContext(false) + values := builder.Query() + got := values.Encode() + expected := "_source=false" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + values := builder.Query() + got := values.Encode() + expected := "_source_exclude=c&_source_include=a%2Cb" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_stats.go b/vendor/gopkg.in/olivere/elastic.v5/field_stats.go new file mode 100644 index 000000000..a856dbcc3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/field_stats.go @@ -0,0 +1,257 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +const ( + FieldStatsClusterLevel = "cluster" + FieldStatsIndicesLevel = "indices" +) + +// FieldStatsService allows finding statistical properties of a field without executing a search, +// but looking up measurements that are natively available in the Lucene index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-stats.html +// for details +type FieldStatsService struct { + client *Client + pretty bool + level string + index []string + allowNoIndices *bool + expandWildcards string + fields []string + ignoreUnavailable *bool + bodyJson interface{} + bodyString string +} + +// NewFieldStatsService creates a new FieldStatsService +func NewFieldStatsService(client *Client) *FieldStatsService { + return &FieldStatsService{ + client: client, + index: make([]string, 0), + fields: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *FieldStatsService) Index(index ...string) *FieldStatsService { + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService { + s.expandWildcards = expandWildcards + return s +} + +// Fields is a list of fields for to get field statistics +// for (min value, max value, and more). +func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed). +func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Level sets if stats should be returned on a per index level or on a cluster wide level; +// should be one of 'cluster' or 'indices'; defaults to former +func (s *FieldStatsService) Level(level string) *FieldStatsService { + s.level = level + return s +} + +// ClusterLevel is a helper that sets Level to "cluster". +func (s *FieldStatsService) ClusterLevel() *FieldStatsService { + s.level = FieldStatsClusterLevel + return s +} + +// IndicesLevel is a helper that sets Level to "indices". +func (s *FieldStatsService) IndicesLevel() *FieldStatsService { + s.level = FieldStatsIndicesLevel + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. 
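A hedged usage sketch for this service, assuming Client exposes it as client.FieldStats() (consistent with the constructor above); field names are illustrative, and at cluster level results are keyed under "_all":

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		res, err := client.FieldStats().
			Fields("creation_date", "answer_count").
			ClusterLevel().
			Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for field, stats := range res.Indices["_all"].Fields {
			fmt.Printf("%s: min=%s max=%s docs=%d\n",
				field, stats.MinValueAsString, stats.MaxValueAsString, stats.DocCount)
		}
	}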
+func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. +func (s *FieldStatsService) BodyString(body string) *FieldStatsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *FieldStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_field_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.level != "" { + params.Set("level", s.level) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *FieldStatsService) Validate() error { + var invalid []string + if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) { + invalid = append(invalid, "Level") + } + if len(invalid) != 0 { + return fmt.Errorf("missing or invalid required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound) + if err != nil { + return nil, err + } + + // TODO(oe): Is 404 really a valid response here? + if res.StatusCode == http.StatusNotFound { + return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil + } + + // Return operation response + ret := new(FieldStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Request -- + +// FieldStatsRequest can be used to set up the body to be used in the +// Field Stats API. +type FieldStatsRequest struct { + Fields []string `json:"fields"` + IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"` +} + +// FieldStatsConstraints is a constraint on a field. +type FieldStatsConstraints struct { + Min *FieldStatsComparison `json:"min_value,omitempty"` + Max *FieldStatsComparison `json:"max_value,omitempty"` +} + +// FieldStatsComparison contain all comparison operations that can be used +// in FieldStatsConstraints. 
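The request and constraint types combine as in the serialization test further down; a sketch that reports only indices whose creation_date stats fall inside a window (the dates and the client.FieldStats() entry point are illustrative assumptions):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	func main() {
		client, err := elastic.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		req := &elastic.FieldStatsRequest{
			Fields: []string{"creation_date"},
			IndexConstraints: map[string]*elastic.FieldStatsConstraints{
				"creation_date": {
					Min: &elastic.FieldStatsComparison{Gte: "2014-01-01T00:00:00.000Z"},
					Max: &elastic.FieldStatsComparison{Lt: "2015-01-01T10:00:00.000Z"},
				},
			},
		}
		res, err := client.FieldStats().BodyJson(req).IndicesLevel().Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("indices within the window: %d\n", len(res.Indices))
	}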
+type FieldStatsComparison struct { + Lte interface{} `json:"lte,omitempty"` + Lt interface{} `json:"lt,omitempty"` + Gte interface{} `json:"gte,omitempty"` + Gt interface{} `json:"gt,omitempty"` +} + +// -- Response -- + +// FieldStatsResponse is the response body content +type FieldStatsResponse struct { + Indices map[string]IndexFieldStats `json:"indices,omitempty"` +} + +// IndexFieldStats contains field stats for an index +type IndexFieldStats struct { + Fields map[string]FieldStats `json:"fields,omitempty"` +} + +// FieldStats contains stats of an individual field +type FieldStats struct { + MaxDoc int64 `json:"max_doc"` + DocCount int64 `json:"doc_count"` + Density int64 `json:"density"` + SumDocFrequeny int64 `json:"sum_doc_freq"` + SumTotalTermFrequency int64 `json:"sum_total_term_freq"` + MinValue interface{} `json:"min_value"` + MinValueAsString string `json:"min_value_as_string"` + MaxValue interface{} `json:"max_value"` + MaxValueAsString string `json:"max_value_as_string"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go new file mode 100644 index 000000000..cd75d1983 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go @@ -0,0 +1,267 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "reflect" + "sort" + "testing" +) + +func TestFieldStatsURLs(t *testing.T) { + tests := []struct { + Service *FieldStatsService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &FieldStatsService{}, + ExpectedPath: "/_field_stats", + ExpectedParams: url.Values{}, + }, + { + Service: &FieldStatsService{ + level: FieldStatsClusterLevel, + }, + ExpectedPath: "/_field_stats", + ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}}, + }, + { + Service: &FieldStatsService{ + level: FieldStatsIndicesLevel, + }, + ExpectedPath: "/_field_stats", + ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}}, + }, + { + Service: &FieldStatsService{ + level: FieldStatsClusterLevel, + index: []string{"index1"}, + }, + ExpectedPath: "/index1/_field_stats", + ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}}, + }, + { + Service: &FieldStatsService{ + level: FieldStatsIndicesLevel, + index: []string{"index1", "index2"}, + }, + ExpectedPath: "/index1%2Cindex2/_field_stats", + ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}}, + }, + { + Service: &FieldStatsService{ + level: FieldStatsIndicesLevel, + index: []string{"index_*"}, + }, + ExpectedPath: "/index_%2A/_field_stats", + ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} + +func TestFieldStatsValidate(t *testing.T) { + tests := []struct { + Service *FieldStatsService + Valid bool + }{ + { + Service: &FieldStatsService{}, + Valid: true, + }, + { + Service: &FieldStatsService{ + fields: []string{"field"}, + }, + Valid: true, + }, + { + 
Service: &FieldStatsService{ + bodyJson: &FieldStatsRequest{ + Fields: []string{"field"}, + }, + }, + Valid: true, + }, + { + Service: &FieldStatsService{ + level: FieldStatsClusterLevel, + bodyJson: &FieldStatsRequest{ + Fields: []string{"field"}, + }, + }, + Valid: true, + }, + { + Service: &FieldStatsService{ + level: FieldStatsIndicesLevel, + bodyJson: &FieldStatsRequest{ + Fields: []string{"field"}, + }, + }, + Valid: true, + }, + { + Service: &FieldStatsService{ + level: "random", + }, + Valid: false, + }, + } + + for _, test := range tests { + err := test.Service.Validate() + isValid := err == nil + if isValid != test.Valid { + t.Errorf("expected validity to be %v, got %v", test.Valid, isValid) + } + } +} + +func TestFieldStatsRequestSerialize(t *testing.T) { + req := &FieldStatsRequest{ + Fields: []string{"creation_date", "answer_count"}, + IndexConstraints: map[string]*FieldStatsConstraints{ + "creation_date": &FieldStatsConstraints{ + Min: &FieldStatsComparison{Gte: "2014-01-01T00:00:00.000Z"}, + Max: &FieldStatsComparison{Lt: "2015-01-01T10:00:00.000Z"}, + }, + }, + } + data, err := json.Marshal(req) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":["creation_date","answer_count"],"index_constraints":{"creation_date":{"min_value":{"gte":"2014-01-01T00:00:00.000Z"},"max_value":{"lt":"2015-01-01T10:00:00.000Z"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldStatsRequestDeserialize(t *testing.T) { + body := `{ + "fields" : ["creation_date", "answer_count"], + "index_constraints" : { + "creation_date" : { + "min_value" : { + "gte" : "2014-01-01T00:00:00.000Z" + }, + "max_value" : { + "lt" : "2015-01-01T10:00:00.000Z" + } + } + } + }` + + var request FieldStatsRequest + if err := json.Unmarshal([]byte(body), &request); err != nil { + t.Errorf("unexpected error during unmarshalling: %v", err) + } + + sort.Sort(lexicographically{request.Fields}) + + expectedFields := []string{"answer_count", "creation_date"} + if !reflect.DeepEqual(request.Fields, expectedFields) { + t.Errorf("expected fields to be %v, got %v", expectedFields, request.Fields) + } + + constraints, ok := request.IndexConstraints["creation_date"] + if !ok { + t.Errorf("expected field creation_date, didn't find it!") + } + if constraints.Min.Lt != nil { + t.Errorf("expected min value less than constraint to be empty, got %v", constraints.Min.Lt) + } + if constraints.Min.Gte != "2014-01-01T00:00:00.000Z" { + t.Errorf("expected min value >= %v, found %v", "2014-01-01T00:00:00.000Z", constraints.Min.Gte) + } + if constraints.Max.Lt != "2015-01-01T10:00:00.000Z" { + t.Errorf("expected max value < %v, found %v", "2015-01-01T10:00:00.000Z", constraints.Max.Lt) + } +} + +func TestFieldStatsResponseUnmarshalling(t *testing.T) { + clusterStats := `{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "indices": { + "_all": { + "fields": { + "creation_date": { + "max_doc": 1326564, + "doc_count": 564633, + "density": 42, + "sum_doc_freq": 2258532, + "sum_total_term_freq": -1, + "min_value_as_string": "2008-08-01T16:37:51.513Z", + "max_value_as_string": "2013-06-02T03:23:11.593Z" + }, + "answer_count": { + "max_doc": 1326564, + "doc_count": 139885, + "density": 10, + "sum_doc_freq": 559540, + "sum_total_term_freq": -1, + "min_value_as_string": "0", + "max_value_as_string": "160" + } + } + } + } + }` + + var response FieldStatsResponse + if err := json.Unmarshal([]byte(clusterStats), 
&response); err != nil { + t.Errorf("unexpected error during unmarshalling: %v", err) + } + + stats, ok := response.Indices["_all"] + if !ok { + t.Errorf("expected _all to be in the indices map, didn't find it") + } + + fieldStats, ok := stats.Fields["creation_date"] + if !ok { + t.Errorf("expected creation_date to be in the fields map, didn't find it") + } + if fieldStats.MinValueAsString != "2008-08-01T16:37:51.513Z" { + t.Errorf("expected creation_date min value to be %v, got %v", "2008-08-01T16:37:51.513Z", fieldStats.MinValueAsString) + } +} + +type lexicographically struct { + strings []string +} + +func (l lexicographically) Len() int { + return len(l.strings) +} + +func (l lexicographically) Less(i, j int) bool { + return l.strings[i] < l.strings[j] +} + +func (l lexicographically) Swap(i, j int) { + l.strings[i], l.strings[j] = l.strings[j], l.strings[i] +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/geo_point.go b/vendor/gopkg.in/olivere/elastic.v5/geo_point.go new file mode 100644 index 000000000..fb243671d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/geo_point.go @@ -0,0 +1,48 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strconv" + "strings" +) + +// GeoPoint is a geographic position described via latitude and longitude. +type GeoPoint struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` +} + +// Source returns the object to be serialized in Elasticsearch DSL. +func (pt *GeoPoint) Source() map[string]float64 { + return map[string]float64{ + "lat": pt.Lat, + "lon": pt.Lon, + } +} + +// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. +func GeoPointFromLatLon(lat, lon float64) *GeoPoint { + return &GeoPoint{Lat: lat, Lon: lon} +} + +// GeoPointFromString initializes a new GeoPoint by a string that is +// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". +func GeoPointFromString(latLon string) (*GeoPoint, error) { + latlon := strings.SplitN(latLon, ",", 2) + if len(latlon) != 2 { + return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) + } + lat, err := strconv.ParseFloat(latlon[0], 64) + if err != nil { + return nil, err + } + lon, err := strconv.ParseFloat(latlon[1], 64) + if err != nil { + return nil, err + } + return &GeoPoint{Lat: lat, Lon: lon}, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go b/vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go new file mode 100644 index 000000000..1d085cd38 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
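For orientation, a minimal, standalone sketch of how the GeoPoint helpers in geo_point.go above are typically used; the coordinates are arbitrary example values:

package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Build a point from explicit coordinates.
	pt := elastic.GeoPointFromLatLon(40.10210, -70.12091)

	// Parse the "{latitude},{longitude}" form; malformed input returns an error.
	pt2, err := elastic.GeoPointFromString("40.10210,-70.12091")
	if err != nil {
		panic(err)
	}

	// Source returns the map that is serialized into the Elasticsearch DSL.
	fmt.Println(pt.Source())  // map[lat:40.1021 lon:-70.12091]
	fmt.Println(pt2.Source()) // same output
}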
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPointSource(t *testing.T) { + pt := GeoPoint{Lat: 40, Lon: -70} + + data, err := json.Marshal(pt.Source()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"lat":40,"lon":-70}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get.go b/vendor/gopkg.in/olivere/elastic.v5/get.go new file mode 100644 index 000000000..f2309f5b4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get.go @@ -0,0 +1,257 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// GetService allows to get a typed JSON document from the index based +// on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type GetService struct { + client *Client + pretty bool + index string + typ string + id string + routing string + preference string + storedFields []string + refresh string + realtime *bool + fsc *FetchSourceContext + version interface{} + versionType string + parent string + ignoreErrorsOnGeneratedFields *bool +} + +// NewGetService creates a new GetService. +func NewGetService(client *Client) *GetService { + return &GetService{ + client: client, + typ: "_all", + } +} + +// Index is the name of the index. +func (s *GetService) Index(index string) *GetService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *GetService) Type(typ string) *GetService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *GetService) Id(id string) *GetService { + s.id = id + return s +} + +// Parent is the ID of the parent document. +func (s *GetService) Parent(parent string) *GetService { + s.parent = parent + return s +} + +// Routing is the specific routing value. +func (s *GetService) Routing(routing string) *GetService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *GetService) Preference(preference string) *GetService { + s.preference = preference + return s +} + +// StoredFields is a list of fields to return in the response. +func (s *GetService) StoredFields(storedFields ...string) *GetService { + s.storedFields = append(s.storedFields, storedFields...) + return s +} + +func (s *GetService) FetchSource(fetchSource bool) *GetService { + if s.fsc == nil { + s.fsc = NewFetchSourceContext(fetchSource) + } else { + s.fsc.SetFetchSource(fetchSource) + } + return s +} + +func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { + s.fsc = fetchSourceContext + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *GetService) Refresh(refresh string) *GetService { + s.refresh = refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *GetService) Realtime(realtime bool) *GetService { + s.realtime = &realtime + return s +} + +// VersionType is the specific version type. 
+func (s *GetService) VersionType(versionType string) *GetService { + s.versionType = versionType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetService) Version(version interface{}) *GetService { + s.version = version + return s +} + +// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that +// are generated if the transaction log is accessed. +func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { + s.ignoreErrorsOnGeneratedFields = &ignore + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *GetService) Pretty(pretty bool) *GetService { + s.pretty = pretty + return s +} + +// Validate checks if the operation is valid. +func (s *GetService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// buildURL builds the URL for the operation. +func (s *GetService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.ignoreErrorsOnGeneratedFields != nil { + params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) + } + if s.fsc != nil { + for k, values := range s.fsc.Query() { + params.Add(k, strings.Join(values, ",")) + } + } + return path, params, nil +} + +// Do executes the operation. +func (s *GetService) Do(ctx context.Context) (*GetResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(GetResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a get request. + +// GetResult is the outcome of GetService.Do. 
+type GetResult struct { + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + //Error string `json:"error,omitempty"` // used only in MultiGet + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_template.go b/vendor/gopkg.in/olivere/elastic.v5/get_template.go new file mode 100644 index 000000000..31c44b6d7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get_template.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// GetTemplateService reads a search template. +// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type GetTemplateService struct { + client *Client + pretty bool + id string + version interface{} + versionType string +} + +// NewGetTemplateService creates a new GetTemplateService. +func NewGetTemplateService(client *Client) *GetTemplateService { + return &GetTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *GetTemplateService) Id(id string) *GetTemplateService { + s.id = id + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *GetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *GetTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation and returns the template. 
+func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(GetTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type GetTemplateResponse struct { + Template string `json:"template"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go new file mode 100644 index 000000000..eff4a7fd5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go @@ -0,0 +1,53 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestGetPutDeleteTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // This is a search template, not an index template! + tmpl := `{ + "template": { + "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } }, + "size" : "{{my_size}}" + }, + "params":{ + "my_field" : "user", + "my_value" : "olivere", + "my_size" : 5 + } +}` + putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Acknowledged { + t.Fatalf("expected template creation to be acknowledged; got: %v", putres.Acknowledged) + } + + // Always delete template + defer client.DeleteTemplate().Id("elastic-template").Do(context.TODO()) + + // Get template + getres, err := client.GetTemplate().Id("elastic-template").Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected response; got: %v", getres) + } + if getres.Template == "" { + t.Errorf("expected template %q; got: %q", tmpl, getres.Template) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_test.go b/vendor/gopkg.in/olivere/elastic.v5/get_test.go new file mode 100644 index 000000000..77eac20f4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get_test.go @@ -0,0 +1,167 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
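A minimal usage sketch for the GetService defined in get.go above. The index, type, and document ID are placeholders, and the sketch assumes an Elasticsearch node reachable at the client's default URL:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // default: http://127.0.0.1:9200
	if err != nil {
		log.Fatal(err)
	}

	// GET /tweets/tweet/1
	res, err := client.Get().Index("tweets").Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		log.Fatal(err) // a missing document surfaces as an error here
	}
	if res.Found && res.Source != nil {
		var doc map[string]interface{}
		if err := json.Unmarshal(*res.Source, &doc); err != nil {
			log.Fatal(err)
		}
		fmt.Println(doc)
	}
}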
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Get document 1 + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + + // Get non existent document 99 + res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do(context.TODO()) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got: %v", err) + } + if res != nil { + t.Errorf("expected no response; got: %v", res) + } +} + +func TestGetWithSourceFiltering(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Get document 1, without source + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source != nil { + t.Errorf("expected Source == nil; got %v", res.Source) + } + + // Get document 1, exclude Message field + fsc := NewFetchSourceContext(true).Exclude("message") + res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + var tw tweet + err = json.Unmarshal(*res.Source, &tw) + if err != nil { + t.Fatal(err) + } + if tw.User != "olivere" { + t.Errorf("expected user %q; got: %q", "olivere", tw.User) + } + if tw.Message != "" { + t.Errorf("expected message %q; got: %q", "", tw.Message) + } +} + +func TestGetWithFields(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Get document 1, specifying fields + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").StoredFields("message").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got: %v", res.Found) + } + + // We must NOT have the "user" field + _, ok := res.Fields["user"] + if ok { + t.Fatalf("expected no field %q in document", "user") + } + + // We must have the "message" field + messageField, ok := res.Fields["message"] + if !ok { + t.Fatalf("expected field %q in document", "message") + } + + // Depending on the version of elasticsearch the message field will be returned + // as a string or a slice of strings. This test works in both cases. 
+ + messageString, ok := messageField.(string) + if !ok { + messageArray, ok := messageField.([]interface{}) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } else { + messageString, ok = messageArray[0].(string) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } + } + } + + if messageString != tweet1.Message { + t.Errorf("expected message %q; got: %q", tweet1.Message, messageString) + } +} + +func TestGetValidate(t *testing.T) { + // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name + client := setupTestClientAndCreateIndex(t) + + if _, err := client.Get().Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Id("1").Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Type("tweet").Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Id("1").Do(context.TODO()); err == nil { + t.Fatal("expected Get to fail") + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/highlight.go b/vendor/gopkg.in/olivere/elastic.v5/highlight.go new file mode 100644 index 000000000..e26dbad6b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/highlight.go @@ -0,0 +1,455 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Highlight allows highlighting search results on one or more fields. +// For details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +type Highlight struct { + fields []*HighlighterField + tagsSchema *string + highlightFilter *bool + fragmentSize *int + numOfFragments *int + preTags []string + postTags []string + order *string + encoder *string + requireFieldMatch *bool + boundaryMaxScan *int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + phraseLimit *int + options map[string]interface{} + forceSource *bool + useExplicitFieldOrder bool +} + +func NewHighlight() *Highlight { + hl := &Highlight{ + fields: make([]*HighlighterField, 0), + preTags: make([]string, 0), + postTags: make([]string, 0), + boundaryChars: make([]rune, 0), + options: make(map[string]interface{}), + } + return hl +} + +func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { + hl.fields = append(hl.fields, fields...) 
+ return hl +} + +func (hl *Highlight) Field(name string) *Highlight { + field := NewHighlighterField(name) + hl.fields = append(hl.fields, field) + return hl +} + +func (hl *Highlight) TagsSchema(schemaName string) *Highlight { + hl.tagsSchema = &schemaName + return hl +} + +func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { + hl.highlightFilter = &highlightFilter + return hl +} + +func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { + hl.fragmentSize = &fragmentSize + return hl +} + +func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { + hl.numOfFragments = &numOfFragments + return hl +} + +func (hl *Highlight) Encoder(encoder string) *Highlight { + hl.encoder = &encoder + return hl +} + +func (hl *Highlight) PreTags(preTags ...string) *Highlight { + hl.preTags = append(hl.preTags, preTags...) + return hl +} + +func (hl *Highlight) PostTags(postTags ...string) *Highlight { + hl.postTags = append(hl.postTags, postTags...) + return hl +} + +func (hl *Highlight) Order(order string) *Highlight { + hl.order = &order + return hl +} + +func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { + hl.requireFieldMatch = &requireFieldMatch + return hl +} + +func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { + hl.boundaryMaxScan = &boundaryMaxScan + return hl +} + +func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight { + hl.boundaryChars = append(hl.boundaryChars, boundaryChars...) + return hl +} + +func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { + hl.highlighterType = &highlighterType + return hl +} + +func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { + hl.fragmenter = &fragmenter + return hl +} + +func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight { + hl.highlightQuery = highlightQuery + return hl +} + +func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { + hl.noMatchSize = &noMatchSize + return hl +} + +func (hl *Highlight) Options(options map[string]interface{}) *Highlight { + hl.options = options + return hl +} + +func (hl *Highlight) ForceSource(forceSource bool) *Highlight { + hl.forceSource = &forceSource + return hl +} + +func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { + hl.useExplicitFieldOrder = useExplicitFieldOrder + return hl +} + +// Source creates the JSON structure of the "highlight" section in a search request body. +func (hl *Highlight) Source() (interface{}, error) { + // Returns the map inside of "highlight": + // "highlight":{ + // ... this ...
+ // } + source := make(map[string]interface{}) + if hl.tagsSchema != nil { + source["tags_schema"] = *hl.tagsSchema + } + if hl.preTags != nil && len(hl.preTags) > 0 { + source["pre_tags"] = hl.preTags + } + if hl.postTags != nil && len(hl.postTags) > 0 { + source["post_tags"] = hl.postTags + } + if hl.order != nil { + source["order"] = *hl.order + } + if hl.highlightFilter != nil { + source["highlight_filter"] = *hl.highlightFilter + } + if hl.fragmentSize != nil { + source["fragment_size"] = *hl.fragmentSize + } + if hl.numOfFragments != nil { + source["number_of_fragments"] = *hl.numOfFragments + } + if hl.encoder != nil { + source["encoder"] = *hl.encoder + } + if hl.requireFieldMatch != nil { + source["require_field_match"] = *hl.requireFieldMatch + } + if hl.boundaryMaxScan != nil { + source["boundary_max_scan"] = *hl.boundaryMaxScan + } + if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 { + source["boundary_chars"] = hl.boundaryChars + } + if hl.highlighterType != nil { + source["type"] = *hl.highlighterType + } + if hl.fragmenter != nil { + source["fragmenter"] = *hl.fragmenter + } + if hl.highlightQuery != nil { + src, err := hl.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if hl.noMatchSize != nil { + source["no_match_size"] = *hl.noMatchSize + } + if hl.phraseLimit != nil { + source["phrase_limit"] = *hl.phraseLimit + } + if hl.options != nil && len(hl.options) > 0 { + source["options"] = hl.options + } + if hl.forceSource != nil { + source["force_source"] = *hl.forceSource + } + + if hl.fields != nil && len(hl.fields) > 0 { + if hl.useExplicitFieldOrder { + // Use a slice for the fields + var fields []map[string]interface{} + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fmap := make(map[string]interface{}) + fmap[field.Name] = src + fields = append(fields, fmap) + } + source["fields"] = fields + } else { + // Use a map for the fields + fields := make(map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fields[field.Name] = src + } + source["fields"] = fields + } + } + + return source, nil +} + +// HighlighterField specifies a highlighted field. 
+type HighlighterField struct { + Name string + + preTags []string + postTags []string + fragmentSize int + fragmentOffset int + numOfFragments int + highlightFilter *bool + order *string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + matchedFields []string + phraseLimit *int + options map[string]interface{} + forceSource *bool + + /* + Name string + preTags []string + postTags []string + fragmentSize int + numOfFragments int + fragmentOffset int + highlightFilter *bool + order string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType string + fragmenter string + highlightQuery Query + noMatchSize *int + matchedFields []string + options map[string]interface{} + forceSource *bool + */ +} + +func NewHighlighterField(name string) *HighlighterField { + return &HighlighterField{ + Name: name, + preTags: make([]string, 0), + postTags: make([]string, 0), + fragmentSize: -1, + fragmentOffset: -1, + numOfFragments: -1, + boundaryMaxScan: -1, + boundaryChars: make([]rune, 0), + matchedFields: make([]string, 0), + options: make(map[string]interface{}), + } +} + +func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { + f.preTags = append(f.preTags, preTags...) + return f +} + +func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { + f.postTags = append(f.postTags, postTags...) + return f +} + +func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { + f.fragmentSize = fragmentSize + return f +} + +func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { + f.fragmentOffset = fragmentOffset + return f +} + +func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { + f.numOfFragments = numOfFragments + return f +} + +func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { + f.highlightFilter = &highlightFilter + return f +} + +func (f *HighlighterField) Order(order string) *HighlighterField { + f.order = &order + return f +} + +func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { + f.requireFieldMatch = &requireFieldMatch + return f +} + +func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { + f.boundaryMaxScan = boundaryMaxScan + return f +} + +func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { + f.boundaryChars = append(f.boundaryChars, boundaryChars...) + return f +} + +func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { + f.highlighterType = &highlighterType + return f +} + +func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { + f.fragmenter = &fragmenter + return f +} + +func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { + f.highlightQuery = highlightQuery + return f +} + +func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { + f.noMatchSize = &noMatchSize + return f +} + +func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { + f.options = options + return f +} + +func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { + f.matchedFields = append(f.matchedFields, matchedFields...) 
+ return f +} + +func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { + f.phraseLimit = &phraseLimit + return f +} + +func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { + f.forceSource = &forceSource + return f +} + +func (f *HighlighterField) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if f.preTags != nil && len(f.preTags) > 0 { + source["pre_tags"] = f.preTags + } + if f.postTags != nil && len(f.postTags) > 0 { + source["post_tags"] = f.postTags + } + if f.fragmentSize != -1 { + source["fragment_size"] = f.fragmentSize + } + if f.numOfFragments != -1 { + source["number_of_fragments"] = f.numOfFragments + } + if f.fragmentOffset != -1 { + source["fragment_offset"] = f.fragmentOffset + } + if f.highlightFilter != nil { + source["highlight_filter"] = *f.highlightFilter + } + if f.order != nil { + source["order"] = *f.order + } + if f.requireFieldMatch != nil { + source["require_field_match"] = *f.requireFieldMatch + } + if f.boundaryMaxScan != -1 { + source["boundary_max_scan"] = f.boundaryMaxScan + } + if f.boundaryChars != nil && len(f.boundaryChars) > 0 { + source["boundary_chars"] = f.boundaryChars + } + if f.highlighterType != nil { + source["type"] = *f.highlighterType + } + if f.fragmenter != nil { + source["fragmenter"] = *f.fragmenter + } + if f.highlightQuery != nil { + src, err := f.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if f.noMatchSize != nil { + source["no_match_size"] = *f.noMatchSize + } + if f.matchedFields != nil && len(f.matchedFields) > 0 { + source["matched_fields"] = f.matchedFields + } + if f.phraseLimit != nil { + source["phrase_limit"] = *f.phraseLimit + } + if f.options != nil && len(f.options) > 0 { + source["options"] = f.options + } + if f.forceSource != nil { + source["force_source"] = *f.forceSource + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go b/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go new file mode 100644 index 000000000..ce1b2b189 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go @@ -0,0 +1,193 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
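Before the tests, a minimal sketch of composing the Highlight builder defined in highlight.go above; the field name, tags, and fragment settings are arbitrary examples:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Highlight the "message" field, wrapping matched terms in <em> tags.
	hl := elastic.NewHighlight().
		Field("message").
		PreTags("<em>").
		PostTags("</em>").
		FragmentSize(150).
		NumOfFragments(3)

	// Source yields the map that becomes the "highlight" section of a
	// search request body.
	src, err := hl.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
	// {"fields":{"message":{}},"fragment_size":150,"number_of_fragments":3,"post_tags":["</em>"],"pre_tags":["<em>"]}
}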
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestHighlighterField(t *testing.T) { + field := NewHighlighterField("grade") + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterFieldWithOptions(t *testing.T) { + field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1) + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fragment_size":2,"number_of_fragments":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithStringField(t *testing.T) { + builder := NewHighlight().Field("grade") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + builder := NewHighlight().Fields(gradeField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithMultipleFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + colorField := NewHighlighterField("color") + builder := NewHighlight().Fields(gradeField, colorField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"color":{},"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterWithExplicitFieldOrder(t *testing.T) { + gradeField := NewHighlighterField("grade").FragmentSize(2) + colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1) + builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithTermQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = 
client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Specify highlighter + hl := NewHighlight() + hl = hl.Fields(NewHighlighterField("message")) + hl = hl.PreTags("<em>").PostTags("</em>") + + // The prefix query on "golang" should match only the first tweet + query := NewPrefixQuery("message", "golang") + searchResult, err := client.Search(). + Index(testIndexName). + Highlight(hl). + Query(query). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Fatalf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 1 { + t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 1 { + t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + var tw tweet + if err := json.Unmarshal(*hit.Source, &tw); err != nil { + t.Fatal(err) + } + if hit.Highlight == nil || len(hit.Highlight) == 0 { + t.Fatal("expected hit to have a highlight; got nil") + } + if hl, found := hit.Highlight["message"]; found { + if len(hl) != 1 { + t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl)) + } + expected := "Welcome to <em>Golang</em> and Elasticsearch." + if hl[0] != expected { + t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0]) + } + } else { + t.Fatal("expected to have a highlight on field \"message\"; got none") + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/index.go b/vendor/gopkg.in/olivere/elastic.v5/index.go new file mode 100644 index 000000000..8a0a6f54d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/index.go @@ -0,0 +1,289 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndexService adds or updates a typed JSON document in a specified index, +// making it searchable. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-index_.html +// for details. +type IndexService struct { + client *Client + pretty bool + id string + index string + typ string + parent string + routing string + timeout string + timestamp string + ttl string + version interface{} + opType string + versionType string + refresh string + waitForActiveShards string + pipeline string + bodyJson interface{} + bodyString string +} + +// NewIndexService creates a new IndexService. +func NewIndexService(client *Client) *IndexService { + return &IndexService{ + client: client, + } +} + +// Id is the document ID. +func (s *IndexService) Id(id string) *IndexService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *IndexService) Index(index string) *IndexService { + s.index = index + return s +} + +// Type is the type of the document. +func (s *IndexService) Type(typ string) *IndexService { + s.typ = typ + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the index operation.
Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pipeline specifies the pipeline id to preprocess incoming documents with. +func (s *IndexService) Pipeline(pipeline string) *IndexService { + s.pipeline = pipeline + return s +} + +// Refresh the index after performing the operation. +func (s *IndexService) Refresh(refresh string) *IndexService { + s.refresh = refresh + return s +} + +// Ttl is an expiration time for the document. +func (s *IndexService) Ttl(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// TTL is an expiration time for the document (alias for Ttl). +func (s *IndexService) TTL(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// Version is an explicit version number for concurrency control. +func (s *IndexService) Version(version interface{}) *IndexService { + s.version = version + return s +} + +// OpType is an explicit operation type, i.e. "create" or "index" (default). +func (s *IndexService) OpType(opType string) *IndexService { + s.opType = opType + return s +} + +// Parent is the ID of the parent document. +func (s *IndexService) Parent(parent string) *IndexService { + s.parent = parent + return s +} + +// Routing is a specific routing value. +func (s *IndexService) Routing(routing string) *IndexService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndexService) Timeout(timeout string) *IndexService { + s.timeout = timeout + return s +} + +// Timestamp is an explicit timestamp for the document. +func (s *IndexService) Timestamp(timestamp string) *IndexService { + s.timestamp = timestamp + return s +} + +// VersionType is a specific version type. +func (s *IndexService) VersionType(versionType string) *IndexService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndexService) Pretty(pretty bool) *IndexService { + s.pretty = pretty + return s +} + +// BodyJson is the document as a serializable JSON interface. +func (s *IndexService) BodyJson(body interface{}) *IndexService { + s.bodyJson = body + return s +} + +// BodyString is the document encoded as a string. +func (s *IndexService) BodyString(body string) *IndexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndexService) buildURL() (string, string, url.Values, error) { + var err error + var method, path string + + if s.id != "" { + // Create document with manual id + method = "PUT" + path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + } else { + // Automatic ID generation + // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation + method = "POST" + path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } + if err != nil { + return "", "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.timestamp != "" { + params.Set("timestamp", s.timestamp) + } + if s.ttl != "" { + params.Set("ttl", s.ttl) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return method, path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndexService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + method, path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, method, path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndexResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndexResponse is the result of indexing a document in Elasticsearch. +type IndexResponse struct { + // TODO _shards { total, failed, successful } + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/index_test.go b/vendor/gopkg.in/olivere/elastic.v5/index_test.go new file mode 100644 index 000000000..3faf281a2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/index_test.go @@ -0,0 +1,281 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
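A minimal usage sketch for the IndexService defined in index.go above; the index, type, ID, and document body are placeholders, and a reachable node at the client's default URL is assumed:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// With an explicit ID this issues PUT /tweets/tweet/1; omitting Id()
	// switches to POST and lets Elasticsearch generate the ID.
	resp, err := client.Index().
		Index("tweets").
		Type("tweet").
		Id("1").
		BodyJson(map[string]interface{}{"user": "olivere", "message": "hello"}).
		Do(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("indexed %s/%s/%s version=%d created=%v\n",
		resp.Index, resp.Type, resp.Id, resp.Version, resp.Created)
}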
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestIndexLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Exists + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Get document + getResult, err := client.Get(). + Index(testIndexName). + Type("tweet"). + Id("1"). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if getResult.Index != testIndexName { + t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) + } + if getResult.Type != "tweet" { + t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) + } + if getResult.Id != "1" { + t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) + } + if getResult.Source == nil { + t.Errorf("expected GetResult.Source to be != nil; got nil") + } + + // Decode the Source field + var tweetGot tweet + err = json.Unmarshal(*getResult.Source, &tweetGot) + if err != nil { + t.Fatal(err) + } + if tweetGot.User != tweet1.User { + t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User) + } + if tweetGot.Message != tweet1.Message { + t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message) + } + + // Delete document again + deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if deleteResult == nil { + t.Errorf("expected result to be != nil; got: %v", deleteResult) + } + + // Exists + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } +} + +func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + BodyJson(&tweet1). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + if indexResult.Id == "" { + t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id) + } + id := indexResult.Id + + // Exists + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Get document + getResult, err := client.Get(). + Index(testIndexName). + Type("tweet"). + Id(id). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if getResult.Index != testIndexName { + t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) + } + if getResult.Type != "tweet" { + t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) + } + if getResult.Id != id { + t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id) + } + if getResult.Source == nil { + t.Errorf("expected GetResult.Source to be != nil; got nil") + } + + // Decode the Source field + var tweetGot tweet + err = json.Unmarshal(*getResult.Source, &tweetGot) + if err != nil { + t.Fatal(err) + } + if tweetGot.User != tweet1.User { + t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User) + } + if tweetGot.Message != tweet1.Message { + t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message) + } + + // Delete document again + deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if deleteResult == nil { + t.Errorf("expected result to be != nil; got: %v", deleteResult) + } + + // Exists + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } +} + +func TestIndexValidate(t *testing.T) { + client := setupTestClient(t) + + tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // No index name -> fail with error + res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do(context.TODO()) + if err == nil { + t.Fatalf("expected Index to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No type -> fail with error + res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do(context.TODO()) + if err == nil { + t.Fatalf("expected Index to fail without type") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} + +func TestIndexCreateExistsOpenCloseDelete(t *testing.T) { + // TODO: Find out how to make these tests robust + t.Skip("test fails regularly with 409 (Conflict): " + + "IndexPrimaryShardNotAllocatedException[[elastic-test] " + + "primary not allocated post api...
skipping") + + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Fatalf("expected response; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged) + } + + // Exists + indexExists, err := client.IndexExists(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("expected index exists=%v; got %v", true, indexExists) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Close index + closeIndex, err := client.CloseIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if closeIndex == nil { + t.Fatalf("expected response; got: %v", closeIndex) + } + if !closeIndex.Acknowledged { + t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged) + } + + // Open index + openIndex, err := client.OpenIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if openIndex == nil { + t.Fatalf("expected response; got: %v", openIndex) + } + if !openIndex.Acknowledged { + t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if deleteIndex == nil { + t.Fatalf("expected response; got: %v", deleteIndex) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_close.go b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go new file mode 100644 index 000000000..2123cc1cf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go @@ -0,0 +1,154 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesCloseService closes an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesCloseService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesCloseService creates and initializes a new IndicesCloseService. +func NewIndicesCloseService(client *Client) *IndicesCloseService { + return &IndicesCloseService{client: client} +} + +// Index is the name of the index to close. +func (s *IndicesCloseService) Index(index string) *IndicesCloseService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. 
+func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesCloseService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_close", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesCloseService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesCloseResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesCloseResponse is the response of IndicesCloseService.Do. +type IndicesCloseResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go new file mode 100644 index 000000000..c80a104f8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndicesOpenAndClose(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+	defer func() {
+		// Delete index
+		deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !deleteIndex.Acknowledged {
+			t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+		}
+	}()
+
+	waitForYellow := func() {
+		// Wait for status yellow
+		res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do(context.TODO())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res != nil && res.TimedOut {
+			t.Fatalf("cluster timed out waiting for status %q", "yellow")
+		}
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Close index
+	cresp, err := client.CloseIndex(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cresp.Acknowledged {
+		t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Open index again
+	oresp, err := client.OpenIndex(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !oresp.Acknowledged {
+		t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+	}
+}
+*/
+
+func TestIndicesCloseValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	// No index name -> fail with error
+	res, err := NewIndicesCloseService(client).Do(context.TODO())
+	if err == nil {
+		t.Fatalf("expected IndicesClose to fail without index name")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_create.go b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go
new file mode 100644
index 000000000..17f1dfc3e
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go
@@ -0,0 +1,131 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"errors"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesCreateService creates a new index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
+// for details.
+type IndicesCreateService struct {
+	client        *Client
+	pretty        bool
+	index         string
+	timeout       string
+	masterTimeout string
+	bodyJson      interface{}
+	bodyString    string
+}
+
+// NewIndicesCreateService returns a new IndicesCreateService.
+func NewIndicesCreateService(client *Client) *IndicesCreateService {
+	return &IndicesCreateService{client: client}
+}
+
+// Index is the name of the index to create.
+func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
+	b.index = index
+	return b
+}
+
+// Timeout sets the explicit operation timeout, e.g. "5s".
+func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serialized as a JSON document, so use a map[string]interface{}.
+func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
+	b.bodyJson = body
+	return b
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
+	b.pretty = pretty
+	return b
+}
+
+// Do executes the operation.
+func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
+	if b.index == "" {
+		return nil, errors.New("missing index name")
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "1")
+	}
+	if b.masterTimeout != "" {
+		params.Set("master_timeout", b.masterTimeout)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if b.bodyJson != nil {
+		body = b.bodyJson
+	} else {
+		body = b.bodyString
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest(ctx, "PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(IndicesCreateResult)
+	if err := b.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a create index request.
+
+// IndicesCreateResult is the outcome of creating a new index.
+type IndicesCreateResult struct {
+	Acknowledged       bool `json:"acknowledged"`
+	ShardsAcknowledged bool `json:"shards_acknowledged"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go
new file mode 100644
index 000000000..96a3bce55
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go
@@ -0,0 +1,64 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
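// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): typical use of the
// IndicesCreateService added above, reached through the Client.CreateIndex
// helper as in the tests that follow. Assumes a reachable Elasticsearch 5.x
// node on the default http://127.0.0.1:9200; the index name and settings
// body are placeholders.
// ---------------------------------------------------------------------------
package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func createIndexExample() error {
	client, err := elastic.NewClient()
	if err != nil {
		return err
	}
	// Issues PUT /tweets; the acknowledged flag is decoded into the
	// IndicesCreateResult defined above.
	res, err := client.CreateIndex("tweets").
		BodyString(`{"settings":{"number_of_shards":1,"number_of_replicas":0}}`).
		Do(context.TODO())
	if err != nil {
		return err
	}
	fmt.Println("create acknowledged:", res.Acknowledged)
	return nil
}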
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesLifecycle(t *testing.T) { + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !createIndex.Acknowledged { + t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if index exists + indexExists, err := client.IndexExists(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("index %s should exist, but doesn't\n", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + // Check if index exists + indexExists, err = client.IndexExists(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexExists { + t.Fatalf("index %s should not exist, but does\n", testIndexName) + } +} + +func TestIndicesCreateValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesCreateService(client).Body(testMapping).Do(context.TODO()) + if err == nil { + t.Fatalf("expected IndicesCreate to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go new file mode 100644 index 000000000..8127f50d3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesDeleteService allows to delete existing indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html +// for details. +type IndicesDeleteService struct { + client *Client + pretty bool + index []string + timeout string + masterTimeout string +} + +// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. +func NewIndicesDeleteService(client *Client) *IndicesDeleteService { + return &IndicesDeleteService{ + client: client, + index: make([]string, 0), + } +} + +// Index adds the list of indices to delete. +// Use `_all` or `*` string to delete all indices. +func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete index request. + +// IndicesDeleteResponse is the response of IndicesDeleteService.Do. +type IndicesDeleteResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go new file mode 100644 index 000000000..5e53b4145 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go @@ -0,0 +1,123 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesDeleteTemplateService deletes index templates. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesDeleteTemplateService struct { + client *Client + pretty bool + name string + timeout string + masterTimeout string +} + +// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. +func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { + return &IndicesDeleteTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. +type IndicesDeleteTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go new file mode 100644 index 000000000..2785e9051 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesDeleteValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesDeleteService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected IndicesDelete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go new file mode 100644 index 000000000..6fbc4959c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go @@ -0,0 +1,151 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsService checks if an index or indices exist or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html +// for details. 
+type IndicesExistsService struct { + client *Client + pretty bool + index []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + local *bool +} + +// NewIndicesExistsService creates and initializes a new IndicesExistsService. +func NewIndicesExistsService(client *Client) *IndicesExistsService { + return &IndicesExistsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of one or more indices to check. +func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { + s.index = index + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or +// when no indices have been specified). +func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { + s.expandWildcards = expandWildcards + return s +} + +// Local, when set, returns local information and does not retrieve the state +// from master node (default: false). +func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go new file mode 100644 index 000000000..b01d80157 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsTemplateService checks if a given template exists. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists +// for documentation. +type IndicesExistsTemplateService struct { + client *Client + pretty bool + name string + local *bool +} + +// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. +func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { + return &IndicesExistsTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { + s.name = name + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go new file mode 100644 index 000000000..24ee9a2c2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go @@ -0,0 +1,69 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndexExistsTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tmpl := `{ + "template":"elastic-test*", + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "tweet":{ + "properties":{ + "tags":{ + "type":"keyword" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion" + } + } + } + } +}` + putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Acknowledged { + t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged) + } + + // Always delete template + defer client.IndexDeleteTemplate("elastic-template").Do(context.TODO()) + + // Check if template exists + exists, err := client.IndexTemplateExists("elastic-template").Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if !exists { + t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists) + } + + // Get template + getres, err := client.IndexGetTemplate("elastic-template").Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go new file mode 100644 index 000000000..d5c0e9511 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
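// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): the two HEAD-based
// existence checks vendored above, via the Client helpers used in the
// surrounding tests. Assumes an already-configured *elastic.Client;
// "tweets" and "elastic-template" are placeholder names.
// ---------------------------------------------------------------------------
package example

import (
	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func existenceChecks(client *elastic.Client) (indexExists, templateExists bool, err error) {
	// HEAD /tweets: 200 means the index exists, 404 means it does not.
	indexExists, err = client.IndexExists("tweets").Do(context.TODO())
	if err != nil {
		return false, false, err
	}
	// HEAD /_template/elastic-template works the same way for templates.
	templateExists, err = client.IndexTemplateExists("elastic-template").Do(context.TODO())
	return indexExists, templateExists, err
}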
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesExistsWithoutIndex(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesExistsService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected IndicesExists to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go new file mode 100644 index 000000000..6766187d3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go @@ -0,0 +1,161 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsTypeService checks if one or more types exist in one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html +// for details. +type IndicesExistsTypeService struct { + client *Client + pretty bool + typ []string + index []string + expandWildcards string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool +} + +// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. +func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { + return &IndicesExistsTypeService{ + client: client, + } +} + +// Index is a list of index names; use `_all` to check the types across all indices. +func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types to check. +func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { + s.typ = append(s.typ, types...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { + s.expandWildcards = expandWildcards + return s +} + +// Local specifies whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTypeService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.typ) == 0 { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go new file mode 100644 index 000000000..c66d30d98 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go @@ -0,0 +1,136 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
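// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): checking whether a
// mapping type exists, using the TypeExists helper the tests below exercise.
// Assumes a configured *elastic.Client; "tweets" and "tweet" are placeholders.
// ---------------------------------------------------------------------------
package example

import (
	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func typeExists(client *elastic.Client) (bool, error) {
	// HEAD /tweets/_mapping/tweet: 200 if the type exists, 404 otherwise.
	return client.TypeExists().Index("tweets").Type("tweet").Do(context.TODO())
}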
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesExistsTypeBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectValidateFailure bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "", + true, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/_mapping/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1"}, + "/index1%2Cindex2/_mapping/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/_mapping/type1%2Ctype2", + false, + }, + } + + for i, test := range tests { + err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate() + if err == nil && test.ExpectValidateFailure { + t.Errorf("#%d: expected validate to fail", i+1) + continue + } + if err != nil && !test.ExpectValidateFailure { + t.Errorf("#%d: expected validate to succeed", i+1) + continue + } + if !test.ExpectValidateFailure { + path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatalf("#%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("#%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} + +func TestIndicesExistsType(t *testing.T) { + client := setupTestClient(t) + + // Create index with tweet type + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if type exists + exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + // Check if type exists + exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if exists { + t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName) + } +} + +func TestIndicesExistsTypeValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesExistsTypeService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected IndicesExistsType to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go new file mode 100644 index 000000000..c780db10b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go @@ -0,0 +1,170 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesFlushService allows to flush one or more indices. The flush
+// process of an index basically frees memory from the index by flushing
+// data to the index storage and clearing the internal transaction log.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type IndicesFlushService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	force             *bool
+	waitIfOngoing     *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+// NewIndicesFlushService creates a new IndicesFlushService.
+func NewIndicesFlushService(client *Client) *IndicesFlushService {
+	return &IndicesFlushService{
+		client: client,
+		index:  make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` or empty string for all indices.
+func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Force indicates whether a flush should be forced even if it is not
+// necessarily needed, i.e. if no changes will be committed to the index.
+// This is useful if transaction log IDs should be incremented even if
+// no uncommitted changes are present. (This setting can be considered internal.)
+func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
+	s.force = &force
+	return s
+}
+
+// WaitIfOngoing, if set to true, indicates that the flush operation will
+// block until the flush can be executed if another flush operation is
+// already executing. The default is false and will cause an exception
+// to be thrown on the shard level if another flush operation is already running.
+func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
+	s.waitIfOngoing = &waitIfOngoing
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes `_all` string or when
+// no indices have been specified).
+func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesFlushService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_flush", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_flush" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.waitIfOngoing != nil { + params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesFlushService) Validate() error { + return nil +} + +// Do executes the service. +func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesFlushResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a flush request. + +type IndicesFlushResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go new file mode 100644 index 000000000..77a744ef7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go @@ -0,0 +1,71 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
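// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): flushing a single index
// through the Client.Flush helper backed by the IndicesFlushService above.
// Assumes a configured *elastic.Client; "tweets" is a placeholder.
// ---------------------------------------------------------------------------
package example

import (
	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func flushIndex(client *elastic.Client) error {
	// POST /tweets/_flush?wait_if_ongoing=true, blocking if another flush
	// is already running instead of failing at the shard level.
	_, err := client.Flush().Index("tweets").WaitIfOngoing(true).Do(context.TODO())
	return err
}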
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestFlush(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Flush all indices + res, err := client.Flush().Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Errorf("expected res to be != nil; got: %v", res) + } +} + +func TestFlushBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Expected string + ExpectValidateFailure bool + }{ + { + []string{}, + "/_flush", + false, + }, + { + []string{"index1"}, + "/index1/_flush", + false, + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_flush", + false, + }, + } + + for i, test := range tests { + err := NewIndicesFlushService(client).Index(test.Indices...).Validate() + if err == nil && test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to fail", i+1) + continue + } + if err != nil && !test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to succeed", i+1) + continue + } + if !test.ExpectValidateFailure { + path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go new file mode 100644 index 000000000..7b550f554 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go @@ -0,0 +1,190 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesForcemergeService allows to force merging of one or more indices. +// The merge relates to the number of segments a Lucene index holds +// within each shard. The force merge operation allows to reduce the number +// of segments by merging them. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/2.4/indices-forcemerge.html +// for more information. +type IndicesForcemergeService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flush *bool + ignoreUnavailable *bool + maxNumSegments interface{} + onlyExpungeDeletes *bool + operationThreading interface{} +} + +// NewIndicesForcemergeService creates a new IndicesForcemergeService. +func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { + return &IndicesForcemergeService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). 
+func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Flush specifies whether the index should be flushed after performing
+// the operation (default: true).
+func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
+	s.flush = &flush
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// MaxNumSegments specifies the number of segments the index should be
+// merged into (default: dynamic).
+func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
+	s.maxNumSegments = maxNumSegments
+	return s
+}
+
+// OnlyExpungeDeletes specifies whether the operation should only expunge
+// deleted documents.
+func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
+	s.onlyExpungeDeletes = &onlyExpungeDeletes
+	return s
+}
+
+func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
+	s.operationThreading = operationThreading
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+
+	// Build URL
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else {
+		path = "/_forcemerge"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.flush != nil {
+		params.Set("flush", fmt.Sprintf("%v", *s.flush))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.maxNumSegments != nil {
+		params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
+	}
+	if s.onlyExpungeDeletes != nil {
+		params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+	}
+	if s.operationThreading != nil {
+		params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesForcemergeService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesForcemergeResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. +type IndicesForcemergeResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go new file mode 100644 index 000000000..f6b1fb753 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesForcemergeBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_forcemerge", + }, + { + []string{"index1"}, + "/index1/_forcemerge", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_forcemerge", + }, + } + + for i, test := range tests { + path, _, err := client.Forcemerge().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndicesForcemerge(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + /* + if !ok { + t.Fatalf("expected forcemerge to succeed; got: %v", ok) + } + */ +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go new file mode 100644 index 000000000..589063c98 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go @@ -0,0 +1,203 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesGetService retrieves information about one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html +// for more details. +type IndicesGetService struct { + client *Client + pretty bool + index []string + feature []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + human *bool +} + +// NewIndicesGetService creates a new IndicesGetService. +func NewIndicesGetService(client *Client) *IndicesGetService { + return &IndicesGetService{ + client: client, + index: make([]string, 0), + feature: make([]string, 0), + } +} + +// Index is a list of index names. 
+func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { + s.index = append(s.index, indices...) + return s +} + +// Feature is a list of features. +func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { + s.feature = append(s.feature, features...) + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesGetService) Local(local bool) *IndicesGetService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). +func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard expression +// resolves to no concrete indices (default: false). +func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether wildcard expressions should get +// expanded to open or closed indices (default: open). +func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { + s.expandWildcards = expandWildcards + return s +} + +/* Disabled because serialization would fail in that case. */ +/* +// FlatSettings make the service return settings in flat format (default: false). +func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService { + s.flatSettings = &flatSettings + return s +} +*/ + +// Human indicates whether to return version and creation date values +// in human-readable format (default: false). +func (s *IndicesGetService) Human(human bool) *IndicesGetService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.feature) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ + "index": strings.Join(index, ","), + "feature": strings.Join(s.feature, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesGetService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetResponse + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetResponse is part of the response of IndicesGetService.Do. +type IndicesGetResponse struct { + Aliases map[string]interface{} `json:"aliases"` + Mappings map[string]interface{} `json:"mappings"` + Settings map[string]interface{} `json:"settings"` + Warmers map[string]interface{} `json:"warmers"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go new file mode 100644 index 000000000..24a0da928 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go @@ -0,0 +1,158 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// AliasesService returns the aliases associated with one or more indices. +// See http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type AliasesService struct { + client *Client + index []string + pretty bool +} + +// NewAliasesService instantiates a new AliasesService. +func NewAliasesService(client *Client) *AliasesService { + builder := &AliasesService{ + client: client, + } + return builder +} + +// Pretty asks Elasticsearch to indent the returned JSON. +func (s *AliasesService) Pretty(pretty bool) *AliasesService { + s.pretty = pretty + return s +} + +// Index adds one or more indices. +func (s *AliasesService) Index(index ...string) *AliasesService { + s.index = append(s.index, index...) + return s +} + +// buildURL builds the URL for the operation. +func (s *AliasesService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_aliases" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // { + // "indexName" : { + // "aliases" : { + // "alias1" : { }, + // "alias2" : { } + // } + // }, + // "indexName2" : { + // ... 
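// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): reading index metadata
// with the IndicesGetService above, via its exported constructor. Assumes a
// configured *elastic.Client; "tweets" is a placeholder.
// ---------------------------------------------------------------------------
package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func getIndexInfo(client *elastic.Client) error {
	// GET /tweets returns one IndicesGetResponse per matching index.
	res, err := elastic.NewIndicesGetService(client).Index("tweets").Do(context.TODO())
	if err != nil {
		return err
	}
	for name, info := range res {
		fmt.Println(name, "settings:", info.Settings)
	}
	return nil
}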
+ // }, + // } + indexMap := make(map[string]interface{}) + if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil { + return nil, err + } + + // Each (indexName, _) + ret := &AliasesResult{ + Indices: make(map[string]indexResult), + } + for indexName, indexData := range indexMap { + indexOut, found := ret.Indices[indexName] + if !found { + indexOut = indexResult{Aliases: make([]aliasResult, 0)} + } + + // { "aliases" : { ... } } + indexDataMap, ok := indexData.(map[string]interface{}) + if ok { + aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) + if ok { + for aliasName, _ := range aliasesData { + aliasRes := aliasResult{AliasName: aliasName} + indexOut.Aliases = append(indexOut.Aliases, aliasRes) + } + } + } + + ret.Indices[indexName] = indexOut + } + + return ret, nil +} + +// -- Result of an alias request. + +type AliasesResult struct { + Indices map[string]indexResult +} + +type indexResult struct { + Aliases []aliasResult +} + +type aliasResult struct { + AliasName string +} + +func (ar AliasesResult) IndicesByAlias(aliasName string) []string { + var indices []string + for indexName, indexInfo := range ar.Indices { + for _, aliasInfo := range indexInfo.Aliases { + if aliasInfo.AliasName == aliasName { + indices = append(indices, indexName) + } + } + } + return indices +} + +func (ir indexResult) HasAlias(aliasName string) bool { + for _, alias := range ir.Aliases { + if alias.AliasName == aliasName { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go new file mode 100644 index 000000000..1003ac79d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go @@ -0,0 +1,181 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
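
Usage sketch (illustrative annotation, not part of the patch): how the AliasesService and the AliasesResult helpers above fit together, again assuming a node on localhost:9200. The alias and index names are made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		panic(err)
	}
	// List the aliases of two indices, then ask the result which of
	// them carry a given alias.
	res, err := client.Aliases().Index("index1", "index2").Do(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.IndicesByAlias("my-alias"))
}
```

diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
new file mode 100644
index 000000000..1003ac79d
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
@@ -0,0 +1,181 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.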
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestAliasesBuildURL(t *testing.T) {
+	client := setupTestClient(t)
+
+	tests := []struct {
+		Indices  []string
+		Expected string
+	}{
+		{
+			[]string{},
+			"/_aliases",
+		},
+		{
+			[]string{"index1"},
+			"/index1/_aliases",
+		},
+		{
+			[]string{"index1", "index2"},
+			"/index1%2Cindex2/_aliases",
+		},
+	}
+
+	for i, test := range tests {
+		path, _, err := client.Aliases().Index(test.Indices...).buildURL()
+		if err != nil {
+			t.Errorf("case #%d: %v", i+1, err)
+			continue
+		}
+		if path != test.Expected {
+			t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+		}
+	}
+}
+
+func TestAliases(t *testing.T) {
+	var err error
+
+	client := setupTestClientAndCreateIndex(t)
+
+	// Some tweets
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	// Add tweets to first index
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add tweets to second index
+	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Alias should not yet exist
+	aliasesResult1, err := client.Aliases().
+		Index(testIndexName, testIndexName2).
+		//Pretty(true).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult1.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult1.Indices {
+		if len(indexDetails.Aliases) != 0 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+		}
+	}
+
+	// Add both indices to a new alias
+	aliasCreate, err := client.Alias().
+		Add(testIndexName, testAliasName).
+		Add(testIndexName2, testAliasName).
+		//Pretty(true).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasCreate.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+	}
+
+	// Alias should now exist
+	aliasesResult2, err := client.Aliases().
+		Index(testIndexName, testIndexName2).
+		//Pretty(true).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult2.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult2.Indices {
+		if len(indexDetails.Aliases) != 1 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+		}
+	}
+
+	// Check the reverse function:
+	indexInfo1, found := aliasesResult2.Indices[testIndexName]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+	}
+	aliasFound := indexInfo1.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+	}
+
+	// Check the reverse function:
+	indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName2, true, found)
+	}
+	aliasFound = indexInfo2.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+	}
+
+	// Remove the alias from the first index
+	aliasRemove1, err := client.Alias().
+		Remove(testIndexName, testAliasName).
+		//Pretty(true).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasRemove1.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+	}
+
+	// Alias should now exist only for index 2
+	aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult3.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult3.Indices {
+		if indexName == testIndexName {
+			if len(indexDetails.Aliases) != 0 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+			}
+		} else if indexName == testIndexName2 {
+			if len(indexDetails.Aliases) != 1 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+			}
+		} else {
+			t.Errorf("got unexpected index %s", indexName)
+		}
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go
new file mode 100644
index 000000000..eaf8864fd
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go
@@ -0,0 +1,171 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesGetMappingService retrieves the mapping definitions for an index or
+// index/type.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html
+// for details.
+type IndicesGetMappingService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	typ               []string
+	local             *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+// NewGetMappingService is an alias for NewIndicesGetMappingService.
+// Use NewIndicesGetMappingService.
+func NewGetMappingService(client *Client) *IndicesGetMappingService {
+	return NewIndicesGetMappingService(client)
+}
+
+// NewIndicesGetMappingService creates a new IndicesGetMappingService.
+func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
+	return &IndicesGetMappingService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names.
+func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Type is a list of document types.
+func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
+	s.typ = append(s.typ, types...)
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
+	s.local = &local
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
+	var index, typ []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.typ) > 0 {
+		typ = s.typ
+	} else {
+		typ = []string{"_all"}
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+		"index": strings.Join(index, ","),
+		"type":  strings.Join(typ, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetMappingService) Validate() error {
+	return nil
+}
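
Usage sketch (illustrative annotation, not part of the patch): fetching mappings through the `GetMapping` helper exercised by the tests below; assumes a node on localhost:9200, and the index and type names are made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		panic(err)
	}
	// GetMapping is the client-side helper for IndicesGetMappingService.
	// Omitting Index and Type falls back to "_all" on both sides.
	mappings, err := client.GetMapping().Index("twitter").Type("tweet").Do(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(mappings["twitter"])
}
```

+
+// Do executes the operation. It returns mapping definitions for an index
+// or index/type.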
+func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]interface{}
+	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping_test.go
new file mode 100644
index 000000000..5ec54e7fb
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndicesGetMappingURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Types    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all/_mapping/_all",
+		},
+		{
+			[]string{},
+			[]string{"tweet"},
+			"/_all/_mapping/tweet",
+		},
+		{
+			[]string{"twitter"},
+			[]string{"tweet"},
+			"/twitter/_mapping/tweet",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"tweet", "user"},
+			"/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go
new file mode 100644
index 000000000..9c18dbc93
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go
@@ -0,0 +1,184 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesGetSettingsService allows to retrieve settings of one
+// or more indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html
+// for more details.
+type IndicesGetSettingsService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	name              []string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	flatSettings      *bool
+	local             *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+	return &IndicesGetSettingsService{
+		client: client,
+		index:  make([]string, 0),
+		name:   make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Name is a list of the setting names that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	var index []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.name) > 0 {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+			"index": strings.Join(index, ","),
+			"name":  strings.Join(s.name, ","),
+		})
+	} else {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+			"index": strings.Join(index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetSettingsResponse
+	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+	Settings map[string]interface{} `json:"settings"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings_test.go
new file mode 100644
index 000000000..cc6cfe053
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings_test.go
@@ -0,0 +1,83 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestIndexGetSettingsURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Names    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all/_settings",
+		},
+		{
+			[]string{},
+			[]string{"index.merge.*"},
+			"/_all/_settings/index.merge.%2A",
+		},
+		{
+			[]string{"twitter-*"},
+			[]string{"index.merge.*", "_settings"},
+			"/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"index.merge.*", "_settings"},
+			"/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndexGetSettingsService(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "1.4.0" {
+		t.Skip("Index Get Settings API is available since 1.4")
+		return
+	}
+
+	res, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected result; got: %v", res)
+	}
+	info, found := res[testIndexName]
+	if !found {
+		t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+	}
+	if info == nil {
+		t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+	}
+	if info.Settings == nil {
+		t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
+	}
+}
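
Usage sketch (illustrative annotation, not part of the patch): reading a subset of index settings through the `IndexGetSettings` helper used by the test above. It assumes a node on localhost:9200; the index name is made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		panic(err)
	}
	// Fetch only the merge-related settings of one index; the Name
	// filter mirrors the "/{index}/_settings/{name}" URL built above.
	res, err := client.IndexGetSettings().
		Index("twitter"). // illustrative index name
		Name("index.merge.*").
		Do(context.TODO())
	if err != nil {
		panic(err)
	}
	if info, found := res["twitter"]; found {
		fmt.Println(info.Settings)
	}
}
```

diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go
new file mode 100644
index 000000000..1339e21c7
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go
@@ -0,0 +1,129 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.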
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+	client       *Client
+	pretty       bool
+	name         []string
+	flatSettings *bool
+	local        *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+	return &IndicesGetTemplateService{
+		client: client,
+		name:   make([]string, 0),
+	}
+}
+
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+	if len(s.name) > 0 {
+		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+			"name": strings.Join(s.name, ","),
+		})
+	} else {
+		path = "/_template"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetTemplateResponse
+	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
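
Usage sketch (illustrative annotation, not part of the patch): looking up an index template via the `IndexGetTemplate` helper exercised by the test below; assumes a node on localhost:9200, and the template name "my-template" is made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		panic(err)
	}
	// Fetch one template by name; leaving Name unset lists all templates
	// under "/_template".
	res, err := client.IndexGetTemplate().Name("my-template").Do(context.TODO())
	if err != nil {
		panic(err)
	}
	if tmpl, found := res["my-template"]; found {
		fmt.Println(tmpl.Order, tmpl.Template)
	}
}
```

+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.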
+type IndicesGetTemplateResponse struct {
+	Order    int                    `json:"order,omitempty"`
+	Template string                 `json:"template,omitempty"`
+	Settings map[string]interface{} `json:"settings,omitempty"`
+	Mappings map[string]interface{} `json:"mappings,omitempty"`
+	Aliases  map[string]interface{} `json:"aliases,omitempty"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template_test.go
new file mode 100644
index 000000000..c884ec1cb
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndexGetTemplateURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Names    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			"/_template",
+		},
+		{
+			[]string{"index1"},
+			"/_template/index1",
+		},
+		{
+			[]string{"index1", "index2"},
+			"/_template/index1%2Cindex2",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_test.go
new file mode 100644
index 000000000..a0c1c627e
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_test.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestIndicesGetValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	// No index name -> fail with error
+	res, err := NewIndicesGetService(client).Index("").Do(context.TODO())
+	if err == nil {
+		t.Fatalf("expected IndicesGet to fail without index name")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+}
+
+func TestIndicesGetURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Features []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all",
+		},
+		{
+			[]string{},
+			[]string{"_mappings"},
+			"/_all/_mappings",
+		},
+		{
+			[]string{"twitter"},
+			[]string{"_mappings", "_settings"},
+			"/twitter/_mappings%2C_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"_mappings", "_settings"},
+			"/store-1%2Cstore-2/_mappings%2C_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndicesGetService(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "1.4.0" {
+		t.Skip("Index Get API is available since 1.4")
+		return
+	}
+
+	res, err := client.IndexGet().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected result; got: %v", res)
+	}
+	info, found := res[testIndexName]
+	if !found {
+		t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+	}
+	if info == nil {
+		t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+	}
+	if info.Mappings == nil {
+		t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
+	}
+	if info.Settings == nil {
+		t.Errorf("expected settings to be != nil; got: %v", info.Settings)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_open.go b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go
new file mode 100644
index 000000000..1f1221101
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go
@@ -0,0 +1,158 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesOpenService opens an index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
+// for details.
+type IndicesOpenService struct {
+	client            *Client
+	pretty            bool
+	index             string
+	timeout           string
+	masterTimeout     string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+// NewIndicesOpenService creates and initializes a new IndicesOpenService.
+func NewIndicesOpenService(client *Client) *IndicesOpenService {
+	return &IndicesOpenService{client: client}
+}
+
+// Index is the name of the index to open.
+func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesOpenService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesOpenResponse)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
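
Usage sketch (illustrative annotation, not part of the patch): opening an index through the exported constructor defined above; assumes a node on localhost:9200, and the index name is made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		panic(err)
	}
	// Re-open a previously closed index; Validate rejects an empty name.
	res, err := elastic.NewIndicesOpenService(client).Index("twitter").Do(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Acknowledged)
}
```

+
+// IndicesOpenResponse is the response of IndicesOpenService.Do.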
+type IndicesOpenResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_open_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_open_test.go new file mode 100644 index 000000000..39b848502 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_open_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndicesOpenValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesOpenService(client).Do(context.TODO()) + if err == nil { + t.Fatalf("expected IndicesOpen to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go new file mode 100644 index 000000000..347b8fa54 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go @@ -0,0 +1,296 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// -- Actions -- + +// AliasAction is an action to apply to an alias, e.g. "add" or "remove". +type AliasAction interface { + Source() (interface{}, error) +} + +// AliasAddAction is an action to add to an alias. +type AliasAddAction struct { + index []string // index name(s) + alias string // alias name + filter Query + routing string + searchRouting string + indexRouting string +} + +// NewAliasAddAction returns an action to add an alias. +func NewAliasAddAction(alias string) *AliasAddAction { + return &AliasAddAction{ + alias: alias, + } +} + +// Index associates one or more indices to the alias. +func (a *AliasAddAction) Index(index ...string) *AliasAddAction { + a.index = append(a.index, index...) + return a +} + +func (a *AliasAddAction) removeBlankIndexNames() { + var indices []string + for _, index := range a.index { + if len(index) > 0 { + indices = append(indices, index) + } + } + a.index = indices +} + +// Filter associates a filter to the alias. +func (a *AliasAddAction) Filter(filter Query) *AliasAddAction { + a.filter = filter + return a +} + +// Routing associates a routing value to the alias. +// This basically sets index and search routing to the same value. +func (a *AliasAddAction) Routing(routing string) *AliasAddAction { + a.routing = routing + return a +} + +// IndexRouting associates an index routing value to the alias. +func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction { + a.indexRouting = routing + return a +} + +// SearchRouting associates a search routing value to the alias. +func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction { + a.searchRouting = strings.Join(routing, ",") + return a +} + +// Validate checks if the operation is valid. 
+func (a *AliasAddAction) Validate() error { + var invalid []string + if len(a.alias) == 0 { + invalid = append(invalid, "Alias") + } + if len(a.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Source returns the JSON-serializable data. +func (a *AliasAddAction) Source() (interface{}, error) { + a.removeBlankIndexNames() + if err := a.Validate(); err != nil { + return nil, err + } + src := make(map[string]interface{}) + act := make(map[string]interface{}) + src["add"] = act + act["alias"] = a.alias + switch len(a.index) { + case 1: + act["index"] = a.index[0] + default: + act["indices"] = a.index + } + if a.filter != nil { + f, err := a.filter.Source() + if err != nil { + return nil, err + } + act["filter"] = f + } + if len(a.routing) > 0 { + act["routing"] = a.routing + } + if len(a.indexRouting) > 0 { + act["index_routing"] = a.indexRouting + } + if len(a.searchRouting) > 0 { + act["search_routing"] = a.searchRouting + } + return src, nil +} + +// AliasRemoveAction is an action to remove an alias. +type AliasRemoveAction struct { + index []string // index name(s) + alias string // alias name +} + +// NewAliasRemoveAction returns an action to remove an alias. +func NewAliasRemoveAction(alias string) *AliasRemoveAction { + return &AliasRemoveAction{ + alias: alias, + } +} + +// Index associates one or more indices to the alias. +func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction { + a.index = append(a.index, index...) + return a +} + +func (a *AliasRemoveAction) removeBlankIndexNames() { + var indices []string + for _, index := range a.index { + if len(index) > 0 { + indices = append(indices, index) + } + } + a.index = indices +} + +// Validate checks if the operation is valid. +func (a *AliasRemoveAction) Validate() error { + var invalid []string + if len(a.alias) == 0 { + invalid = append(invalid, "Alias") + } + if len(a.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Source returns the JSON-serializable data. +func (a *AliasRemoveAction) Source() (interface{}, error) { + a.removeBlankIndexNames() + if err := a.Validate(); err != nil { + return nil, err + } + src := make(map[string]interface{}) + act := make(map[string]interface{}) + src["remove"] = act + act["alias"] = a.alias + switch len(a.index) { + case 1: + act["index"] = a.index[0] + default: + act["indices"] = a.index + } + return src, nil +} + +// -- Service -- + +// AliasService enables users to add or remove an alias. +// See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices-aliases.html +// for details. +type AliasService struct { + client *Client + actions []AliasAction + pretty bool +} + +// NewAliasService implements a service to manage aliases. +func NewAliasService(client *Client) *AliasService { + builder := &AliasService{ + client: client, + } + return builder +} + +// Pretty asks Elasticsearch to indent the HTTP response. +func (s *AliasService) Pretty(pretty bool) *AliasService { + s.pretty = pretty + return s +} + +// Add adds an alias to an index. +func (s *AliasService) Add(indexName string, aliasName string) *AliasService { + action := NewAliasAddAction(aliasName).Index(indexName) + s.actions = append(s.actions, action) + return s +} + +// Add adds an alias to an index and associates a filter to the alias. 
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService { + action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter) + s.actions = append(s.actions, action) + return s +} + +// Remove removes an alias. +func (s *AliasService) Remove(indexName string, aliasName string) *AliasService { + action := NewAliasRemoveAction(aliasName).Index(indexName) + s.actions = append(s.actions, action) + return s +} + +// Action accepts one or more AliasAction instances which can be +// of type AliasAddAction or AliasRemoveAction. +func (s *AliasService) Action(action ...AliasAction) *AliasService { + s.actions = append(s.actions, action...) + return s +} + +// buildURL builds the URL for the operation. +func (s *AliasService) buildURL() (string, url.Values, error) { + path := "/_aliases" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Do executes the command. +func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Body with actions + body := make(map[string]interface{}) + var actions []interface{} + for _, action := range s.actions { + src, err := action.Source() + if err != nil { + return nil, err + } + actions = append(actions, src) + } + body["actions"] = actions + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(AliasResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an alias request. + +// AliasResult is the outcome of calling Do on AliasService. +type AliasResult struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go new file mode 100644 index 000000000..ce2d75ca9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go @@ -0,0 +1,223 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +const ( + testAliasName = "elastic-test-alias" +) + +func TestAliasLifecycle(t *testing.T) { + var err error + + client := setupTestClientAndCreateIndex(t) + + // Some tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} + tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} + + // Add tweets to first index + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Add tweets to second index + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Add both indices to a new alias + aliasCreate, err := client.Alias(). + Add(testIndexName, testAliasName). + Action(NewAliasAddAction(testAliasName).Index(testIndexName2)). + //Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !aliasCreate.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) + } + + // Search should return all 3 tweets + matchAll := NewMatchAllQuery() + searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult1.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult1.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits) + } + + // Remove first index should remove two tweets, so should only yield 1 + aliasRemove1, err := client.Alias(). + Remove(testIndexName, testAliasName). + //Pretty(true). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if !aliasRemove1.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) + } + + searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult2.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult2.Hits.TotalHits != 1 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits) + } +} + +func TestAliasAddAction(t *testing.T) { + var tests = []struct { + Action *AliasAddAction + Expected string + Invalid bool + }{ + { + Action: NewAliasAddAction("").Index(""), + Invalid: true, + }, + { + Action: NewAliasAddAction("alias1").Index(""), + Invalid: true, + }, + { + Action: NewAliasAddAction("").Index("index1"), + Invalid: true, + }, + { + Action: NewAliasAddAction("alias1").Index("index1"), + Expected: `{"add":{"alias":"alias1","index":"index1"}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1", "index2"), + Expected: `{"add":{"alias":"alias1","indices":["index1","index2"]}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1"), + Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1"}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").IndexRouting("indexRouting1"), + Expected: `{"add":{"alias":"alias1","index":"index1","index_routing":"indexRouting1","routing":"routing1"}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1"), + Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1"}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1", "searchRouting2"), + Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1,searchRouting2"}}`, + }, + { + Action: NewAliasAddAction("alias1").Index("index1").Filter(NewTermQuery("user", "olivere")), + Expected: `{"add":{"alias":"alias1","filter":{"term":{"user":"olivere"}},"index":"index1"}}`, + }, + } + + for i, tt := range tests { + src, err := tt.Action.Source() + if err != nil { + if !tt.Invalid { + t.Errorf("#%d: expected to succeed", i) + } + } else { + if tt.Invalid { + t.Errorf("#%d: expected to fail", i) + } else { + dst, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + if want, have := tt.Expected, string(dst); want != have { + t.Errorf("#%d: expected %s, got %s", i, want, have) + } + } + } + } +} + +func TestAliasRemoveAction(t *testing.T) { + var tests = []struct { + Action *AliasRemoveAction + Expected string + Invalid bool + }{ + { + Action: NewAliasRemoveAction(""), + Invalid: true, + }, + { + Action: NewAliasRemoveAction("alias1"), + Invalid: true, + }, + { + Action: NewAliasRemoveAction("").Index("index1"), + Invalid: true, + }, + { + Action: NewAliasRemoveAction("alias1").Index("index1"), + Expected: `{"remove":{"alias":"alias1","index":"index1"}}`, + }, + { + Action: NewAliasRemoveAction("alias1").Index("index1", "index2"), + Expected: `{"remove":{"alias":"alias1","indices":["index1","index2"]}}`, + }, + } + + for i, tt := range tests { + src, err := tt.Action.Source() + if err != nil { + if !tt.Invalid { + t.Errorf("#%d: expected to succeed", i) + } + } else { + if tt.Invalid { + t.Errorf("#%d: expected to fail", i) + } else { + 
dst, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + if want, have := tt.Expected, string(dst); want != have { + t.Errorf("#%d: expected %s, got %s", i, want, have) + } + } + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go similarity index 50% rename from vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go rename to vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go index 6c95340c5..f2cb25879 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/indices_put_warmer.go +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -11,65 +11,71 @@ import ( "golang.org/x/net/context" - "gopkg.in/olivere/elastic.v3/uritemplates" + "gopkg.in/olivere/elastic.v5/uritemplates" ) -// IndicesPutWarmerService allows to register a warmer. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. -type IndicesPutWarmerService struct { +// IndicesPutMappingService allows to register specific mapping definition +// for a specific type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html +// for details. +type IndicesPutMappingService struct { client *Client pretty bool - typ []string + typ string index []string - name string masterTimeout string ignoreUnavailable *bool allowNoIndices *bool - requestCache *bool expandWildcards string + ignoreConflicts *bool + timeout string bodyJson map[string]interface{} bodyString string } -// NewIndicesPutWarmerService creates a new IndicesPutWarmerService. -func NewIndicesPutWarmerService(client *Client) *IndicesPutWarmerService { - return &IndicesPutWarmerService{ +// NewPutMappingService is an alias for NewIndicesPutMappingService. +// Use NewIndicesPutMappingService. +func NewPutMappingService(client *Client) *IndicesPutMappingService { + return NewIndicesPutMappingService(client) +} + +// NewIndicesPutMappingService creates a new IndicesPutMappingService. +func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService { + return &IndicesPutMappingService{ client: client, index: make([]string, 0), - typ: make([]string, 0), } } // Index is a list of index names the mapping should be added to // (supports wildcards); use `_all` or omit to add the mapping on all indices. -func (s *IndicesPutWarmerService) Index(indices ...string) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService { s.index = append(s.index, indices...) return s } -// Type is a list of type names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all types. -func (s *IndicesPutWarmerService) Type(typ ...string) *IndicesPutWarmerService { - s.typ = append(s.typ, typ...) +// Type is the name of the document type. +func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService { + s.typ = typ return s } -// Name specifies the name of the warmer (supports wildcards); -// leave empty to get all warmers -func (s *IndicesPutWarmerService) Name(name string) *IndicesPutWarmerService { - s.name = name +// Timeout is an explicit operation timeout. 
+func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService { + s.timeout = timeout return s } // MasterTimeout specifies the timeout for connection to master. -func (s *IndicesPutWarmerService) MasterTimeout(masterTimeout string) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService { s.masterTimeout = masterTimeout return s } // IgnoreUnavailable indicates whether specified concrete indices should be // ignored when unavailable (missing or closed). -func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService { s.ignoreUnavailable = &ignoreUnavailable return s } @@ -77,67 +83,57 @@ func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *Ind // AllowNoIndices indicates whether to ignore if a wildcard indices // expression resolves into no concrete indices. // This includes `_all` string or when no indices have been specified. -func (s *IndicesPutWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService { s.allowNoIndices = &allowNoIndices return s } -// RequestCache specifies whether the request to be warmed should use the request cache, -// defaults to index level setting -func (s *IndicesPutWarmerService) RequestCache(requestCache bool) *IndicesPutWarmerService { - s.requestCache = &requestCache - return s -} - // ExpandWildcards indicates whether to expand wildcard expression to // concrete indices that are open, closed or both. -func (s *IndicesPutWarmerService) ExpandWildcards(expandWildcards string) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService { s.expandWildcards = expandWildcards return s } +// IgnoreConflicts specifies whether to ignore conflicts while updating +// the mapping (default: false). +func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService { + s.ignoreConflicts = &ignoreConflicts + return s +} + // Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesPutWarmerService) Pretty(pretty bool) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService { s.pretty = pretty return s } // BodyJson contains the mapping definition. -func (s *IndicesPutWarmerService) BodyJson(mapping map[string]interface{}) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService { s.bodyJson = mapping return s } // BodyString is the mapping definition serialized as a string. -func (s *IndicesPutWarmerService) BodyString(mapping string) *IndicesPutWarmerService { +func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService { s.bodyString = mapping return s } // buildURL builds the URL for the operation. 
-func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { +func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) { var err error var path string - if len(s.index) == 0 && len(s.typ) == 0 { - path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ - "name": s.name, - }) - } else if len(s.index) == 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ - "type": strings.Join(s.typ, ","), - "name": s.name, - }) - } else if len(s.index) > 0 && len(s.typ) == 0 { - path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + // Build URL: Typ MUST be specified and is verified in Validate. + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ "index": strings.Join(s.index, ","), - "name": s.name, + "type": s.typ, }) } else { - path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - "name": s.name, + path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{ + "type": s.typ, }) } if err != nil { @@ -155,12 +151,15 @@ func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { if s.allowNoIndices != nil { params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) } - if s.requestCache != nil { - params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) - } if s.expandWildcards != "" { params.Set("expand_wildcards", s.expandWildcards) } + if s.ignoreConflicts != nil { + params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } if s.masterTimeout != "" { params.Set("master_timeout", s.masterTimeout) } @@ -168,10 +167,10 @@ func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { } // Validate checks if the operation is valid. -func (s *IndicesPutWarmerService) Validate() error { +func (s *IndicesPutMappingService) Validate() error { var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") + if s.typ == "" { + invalid = append(invalid, "Type") } if s.bodyString == "" && s.bodyJson == nil { invalid = append(invalid, "BodyJson") @@ -183,12 +182,7 @@ func (s *IndicesPutWarmerService) Validate() error { } // Do executes the operation. -func (s *IndicesPutWarmerService) Do() (*PutWarmerResponse, error) { - return s.DoC(nil) -} - -// DoC executes the operation. -func (s *IndicesPutWarmerService) DoC(ctx context.Context) (*PutWarmerResponse, error) { +func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) { // Check pre-conditions if err := s.Validate(); err != nil { return nil, err @@ -209,20 +203,20 @@ func (s *IndicesPutWarmerService) DoC(ctx context.Context) (*PutWarmerResponse, } // Get HTTP response - res, err := s.client.PerformRequestC(ctx, "PUT", path, params, body) + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) if err != nil { return nil, err } // Return operation response - ret := new(PutWarmerResponse) + ret := new(PutMappingResponse) if err := s.client.decoder.Decode(res.Body, ret); err != nil { return nil, err } return ret, nil } -// PutWarmerResponse is the response of IndicesPutWarmerService.Do. -type PutWarmerResponse struct { +// PutMappingResponse is the response of IndicesPutMappingService.Do. 
+type PutMappingResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go
new file mode 100644
index 000000000..f95e53c39
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestPutMappingURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Type     string
+		Expected string
+	}{
+		{
+			[]string{},
+			"tweet",
+			"/_mapping/tweet",
+		},
+		{
+			[]string{"*"},
+			"tweet",
+			"/%2A/_mapping/tweet",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			"tweet",
+			"/store-1%2Cstore-2/_mapping/tweet",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestMappingLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	mapping := `{
+		"tweetdoc":{
+			"properties":{
+				"field":{
+					"type":"string"
+				}
+			}
+		}
+	}`
+
+	putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do(context.TODO())
+	if err != nil {
+		t.Fatalf("expected put mapping to succeed; got: %v", err)
+	}
+	if putresp == nil {
+		t.Fatalf("expected put mapping response; got: %v", putresp)
+	}
+	if !putresp.Acknowledged {
+		t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
+	}
+
+	getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do(context.TODO())
+	if err != nil {
+		t.Fatalf("expected get mapping to succeed; got: %v", err)
+	}
+	if getresp == nil {
+		t.Fatalf("expected get mapping response; got: %v", getresp)
+	}
+	if _, ok := getresp[testIndexName2]; !ok {
+		t.Fatalf("expected to find a mapping for index %q; got: %#v", testIndexName2, getresp)
+	}
+
+	// NOTE There is no Delete Mapping API in Elasticsearch 2.0
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go
new file mode 100644
index 000000000..ab7231e58
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go
@@ -0,0 +1,185 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesPutSettingsService changes specific index-level settings in
+// real time.
+//
+// See the documentation at
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html.
+type IndicesPutSettingsService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	allowNoIndices    *bool
+	expandWildcards   string
+	flatSettings      *bool
+	ignoreUnavailable *bool
+	masterTimeout     string
+	bodyJson          interface{}
+	bodyString        string
+}
+
+// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
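+//
+// A minimal usage sketch (assumes a configured *Client named client and a ctx):
+//
+//	res, err := client.IndexPutSettings().
+//		Index("my-index").
+//		BodyString(`{"index":{"refresh_interval":"-1"}}`).
+//		Do(ctx)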
+func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
+	return &IndicesPutSettingsService{
+		client: client,
+		index:  make([]string, 0),
+	}
+}
+
+// Index is a list of index names the settings should be applied to
+// (supports wildcards); use `_all` or omit to update the settings on all indices.
+func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all`
+// string or when no indices have been specified).
+func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// IgnoreUnavailable specifies whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// MasterTimeout is the timeout for connection to master.
+func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
+	s.pretty = pretty
+	return s
+}
+
+// BodyJson is documented as: The index settings to be updated.
+func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is documented as: The index settings to be updated.
+func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else {
+		path = "/_settings"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutSettingsService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesPutSettingsResponse)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
+type IndicesPutSettingsResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings_test.go
new file mode 100644
index 000000000..d0a961794
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings_test.go
@@ -0,0 +1,96 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestIndicesPutSettingsBuildURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Expected string
+	}{
+		{
+			[]string{},
+			"/_settings",
+		},
+		{
+			[]string{"*"},
+			"/%2A/_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			"/store-1%2Cstore-2/_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndicesSettingsLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	body := `{
+		"index":{
+			"refresh_interval":"-1"
+		}
+	}`
+
+	// Put settings
+	putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do(context.TODO())
+	if err != nil {
+		t.Fatalf("expected put settings to succeed; got: %v", err)
+	}
+	if putres == nil {
+		t.Fatalf("expected put settings response; got: %v", putres)
+	}
+	if !putres.Acknowledged {
+		t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged)
+	}
+
+	// Read settings
+	getres, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatalf("expected get settings to succeed; got: %v", err)
+	}
+	if getres == nil {
+		t.Fatalf("expected get settings response; got: %v", getres)
+	}
+
+	// Check settings
+	index, found := getres[testIndexName]
+	if !found {
+		t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres)
+	}
+	// Retrieve "index" section of the settings for index testIndexName
+	sectionIntf, ok := index.Settings["index"]
+	if !ok {
+		t.Fatalf("expected settings to have %q field; got: %#v", "index", getres)
+	}
+	section, ok := sectionIntf.(map[string]interface{})
+	if !ok {
+		t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres)
+	}
+	refintv, ok := section["refresh_interval"]
+	if !ok {
+		t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres)
+	}
+	if got, want := refintv, "-1"; got != want {
+		t.Fatalf("expected refresh_interval = %v; got: %v", want, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go
new file mode 100644
index 000000000..3222539a2
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go
@@ -0,0 +1,180 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesPutTemplateService struct {
+	client        *Client
+	pretty        bool
+	name          string
+	order         interface{}
+	create        *bool
+	timeout       string
+	masterTimeout string
+	flatSettings  *bool
+	bodyJson      interface{}
+	bodyString    string
+}
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+	return &IndicesPutTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the index template.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+	s.name = name
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+	s.order = order
+	return s
+}
+
+// Create indicates whether the index template should only be added if
+// new or can also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+	s.create = &create
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// BodyJson is documented as: The template definition.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is documented as: The template definition.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
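+// The path always has the shape "/_template/{name}", e.g. "/_template/my-template"
+// (an illustrative name); the flags set above are appended as query parameters.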
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.order != nil { + params.Set("order", fmt.Sprintf("%v", s.order)) + } + if s.create != nil { + params.Set("create", fmt.Sprintf("%v", *s.create)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. +type IndicesPutTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go new file mode 100644 index 000000000..a221481f6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go @@ -0,0 +1,105 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// RefreshService explicitly refreshes one or more indices. +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html. +type RefreshService struct { + client *Client + index []string + force *bool + pretty bool +} + +// NewRefreshService creates a new instance of RefreshService. +func NewRefreshService(client *Client) *RefreshService { + builder := &RefreshService{ + client: client, + } + return builder +} + +// Index specifies the indices to refresh. +func (s *RefreshService) Index(index ...string) *RefreshService { + s.index = append(s.index, index...) + return s +} + +// Force forces a refresh. +func (s *RefreshService) Force(force bool) *RefreshService { + s.force = &force + return s +} + +// Pretty asks Elasticsearch to return indented JSON. 
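+//
+// A refresh round trip might look like this (sketch; client and ctx assumed):
+//
+//	res, err := client.Refresh("index1", "index2").Do(ctx)
+//	// res.Shards then reports the shards touched by the refresh.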
+func (s *RefreshService) Pretty(pretty bool) *RefreshService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *RefreshService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_refresh" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Do executes the request. +func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(RefreshResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a refresh request. + +// RefreshResult is the outcome of RefreshService.Do. +type RefreshResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go new file mode 100644 index 000000000..6d486c0ab --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go @@ -0,0 +1,82 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestRefreshBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_refresh", + }, + { + []string{"index1"}, + "/index1/_refresh", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_refresh", + }, + } + + for i, test := range tests { + path, _, err := client.Refresh().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestRefresh(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Refresh indices + res, err := client.Refresh(testIndexName, testIndexName2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result; got nil") + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go new file mode 100644 index 000000000..133fd2f06 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go @@ -0,0 +1,268 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesRolloverService rolls an alias over to a new index when the +// existing index is considered to be too large or too old. +// +// It is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/indices-rollover-index.html. +type IndicesRolloverService struct { + client *Client + pretty bool + dryRun bool + newIndex string + alias string + masterTimeout string + timeout string + waitForActiveShards string + conditions map[string]interface{} + settings map[string]interface{} + mappings map[string]interface{} + bodyJson interface{} + bodyString string +} + +// NewIndicesRolloverService creates a new IndicesRolloverService. +func NewIndicesRolloverService(client *Client) *IndicesRolloverService { + return &IndicesRolloverService{ + client: client, + conditions: make(map[string]interface{}), + settings: make(map[string]interface{}), + mappings: make(map[string]interface{}), + } +} + +// Alias is the name of the alias to rollover. +func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService { + s.alias = alias + return s +} + +// NewIndex is the name of the rollover index. 
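+//
+// A typical rollover call (illustrative sketch; client and ctx assumed):
+//
+//	res, err := client.RolloverIndex("logs_write").
+//		NewIndex("my_new_index_name").
+//		AddMaxIndexAgeCondition("7d").
+//		Do(ctx)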
+func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
+	s.newIndex = newIndex
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Timeout sets an explicit operation timeout.
+func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards sets the number of active shards to wait for on the
+// newly created rollover index before the operation returns.
+func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
+	s.waitForActiveShards = waitForActiveShards
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
+	s.pretty = pretty
+	return s
+}
+
+// DryRun, when set, specifies that only conditions are checked without
+// performing the actual rollover.
+func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
+	s.dryRun = dryRun
+	return s
+}
+
+// Conditions allows specifying all conditions as a dictionary.
+func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
+	s.conditions = conditions
+	return s
+}
+
+// AddCondition adds a condition to the rollover decision.
+func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
+	s.conditions[name] = value
+	return s
+}
+
+// AddMaxIndexAgeCondition adds a condition to set the max index age.
+func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
+	s.conditions["max_age"] = time
+	return s
+}
+
+// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
+func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
+	s.conditions["max_docs"] = docs
+	return s
+}
+
+// Settings adds the index settings.
+func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
+	s.settings = settings
+	return s
+}
+
+// AddSetting adds an index setting.
+func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
+	s.settings[name] = value
+	return s
+}
+
+// Mappings adds the index mappings.
+func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
+	s.mappings = mappings
+	return s
+}
+
+// AddMapping adds a mapping for the given type.
+func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
+	s.mappings[typ] = mapping
+	return s
+}
+
+// BodyJson sets the conditions that need to be met for executing the rollover,
+// specified as a serializable JSON instance which is sent as the body of
+// the request.
+func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString sets the conditions that need to be met for executing the rollover,
+// specified as a string which is sent as the body of the request.
+func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
+	s.bodyString = body
+	return s
+}
+
+// getBody returns the body of the request, if not explicitly set via
+// BodyJson or BodyString.
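+// For example, AddMaxIndexAgeCondition("2d") plus AddMaxIndexDocsCondition(1000000)
+// yield the body {"conditions":{"max_age":"2d","max_docs":1000000}}, as the
+// tests below demonstrate.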
+func (s *IndicesRolloverService) getBody() interface{} { + body := make(map[string]interface{}) + if len(s.conditions) > 0 { + body["conditions"] = s.conditions + } + if len(s.settings) > 0 { + body["settings"] = s.settings + } + if len(s.mappings) > 0 { + body["mappings"] = s.mappings + } + return body +} + +// buildURL builds the URL for the operation. +func (s *IndicesRolloverService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.newIndex != "" { + path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{ + "alias": s.alias, + "new_index": s.newIndex, + }) + } else { + path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{ + "alias": s.alias, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.dryRun { + params.Set("dry_run", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesRolloverService) Validate() error { + var invalid []string + if s.alias == "" { + invalid = append(invalid, "Alias") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + body = s.getBody() + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesRolloverResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesRolloverResponse is the response of IndicesRolloverService.Do. +type IndicesRolloverResponse struct { + OldIndex string `json:"old_index"` + NewIndex string `json:"new_index"` + RolledOver bool `json:"rolled_over"` + DryRun bool `json:"dry_run"` + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` + Conditions map[string]bool `json:"conditions"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go new file mode 100644 index 000000000..77ac1e851 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go @@ -0,0 +1,116 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestIndicesRolloverBuildURL(t *testing.T) {
+	client := setupTestClient(t)
+
+	tests := []struct {
+		Alias    string
+		NewIndex string
+		Expected string
+	}{
+		{
+			"logs_write",
+			"",
+			"/logs_write/_rollover",
+		},
+		{
+			"logs_write",
+			"my_new_index_name",
+			"/logs_write/_rollover/my_new_index_name",
+		},
+	}
+
+	for i, test := range tests {
+		path, _, err := client.RolloverIndex(test.Alias).NewIndex(test.NewIndex).buildURL()
+		if err != nil {
+			t.Errorf("case #%d: %v", i+1, err)
+			continue
+		}
+		if path != test.Expected {
+			t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+		}
+	}
+}
+
+func TestIndicesRolloverBodyConditions(t *testing.T) {
+	client := setupTestClient(t)
+	svc := NewIndicesRolloverService(client).
+		Conditions(map[string]interface{}{
+			"max_age":  "7d",
+			"max_docs": 1000,
+		})
+	data, err := json.Marshal(svc.getBody())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
+	if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+	}
+}
+
+func TestIndicesRolloverBodyAddCondition(t *testing.T) {
+	client := setupTestClient(t)
+	svc := NewIndicesRolloverService(client).
+		AddCondition("max_age", "7d").
+		AddCondition("max_docs", 1000)
+	data, err := json.Marshal(svc.getBody())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
+	if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+	}
+}
+
+func TestIndicesRolloverBodyAddPredefinedConditions(t *testing.T) {
+	client := setupTestClient(t)
+	svc := NewIndicesRolloverService(client).
+		AddMaxIndexAgeCondition("2d").
+		AddMaxIndexDocsCondition(1000000)
+	data, err := json.Marshal(svc.getBody())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"conditions":{"max_age":"2d","max_docs":1000000}}`
+	if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+	}
+}
+
+func TestIndicesRolloverBodyComplex(t *testing.T) {
+	client := setupTestClient(t)
+	svc := NewIndicesRolloverService(client).
+		AddMaxIndexAgeCondition("2d").
+		AddMaxIndexDocsCondition(1000000).
+		AddSetting("index.number_of_shards", 2).
+		AddMapping("tweet", map[string]interface{}{
+			"properties": map[string]interface{}{
+				"user": map[string]interface{}{
+					"type": "keyword",
+				},
+			},
+		})
+	data, err := json.Marshal(svc.getBody())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"tweet":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}`
+	if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
new file mode 100644
index 000000000..162bd3986
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
@@ -0,0 +1,174 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesShrinkService allows you to shrink an existing index into a +// new index with fewer primary shards. +// +// For further details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/indices-shrink-index.html. +type IndicesShrinkService struct { + client *Client + pretty bool + source string + target string + masterTimeout string + timeout string + waitForActiveShards string + bodyJson interface{} + bodyString string +} + +// NewIndicesShrinkService creates a new IndicesShrinkService. +func NewIndicesShrinkService(client *Client) *IndicesShrinkService { + return &IndicesShrinkService{ + client: client, + } +} + +// Source is the name of the source index to shrink. +func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService { + s.source = source + return s +} + +// Target is the name of the target index to shrink into. +func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService { + s.target = target + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService { + s.timeout = timeout + return s +} + +// WaitForActiveShards sets the number of active shards to wait for on +// the shrunken index before the operation returns. +func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService { + s.pretty = pretty + return s +} + +// BodyJson is the configuration for the target index (`settings` and `aliases`) +// defined as a JSON-serializable instance to be sent as the request body. +func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService { + s.bodyJson = body + return s +} + +// BodyString is the configuration for the target index (`settings` and `aliases`) +// defined as a string to send as the request body. +func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesShrinkService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{ + "source": s.source, + "target": s.target, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
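+// Both Source and Target are required; a minimal, valid shrink request is
+// (sketch; client and ctx assumed, index names illustrative):
+//
+//	res, err := client.ShrinkIndex("my_source_index", "my_target_index").Do(ctx)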
+func (s *IndicesShrinkService) Validate() error { + var invalid []string + if s.source == "" { + invalid = append(invalid, "Source") + } + if s.target == "" { + invalid = append(invalid, "Target") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesShrinkResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesShrinkResponse is the response of IndicesShrinkService.Do. +type IndicesShrinkResponse struct { + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_shrink_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink_test.go new file mode 100644 index 000000000..06ab7d923 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink_test.go @@ -0,0 +1,34 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesShrinkBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Source string + Target string + Expected string + }{ + { + "my_source_index", + "my_target_index", + "/my_source_index/_shrink/my_target_index", + }, + } + + for i, test := range tests { + path, _, err := client.ShrinkIndex(test.Source, test.Target).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go new file mode 100644 index 000000000..7d7e94a9d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go @@ -0,0 +1,386 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesStatsService provides stats on various metrics of one or more +// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html. +type IndicesStatsService struct { + client *Client + pretty bool + metric []string + index []string + level string + types []string + completionFields []string + fielddataFields []string + fields []string + groups []string + human *bool +} + +// NewIndicesStatsService creates a new IndicesStatsService. 
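+//
+// Usage sketch (client and ctx assumed; the index name is illustrative):
+//
+//	stats, err := client.IndexStats("my-index").Metric("docs", "store").Do(ctx)
+//	// stats.Indices["my-index"].Total.Docs.Count then holds the document count.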
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+	return &IndicesStatsService{
+		client:           client,
+		index:            make([]string, 0),
+		metric:           make([]string, 0),
+		completionFields: make([]string, 0),
+		fielddataFields:  make([]string, 0),
+		fields:           make([]string, 0),
+		groups:           make([]string, 0),
+		types:            make([]string, 0),
+	}
+}
+
+// Metric limits the information returned to the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+	s.metric = append(s.metric, metric...)
+	return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Type is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService {
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Level returns stats aggregated at cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+	s.level = level
+	return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+	s.completionFields = append(s.completionFields, completionFields...)
+	return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+	s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+	return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+	s.fields = append(s.fields, fields...)
+	return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+	s.groups = append(s.groups, groups...)
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
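+// Indices and metrics are joined with commas (URL-escaped as %2C), so two
+// indices and two metrics expand to "/index1%2Cindex2/_stats/metric1%2Cmetric2";
+// with neither set, the path is simply "/_stats".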
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) { + var err error + var path string + if len(s.index) > 0 && len(s.metric) > 0 { + path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ + "index": strings.Join(s.index, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.metric) > 0 { + path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ + "metric": strings.Join(s.metric, ","), + }) + } else { + path = "/_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.groups) > 0 { + params.Set("groups", strings.Join(s.groups, ",")) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.level != "" { + params.Set("level", s.level) + } + if len(s.types) > 0 { + params.Set("types", strings.Join(s.types, ",")) + } + if len(s.completionFields) > 0 { + params.Set("completion_fields", strings.Join(s.completionFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesStatsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesStatsResponse is the response of IndicesStatsService.Do. +type IndicesStatsResponse struct { + // Shards provides information returned from shards. + Shards shardsInfo `json:"_shards"` + + // All provides summary stats about all indices. + All *IndexStats `json:"_all,omitempty"` + + // Indices provides a map into the stats of an index. The key of the + // map is the index name. + Indices map[string]*IndexStats `json:"indices,omitempty"` +} + +// IndexStats is index stats for a specific index. 
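+// Throughout the structs below, the *_in_bytes and *_in_millis fields carry
+// machine-readable values, while the sibling string fields (e.g. a Store.Size
+// of "119.3mb") are human-readable variants, typically only populated when the
+// human flag is set.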
+type IndexStats struct { + Primaries *IndexStatsDetails `json:"primaries,omitempty"` + Total *IndexStatsDetails `json:"total,omitempty"` +} + +type IndexStatsDetails struct { + Docs *IndexStatsDocs `json:"docs,omitempty"` + Store *IndexStatsStore `json:"store,omitempty"` + Indexing *IndexStatsIndexing `json:"indexing,omitempty"` + Get *IndexStatsGet `json:"get,omitempty"` + Search *IndexStatsSearch `json:"search,omitempty"` + Merges *IndexStatsMerges `json:"merges,omitempty"` + Refresh *IndexStatsRefresh `json:"refresh,omitempty"` + Flush *IndexStatsFlush `json:"flush,omitempty"` + Warmer *IndexStatsWarmer `json:"warmer,omitempty"` + FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` + IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` + Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` + Percolate *IndexStatsPercolate `json:"percolate,omitempty"` + Completion *IndexStatsCompletion `json:"completion,omitempty"` + Segments *IndexStatsSegments `json:"segments,omitempty"` + Translog *IndexStatsTranslog `json:"translog,omitempty"` + Suggest *IndexStatsSuggest `json:"suggest,omitempty"` + QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` +} + +type IndexStatsDocs struct { + Count int64 `json:"count,omitempty"` + Deleted int64 `json:"deleted,omitempty"` +} + +type IndexStatsStore struct { + Size string `json:"size,omitempty"` // human size, e.g. 119.3mb + SizeInBytes int64 `json:"size_in_bytes,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 0s + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsIndexing struct { + IndexTotal int64 `json:"index_total,omitempty"` + IndexTime string `json:"index_time,omitempty"` + IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` + IndexCurrent int64 `json:"index_current,omitempty"` + DeleteTotal int64 `json:"delete_total,omitempty"` + DeleteTime string `json:"delete_time,omitempty"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` + DeleteCurrent int64 `json:"delete_current,omitempty"` + NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` + IsThrottled bool `json:"is_throttled,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsGet struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + ExistsTotal int64 `json:"exists_total,omitempty"` + ExistsTime string `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` + MissingTotal int64 `json:"missing_total,omitempty"` + MissingTime string `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsSearch struct { + OpenContexts int64 `json:"open_contexts,omitempty"` + QueryTotal int64 `json:"query_total,omitempty"` + QueryTime string `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` + QueryCurrent int64 `json:"query_current,omitempty"` + FetchTotal int64 `json:"fetch_total,omitempty"` + FetchTime string `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` + FetchCurrent int64 `json:"fetch_current,omitempty"` +} + +type IndexStatsMerges struct { + Current int64 `json:"current,omitempty"` + CurrentDocs int64 
`json:"current_docs,omitempty"` + CurrentSize string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` + TotalDocs int64 `json:"total_docs,omitempty"` + TotalSize string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` +} + +type IndexStatsRefresh struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFlush struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsWarmer struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFilterCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsIdCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` +} + +type IndexStatsFielddata struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsPercolate struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Queries int64 `json:"queries,omitempty"` +} + +type IndexStatsCompletion struct { + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSegments struct { + Count int64 `json:"count,omitempty"` + Memory string `json:"memory,omitempty"` + MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` + IndexWriterMemory string `json:"index_writer_memory,omitempty"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` + IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + VersionMapMemory string `json:"version_map_memory,omitempty"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` + FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` +} + +type IndexStatsTranslog struct { + Operations int64 `json:"operations,omitempty"` + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSuggest struct { + Total int64 `json:"total,omitempty"` + Time string `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsQueryCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` + HitCount int64 
`json:"hit_count,omitempty"` + MissCount int64 `json:"miss_count,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_stats_test.go new file mode 100644 index 000000000..367d63ba9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_stats_test.go @@ -0,0 +1,87 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestIndexStatsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Metrics []string + Expected string + }{ + { + []string{}, + []string{}, + "/_stats", + }, + { + []string{"index1"}, + []string{}, + "/index1/_stats", + }, + { + []string{}, + []string{"metric1"}, + "/_stats/metric1", + }, + { + []string{"index1"}, + []string{"metric1"}, + "/index1/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1"}, + "/index1%2Cindex2/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1", "metric2"}, + "/index1%2Cindex2/_stats/metric1%2Cmetric2", + }, + } + + for i, test := range tests { + path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndexStats(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + stats, err := client.IndexStats(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if stats == nil { + t.Fatalf("expected response; got: %v", stats) + } + stat, found := stats.Indices[testIndexName] + if !found { + t.Fatalf("expected stats about index %q; got: %v", testIndexName, found) + } + if stat.Total == nil { + t.Fatalf("expected total to be != nil; got: %v", stat.Total) + } + if stat.Total.Docs == nil { + t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs) + } + if stat.Total.Docs.Count == 0 { + t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go new file mode 100644 index 000000000..641c1eb26 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go @@ -0,0 +1,124 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestDeletePipelineService deletes pipelines by ID. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/delete-pipeline-api.html. +type IngestDeletePipelineService struct { + client *Client + pretty bool + id string + masterTimeout string + timeout string +} + +// NewIngestDeletePipelineService creates a new IngestDeletePipelineService. +func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService { + return &IngestDeletePipelineService{ + client: client, + } +} + +// Id is documented as: Pipeline ID. 
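+//
+// Deletion sketch (client and ctx assumed; the pipeline ID is illustrative):
+//
+//	res, err := client.IngestDeletePipeline("my-pipeline-id").Do(ctx)
+//	// res.Acknowledged reports whether the cluster accepted the deletion.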
+func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService { + s.id = id + return s +} + +// MasterTimeout is documented as: Explicit operation timeout for connection to master node. +func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is documented as: Explicit operation timeout. +func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService { + s.timeout = timeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestDeletePipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestDeletePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do. +type IngestDeletePipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline_test.go new file mode 100644 index 000000000..1163e0f17 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "testing" + +func TestIngestDeletePipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id", + }, + } + + for _, test := range tests { + path, _, err := client.IngestDeletePipeline(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go new file mode 100644 index 000000000..ecff1a862 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go @@ -0,0 +1,118 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestGetPipelineService returns pipelines based on ID. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/get-pipeline-api.html +// for documentation. +type IngestGetPipelineService struct { + client *Client + pretty bool + id []string + masterTimeout string +} + +// NewIngestGetPipelineService creates a new IngestGetPipelineService. +func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService { + return &IngestGetPipelineService{ + client: client, + } +} + +// Id is a list of pipeline ids. Wildcards supported. +func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService { + s.id = append(s.id, id...) + return s +} + +// MasterTimeout is an explicit operation timeout for connection to master node. +func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.id) > 0 { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": strings.Join(s.id, ","), + }) + } else { + path = "/_ingest/pipeline" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestGetPipelineService) Validate() error { + return nil +} + +// Do executes the operation. 
+func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) {
+    // Check pre-conditions
+    if err := s.Validate(); err != nil {
+        return nil, err
+    }
+
+    // Get URL for request
+    path, params, err := s.buildURL()
+    if err != nil {
+        return nil, err
+    }
+
+    // Get HTTP response
+    res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    // Return operation response
+    var ret IngestGetPipelineResponse
+    if err := json.Unmarshal(res.Body, &ret); err != nil {
+        return nil, err
+    }
+    return ret, nil
+}
+
+// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do.
+type IngestGetPipelineResponse map[string]*IngestGetPipeline
+
+type IngestGetPipeline struct {
+    ID     string                 `json:"id"`
+    Config map[string]interface{} `json:"config"`
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline_test.go
new file mode 100644
index 000000000..ddafe9fce
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline_test.go
@@ -0,0 +1,118 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+    "context"
+    "testing"
+)
+
+func TestIngestGetPipelineURL(t *testing.T) {
+    client := setupTestClientAndCreateIndex(t)
+
+    tests := []struct {
+        Id       []string
+        Expected string
+    }{
+        {
+            nil,
+            "/_ingest/pipeline",
+        },
+        {
+            []string{"my-pipeline-id"},
+            "/_ingest/pipeline/my-pipeline-id",
+        },
+        {
+            []string{"*"},
+            "/_ingest/pipeline/%2A",
+        },
+        {
+            []string{"pipeline-1", "pipeline-2"},
+            "/_ingest/pipeline/pipeline-1%2Cpipeline-2",
+        },
+    }
+
+    for _, test := range tests {
+        path, _, err := client.IngestGetPipeline(test.Id...).buildURL()
+        if err != nil {
+            t.Fatal(err)
+        }
+        if path != test.Expected {
+            t.Errorf("expected %q; got: %q", test.Expected, path)
+        }
+    }
+}
+
+func TestIngestLifecycle(t *testing.T) {
+    client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+    // Get all pipelines. This should return a 404, i.e. an error, since no pipelines exist yet.
+    getres, err := client.IngestGetPipeline().Do(context.TODO())
+    if err == nil {
+        t.Fatal("expected error; got: nil")
+    }
+    if getres != nil {
+        t.Fatalf("expected no response, got %v", getres)
+    }
+
+    // Add a pipeline
+    pipelineDef := `{
+  "description" : "reset retweets",
+  "processors" : [
+    {
+      "set" : {
+        "field": "retweets",
+        "value": 0
+      }
+    }
+  ]
+}`
+    putres, err := client.IngestPutPipeline("my-pipeline").BodyString(pipelineDef).Do(context.TODO())
+    if err != nil {
+        t.Fatal(err)
+    }
+    if putres == nil {
+        t.Fatal("expected response, got nil")
+    }
+    if want, have := true, putres.Acknowledged; want != have {
+        t.Fatalf("expected ack = %v, got %v", want, have)
+    }
+
+    // Get all pipelines again
+    getres, err = client.IngestGetPipeline().Do(context.TODO())
+    if err != nil {
+        t.Fatal(err)
+    }
+    if want, have := 1, len(getres); want != have {
+        t.Fatalf("expected %d pipelines, got %d", want, have)
+    }
+    if _, found := getres["my-pipeline"]; !found {
+        t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+    }
+
+    // Get the pipeline by ID
+    getres, err = client.IngestGetPipeline("my-pipeline").Do(context.TODO())
+    if err != nil {
+        t.Fatal(err)
+    }
+    if want, have := 1, len(getres); want != have {
+        t.Fatalf("expected %d pipelines, got %d", want, have)
+    }
+    if _, found := getres["my-pipeline"]; !found {
+        t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+    }
+
+    // Delete pipeline
+    delres, err := client.IngestDeletePipeline("my-pipeline").Do(context.TODO())
+    if err != nil {
+        t.Fatal(err)
+    }
+    if delres == nil {
+        t.Fatal("expected response, got nil")
+    }
+    if want, have := true, delres.Acknowledged; want != have {
+        t.Fatalf("expected ack = %v, got %v", want, have)
+    }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go
new file mode 100644
index 000000000..723a8ad78
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/url"
+
+    "golang.org/x/net/context"
+
+    "gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// IngestPutPipelineService adds pipelines and updates existing pipelines in
+// the cluster.
+//
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/put-pipeline-api.html.
+type IngestPutPipelineService struct {
+    client        *Client
+    pretty        bool
+    id            string
+    masterTimeout string
+    timeout       string
+    bodyJson      interface{}
+    bodyString    string
+}
+
+// NewIngestPutPipelineService creates a new IngestPutPipelineService.
+func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService {
+    return &IngestPutPipelineService{
+        client: client,
+    }
+}
+
+// Id is the pipeline ID.
+func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService {
+    s.id = id
+    return s
+}
+
+// MasterTimeout is an explicit operation timeout for connection to master node.
+func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService {
+    s.masterTimeout = masterTimeout
+    return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService {
+    s.timeout = timeout
+    return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService {
+    s.pretty = pretty
+    return s
+}
+
+// BodyJson is the ingest definition, defined as a JSON-serializable document.
+// Use e.g. a map[string]interface{} here.
+func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService {
+    s.bodyJson = body
+    return s
+}
+
+// BodyString is the ingest definition, specified as a string.
+func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService {
+    s.bodyString = body
+    return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) {
+    // Build URL
+    path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
+        "id": s.id,
+    })
+    if err != nil {
+        return "", url.Values{}, err
+    }
+
+    // Add query string parameters
+    params := url.Values{}
+    if s.pretty {
+        params.Set("pretty", "1")
+    }
+    if s.masterTimeout != "" {
+        params.Set("master_timeout", s.masterTimeout)
+    }
+    if s.timeout != "" {
+        params.Set("timeout", s.timeout)
+    }
+    return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IngestPutPipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestPutPipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do. +type IngestPutPipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline_test.go new file mode 100644 index 000000000..9609f2f53 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIngestPutPipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id", + }, + } + + for _, test := range tests { + path, _, err := client.IngestPutPipeline(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go new file mode 100644 index 000000000..212327dfb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go @@ -0,0 +1,157 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestSimulatePipelineService executes a specific pipeline against the set of +// documents provided in the body of the request. +// +// The API is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.0/simulate-pipeline-api.html. +type IngestSimulatePipelineService struct { + client *Client + pretty bool + id string + verbose *bool + bodyJson interface{} + bodyString string +} + +// NewIngestSimulatePipelineService creates a new IngestSimulatePipeline. +func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService { + return &IngestSimulatePipelineService{ + client: client, + } +} + +// Id specifies the pipeline ID. 
+func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService { + s.id = id + return s +} + +// Verbose mode. Display data output for each processor in executed pipeline. +func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService { + s.verbose = &verbose + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService { + s.pretty = pretty + return s +} + +// BodyJson is the ingest definition, defined as a JSON-serializable simulate +// definition. Use e.g. a map[string]interface{} here. +func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService { + s.bodyJson = body + return s +} + +// BodyString is the simulate definition, defined as a string. +func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if s.id != "" { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{ + "id": s.id, + }) + } else { + path = "/_ingest/pipeline/_simulate" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.verbose != nil { + params.Set("verbose", fmt.Sprintf("%v", *s.verbose)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestSimulatePipelineService) Validate() error { + var invalid []string + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestSimulatePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do. 
+type IngestSimulatePipelineResponse struct { + Docs []*IngestSimulateDocumentResult `json:"docs"` +} + +type IngestSimulateDocumentResult struct { + Doc map[string]interface{} `json:"doc"` + ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"` +} + +type IngestSimulateProcessorResult struct { + ProcessorTag string `json:"tag"` + Doc map[string]interface{} `json:"doc"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline_test.go new file mode 100644 index 000000000..a254f85ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline_test.go @@ -0,0 +1,35 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIngestSimulatePipelineURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Id string + Expected string + }{ + { + "", + "/_ingest/pipeline/_simulate", + }, + { + "my-pipeline-id", + "/_ingest/pipeline/my-pipeline-id/_simulate", + }, + } + + for _, test := range tests { + path, _, err := client.IngestSimulatePipeline().Id(test.Id).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go b/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go new file mode 100644 index 000000000..2200bcd00 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go @@ -0,0 +1,160 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// InnerHit implements a simple join for parent/child, nested, and even +// top-level documents in Elasticsearch. +// It is an experimental feature for Elasticsearch versions 1.5 (or greater). +// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html +// for documentation. +// +// See the tests for SearchSource, HasChildFilter, HasChildQuery, +// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery +// for usage examples. +type InnerHit struct { + source *SearchSource + path string + typ string + + name string +} + +// NewInnerHit creates a new InnerHit. 
+func NewInnerHit() *InnerHit { + return &InnerHit{source: NewSearchSource()} +} + +func (hit *InnerHit) Path(path string) *InnerHit { + hit.path = path + return hit +} + +func (hit *InnerHit) Type(typ string) *InnerHit { + hit.typ = typ + return hit +} + +func (hit *InnerHit) Query(query Query) *InnerHit { + hit.source.Query(query) + return hit +} + +func (hit *InnerHit) From(from int) *InnerHit { + hit.source.From(from) + return hit +} + +func (hit *InnerHit) Size(size int) *InnerHit { + hit.source.Size(size) + return hit +} + +func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { + hit.source.TrackScores(trackScores) + return hit +} + +func (hit *InnerHit) Explain(explain bool) *InnerHit { + hit.source.Explain(explain) + return hit +} + +func (hit *InnerHit) Version(version bool) *InnerHit { + hit.source.Version(version) + return hit +} + +func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit { + hit.source.StoredField(storedFieldName) + return hit +} + +func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit { + hit.source.StoredFields(storedFieldNames...) + return hit +} + +func (hit *InnerHit) NoStoredFields() *InnerHit { + hit.source.NoStoredFields() + return hit +} + +func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { + hit.source.FetchSource(fetchSource) + return hit +} + +func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { + hit.source.FetchSourceContext(fetchSourceContext) + return hit +} + +func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit { + hit.source.DocvalueFields(docvalueFields...) + return hit +} + +func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit { + hit.source.DocvalueField(docvalueField) + return hit +} + +func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { + hit.source.ScriptFields(scriptFields...) + return hit +} + +func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { + hit.source.ScriptField(scriptField) + return hit +} + +func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { + hit.source.Sort(field, ascending) + return hit +} + +func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { + hit.source.SortWithInfo(info) + return hit +} + +func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { + hit.source.SortBy(sorter...) + return hit +} + +func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { + hit.source.Highlight(highlight) + return hit +} + +func (hit *InnerHit) Highlighter() *Highlight { + return hit.source.Highlighter() +} + +func (hit *InnerHit) Name(name string) *InnerHit { + hit.name = name + return hit +} + +func (hit *InnerHit) Source() (interface{}, error) { + src, err := hit.source.Source() + if err != nil { + return nil, err + } + source, ok := src.(map[string]interface{}) + if !ok { + return nil, nil + } + + // Notice that hit.typ and hit.path are not exported here. + // They are only used with SearchSource and serialized there. 
+ + if hit.name != "" { + source["name"] = hit.name + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_not_test.go b/vendor/gopkg.in/olivere/elastic.v5/inner_hit_test.go similarity index 51% rename from vendor/gopkg.in/olivere/elastic.v3/search_queries_not_test.go rename to vendor/gopkg.in/olivere/elastic.v5/inner_hit_test.go index 4c4f1c0ab..fd9bd2e8a 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_not_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/inner_hit_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -9,9 +9,9 @@ import ( "testing" ) -func TestNotQuery(t *testing.T) { - f := NewNotQuery(NewTermQuery("user", "olivere")) - src, err := f.Source() +func TestInnerHitEmpty(t *testing.T) { + hit := NewInnerHit() + src, err := hit.Source() if err != nil { t.Fatal(err) } @@ -20,17 +20,15 @@ func TestNotQuery(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"not":{"query":{"term":{"user":"olivere"}}}}` + expected := `{}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } } -func TestNotQueryWithParams(t *testing.T) { - postDateFilter := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") - f := NewNotQuery(postDateFilter) - f = f.QueryName("MyQueryName") - src, err := f.Source() +func TestInnerHitWithName(t *testing.T) { + hit := NewInnerHit().Name("comments") + src, err := hit.Source() if err != nil { t.Fatal(err) } @@ -39,7 +37,7 @@ func TestNotQueryWithParams(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"not":{"_name":"MyQueryName","query":{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}}}` + expected := `{"name":"comments"}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v5/logger.go b/vendor/gopkg.in/olivere/elastic.v5/logger.go new file mode 100644 index 000000000..095eb4cd4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/logger.go @@ -0,0 +1,10 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, v ...interface{}) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mget.go b/vendor/gopkg.in/olivere/elastic.v5/mget.go new file mode 100644 index 000000000..dcf72c624 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mget.go @@ -0,0 +1,253 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// MgetService allows to get multiple documents based on an index, +// type (optional) and id (possibly routing). The response includes +// a docs array with all the fetched documents, each element similar +// in structure to a document provided by the Get API. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-multi-get.html +// for details. +type MgetService struct { + client *Client + pretty bool + preference string + realtime *bool + refresh string + routing string + storedFields []string + items []*MultiGetItem +} + +// NewMgetService initializes a new Multi GET API request call. +func NewMgetService(client *Client) *MgetService { + builder := &MgetService{ + client: client, + } + return builder +} + +// Preference specifies the node or shard the operation should be performed +// on (default: random). +func (s *MgetService) Preference(preference string) *MgetService { + s.preference = preference + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *MgetService) Refresh(refresh string) *MgetService { + s.refresh = refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *MgetService) Realtime(realtime bool) *MgetService { + s.realtime = &realtime + return s +} + +// Routing is the specific routing value. +func (s *MgetService) Routing(routing string) *MgetService { + s.routing = routing + return s +} + +// StoredFields is a list of fields to return in the response. +func (s *MgetService) StoredFields(storedFields ...string) *MgetService { + s.storedFields = append(s.storedFields, storedFields...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MgetService) Pretty(pretty bool) *MgetService { + s.pretty = pretty + return s +} + +// Add an item to the request. +func (s *MgetService) Add(items ...*MultiGetItem) *MgetService { + s.items = append(s.items, items...) + return s +} + +// Source returns the request body, which will be serialized into JSON. +func (s *MgetService) Source() (interface{}, error) { + source := make(map[string]interface{}) + items := make([]interface{}, len(s.items)) + for i, item := range s.items { + src, err := item.Source() + if err != nil { + return nil, err + } + items[i] = src + } + source["docs"] = items + return source, nil +} + +// Do executes the request. +func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) { + // Build url + path := "/_mget" + + params := make(url.Values) + if s.realtime != nil { + params.Add("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.preference != "" { + params.Add("preference", s.preference) + } + if s.refresh != "" { + params.Add("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + + // Set body + body, err := s.Source() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MgetResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Multi Get Item -- + +// MultiGetItem is a single document to retrieve via the MgetService. +type MultiGetItem struct { + index string + typ string + id string + routing string + storedFields []string + version *int64 // see org.elasticsearch.common.lucene.uid.Versions + versionType string // see org.elasticsearch.index.VersionType + fsc *FetchSourceContext +} + +// NewMultiGetItem initializes a new, single item for a Multi GET request. 
+func NewMultiGetItem() *MultiGetItem { + return &MultiGetItem{} +} + +// Index specifies the index name. +func (item *MultiGetItem) Index(index string) *MultiGetItem { + item.index = index + return item +} + +// Type specifies the type name. +func (item *MultiGetItem) Type(typ string) *MultiGetItem { + item.typ = typ + return item +} + +// Id specifies the identifier of the document. +func (item *MultiGetItem) Id(id string) *MultiGetItem { + item.id = id + return item +} + +// Routing is the specific routing value. +func (item *MultiGetItem) Routing(routing string) *MultiGetItem { + item.routing = routing + return item +} + +// StoredFields is a list of fields to return in the response. +func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem { + item.storedFields = append(item.storedFields, storedFields...) + return item +} + +// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), +// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. +// The default in Elasticsearch is MatchAny (-3). +func (item *MultiGetItem) Version(version int64) *MultiGetItem { + item.version = &version + return item +} + +// VersionType can be "internal", "external", "external_gt", "external_gte", +// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source. +// It is "internal" by default. +func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { + item.versionType = versionType + return item +} + +// FetchSource allows to specify source filtering. +func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { + item.fsc = fetchSourceContext + return item +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiGet search. +func (item *MultiGetItem) Source() (interface{}, error) { + source := make(map[string]interface{}) + + source["_id"] = item.id + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.routing != "" { + source["_routing"] = item.routing + } + if len(item.storedFields) > 0 { + source["stored_fields"] = strings.Join(item.storedFields, ",") + } + if item.version != nil { + source["version"] = fmt.Sprintf("%d", *item.version) + } + if item.versionType != "" { + source["version_type"] = item.versionType + } + + return source, nil +} + +// -- Result of a Multi Get request. + +// MgetResponse is the outcome of a Multi GET API request. +type MgetResponse struct { + Docs []*GetResult `json:"docs,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mget_test.go b/vendor/gopkg.in/olivere/elastic.v5/mget_test.go new file mode 100644 index 000000000..30391dfed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mget_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
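A short sketch of the Multi GET flow implemented above, not part of the vendored sources; the index "twitter", type "tweet", and the document ids are illustrative assumptions mirroring the test that follows.

package main

import (
    "encoding/json"
    "fmt"

    "golang.org/x/net/context"

    elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
    client, err := elastic.NewClient() // assumes a local 5.x cluster
    if err != nil {
        panic(err)
    }
    // Fetch two documents in a single roundtrip.
    res, err := client.MultiGet().
        Add(elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("1")).
        Add(elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("3")).
        Do(context.TODO())
    if err != nil {
        panic(err)
    }
    for _, doc := range res.Docs {
        if doc.Source == nil {
            continue // document not found or source not returned
        }
        var m map[string]interface{}
        if err := json.Unmarshal(*doc.Source, &m); err == nil {
            fmt.Println(doc.Id, m)
        }
    }
}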
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestMultiGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Get documents 1 and 3 + res, err := client.MultiGet(). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result to be != nil; got nil") + } + if res.Docs == nil { + t.Fatal("expected result docs to be != nil; got nil") + } + if len(res.Docs) != 2 { + t.Fatalf("expected to have 2 docs; got %d", len(res.Docs)) + } + + item := res.Docs[0] + if item.Error != nil { + t.Errorf("expected no error on item 0; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + var doc tweet + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet1.Message { + t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message) + } + + item = res.Docs[1] + if item.Error != nil { + t.Errorf("expected no error on item 1; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet3.Message { + t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch.go b/vendor/gopkg.in/olivere/elastic.v5/msearch.go new file mode 100644 index 000000000..a568acd92 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/msearch.go @@ -0,0 +1,98 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// MultiSearch executes one or more searches in one roundtrip. 
+// See http://www.elasticsearch.org/guide/reference/api/multi-search/ +type MultiSearchService struct { + client *Client + requests []*SearchRequest + indices []string + pretty bool + routing string + preference string +} + +func NewMultiSearchService(client *Client) *MultiSearchService { + builder := &MultiSearchService{ + client: client, + requests: make([]*SearchRequest, 0), + indices: make([]string, 0), + } + return builder +} + +func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { + s.requests = append(s.requests, requests...) + return s +} + +func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { + s.pretty = pretty + return s +} + +func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { + // Build url + path := "/_msearch" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Set body + var lines []string + for _, sr := range s.requests { + // Set default indices if not specified in the request + if !sr.HasIndices() && len(s.indices) > 0 { + sr = sr.Index(s.indices...) + } + + header, err := json.Marshal(sr.header()) + if err != nil { + return nil, err + } + body, err := json.Marshal(sr.body()) + if err != nil { + return nil, err + } + lines = append(lines, string(header)) + lines = append(lines, string(body)) + } + body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MultiSearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type MultiSearchResult struct { + Responses []*SearchResult `json:"responses,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go new file mode 100644 index 000000000..d2d1e1896 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go @@ -0,0 +1,199 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
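To make the newline-delimited header/body encoding above concrete, a hypothetical sketch that issues two searches in one roundtrip; the index name and queries are assumptions mirroring the tests that follow.

package main

import (
    "fmt"

    "golang.org/x/net/context"

    elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
    client, err := elastic.NewClient() // assumes a local 5.x cluster
    if err != nil {
        panic(err)
    }
    // Two independent search requests; each is serialized as a header line plus a body line.
    req1 := elastic.NewSearchRequest().Index("twitter").
        Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()).Size(10))
    req2 := elastic.NewSearchRequest().Index("twitter").
        Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("tags", "golang")))

    res, err := client.MultiSearch().Add(req1, req2).Do(context.TODO())
    if err != nil {
        panic(err)
    }
    // Responses come back in the same order the requests were added.
    for i, r := range res.Responses {
        fmt.Printf("response #%d: %d total hits\n", i, r.Hits.TotalHits)
    }
}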
+ +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" + + "golang.org/x/net/context" +) + +func TestMultiSearch(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + q1 := NewMatchAllQuery() + q2 := NewTermQuery("tags", "golang") + + sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2). + Source(NewSearchSource().Query(q1).Size(10)) + sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet"). + Source(NewSearchSource().Query(q2)) + + searchResult, err := client.MultiSearch(). + Add(sreq1, sreq2). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 2 { + t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + + sres = searchResult.Responses[1] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 2 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 2 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMultiSearchWithOneRequest(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := 
client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + query := NewMatchAllQuery() + source := NewSearchSource().Query(query).Size(10) + sreq := NewSearchRequest().Source(source) + + searchResult, err := client.MultiSearch(). + Index(testIndexName). + Add(sreq). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 1 { + t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go new file mode 100644 index 000000000..a313cd92c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go @@ -0,0 +1,471 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// MultiTermvectorService returns information and statistics on terms in the +// fields of a particular document. The document could be stored in the +// index or artificially provided by the user. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html +// for documentation. +type MultiTermvectorService struct { + client *Client + pretty bool + index string + typ string + fieldStatistics *bool + fields []string + ids []string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool + version interface{} + versionType string + bodyJson interface{} + bodyString string + docs []*MultiTermvectorItem +} + +// NewMultiTermvectorService creates a new MultiTermvectorService. +func NewMultiTermvectorService(client *Client) *MultiTermvectorService { + return &MultiTermvectorService{ + client: client, + } +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService { + s.pretty = pretty + return s +} + +// Add adds documents to MultiTermvectors service. 
+func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService { + s.docs = append(s.docs, docs...) + return s +} + +// Index in which the document resides. +func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService { + s.index = index + return s +} + +// Type of the document. +func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService { + s.typ = typ + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService { + s.fields = fields + return s +} + +// Ids is a comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body. +func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService { + s.ids = ids + return s +} + +// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService { + s.offsets = &offsets + return s +} + +// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService { + s.positions = &positions + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService { + s.preference = preference + return s +} + +// Realtime specifies if requests are real-time as opposed to near-real-time (default: true). +func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService { + s.realtime = &realtime + return s +} + +// Routing specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService { + s.termStatistics = &termStatistics + return s +} + +// Version is explicit version number for concurrency control. 
+func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService { + s.version = version + return s +} + +// VersionType is specific version type. +func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService { + s.versionType = versionType + return s +} + +// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService { + s.bodyString = body + return s +} + +func (s *MultiTermvectorService) Source() interface{} { + source := make(map[string]interface{}) + docs := make([]interface{}, len(s.docs)) + for i, doc := range s.docs { + docs[i] = doc.Source() + } + source["docs"] = docs + return source +} + +// buildURL builds the URL for the operation. +func (s *MultiTermvectorService) buildURL() (string, url.Values, error) { + var path string + var err error + + if s.index != "" && s.typ != "" { + path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else if s.index != "" && s.typ == "" { + path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{ + "index": s.index, + }) + } else { + path = "/_mtermvectors" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.fieldStatistics != nil { + params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if len(s.ids) > 0 { + params.Set("ids", strings.Join(s.ids, ",")) + } + if s.offsets != nil { + params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.payloads != nil { + params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) + } + if s.positions != nil { + params.Set("positions", fmt.Sprintf("%v", *s.positions)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.termStatistics != nil { + params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *MultiTermvectorService) Validate() error { + var invalid []string + if s.index == "" && s.typ != "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if len(s.bodyString) > 0 { + body = s.bodyString + } else { + body = s.Source() + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(MultiTermvectorResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// MultiTermvectorResponse is the response of MultiTermvectorService.Do. +type MultiTermvectorResponse struct { + Docs []*TermvectorsResponse `json:"docs"` +} + +// -- MultiTermvectorItem -- + +// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService. +type MultiTermvectorItem struct { + index string + typ string + id string + doc interface{} + fieldStatistics *bool + fields []string + perFieldAnalyzer map[string]string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool +} + +func NewMultiTermvectorItem() *MultiTermvectorItem { + return &MultiTermvectorItem{} +} + +func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem { + s.index = index + return s +} + +func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem { + s.typ = typ + return s +} + +func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem { + s.id = id + return s +} + +// Doc is the document to analyze. +func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem { + s.doc = doc + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies +// and sum of total term frequencies should be returned. +func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields a list of fields to return. +func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +// PerFieldAnalyzer allows to specify a different analyzer than the one +// at the field. +func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem { + s.perFieldAnalyzer = perFieldAnalyzer + return s +} + +// Offsets specifies if term offsets should be returned. +func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem { + s.offsets = &offsets + return s +} + +// Parent id of documents. +func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. +func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. +func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem { + s.positions = &positions + return s +} + +// Preference specify the node or shard the operation +// should be performed on (default: random). 
+func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem { + s.preference = preference + return s +} + +// Realtime specifies if request is real-time as opposed to +// near-real-time (default: true). +func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem { + s.realtime = &realtime + return s +} + +// Routing is a specific routing value. +func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency +// should be returned. +func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem { + s.termStatistics = &termStatistics + return s +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiTermvector. +func (s *MultiTermvectorItem) Source() interface{} { + source := make(map[string]interface{}) + + source["_id"] = s.id + + if s.index != "" { + source["_index"] = s.index + } + if s.typ != "" { + source["_type"] = s.typ + } + if s.fields != nil { + source["fields"] = s.fields + } + if s.fieldStatistics != nil { + source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics) + } + if s.offsets != nil { + source["offsets"] = s.offsets + } + if s.parent != "" { + source["parent"] = s.parent + } + if s.payloads != nil { + source["payloads"] = fmt.Sprintf("%v", *s.payloads) + } + if s.positions != nil { + source["positions"] = fmt.Sprintf("%v", *s.positions) + } + if s.preference != "" { + source["preference"] = s.preference + } + if s.realtime != nil { + source["realtime"] = fmt.Sprintf("%v", *s.realtime) + } + if s.routing != "" { + source["routing"] = s.routing + } + if s.termStatistics != nil { + source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics) + } + if s.doc != nil { + source["doc"] = s.doc + } + if s.perFieldAnalyzer != nil && len(s.perFieldAnalyzer) > 0 { + source["per_field_analyzer"] = s.perFieldAnalyzer + } + + return source +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go new file mode 100644 index 000000000..c22fcd43d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go @@ -0,0 +1,135 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
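A hypothetical sketch of a multi term vectors request built with the item builder above; the index, type, ids, and the "Message" field are assumptions taken from the test that follows.

package main

import (
    "fmt"

    "golang.org/x/net/context"

    elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
    client, err := elastic.NewClient() // assumes a local 5.x cluster
    if err != nil {
        panic(err)
    }
    // Request term vectors for two documents in one call.
    res, err := client.MultiTermVectors().
        Index("twitter").
        Type("tweet").
        Add(elastic.NewMultiTermvectorItem().Index("twitter").Type("tweet").Id("1").Fields("Message")).
        Add(elastic.NewMultiTermvectorItem().Index("twitter").Type("tweet").Id("3").Fields("Message")).
        Do(context.TODO())
    if err != nil {
        panic(err)
    }
    fmt.Printf("got term vectors for %d documents\n", len(res.Docs))
}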
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestMultiTermVectorsValidateAndBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Index string + Type string + Expected string + ExpectValidateFailure bool + }{ + // #0: No index, no type + { + "", + "", + "/_mtermvectors", + false, + }, + // #1: Index only + { + "twitter", + "", + "/twitter/_mtermvectors", + false, + }, + // #2: Type without index + { + "", + "tweet", + "", + true, + }, + // #3: Both index and type + { + "twitter", + "tweet", + "/twitter/tweet/_mtermvectors", + false, + }, + } + + for i, test := range tests { + builder := client.MultiTermVectors().Index(test.Index).Type(test.Type) + // Validate + err := builder.Validate() + if err != nil { + if !test.ExpectValidateFailure { + t.Errorf("#%d: expected no error, got: %v", i, err) + continue + } + } else { + if test.ExpectValidateFailure { + t.Errorf("#%d: expected error, got: nil", i) + continue + } + // Build + path, _, err := builder.buildURL() + if err != nil { + t.Errorf("#%d: expected no error, got: %v", i, err) + continue + } + if path != test.Expected { + t.Errorf("#%d: expected %q; got: %q", i, test.Expected, path) + } + } + } +} + +func TestMultiTermVectorsWithIds(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // MultiTermVectors by specifying ID by 1 and 3 + field := "Message" + res, err := client.MultiTermVectors(). + Index(testIndexName). + Type("tweet"). + Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("1").Fields(field)). + Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("3").Fields(field)). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected to return information and statistics") + } + if res.Docs == nil { + t.Fatal("expected result docs to be != nil; got nil") + } + if len(res.Docs) != 2 { + t.Fatalf("expected to have 2 docs; got %d", len(res.Docs)) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go new file mode 100644 index 000000000..c956a5eac --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go @@ -0,0 +1,310 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// NodesInfoService retrieves information about one, several, or all of
+// the cluster's nodes.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+	client       *Client
+	pretty       bool
+	nodeId       []string
+	metric       []string
+	flatSettings *bool
+	human        *bool
+}
+
+// NewNodesInfoService creates a new NodesInfoService.
+func NewNodesInfoService(client *Client) *NodesInfoService {
+	return &NodesInfoService{
+		client: client,
+		nodeId: []string{"_all"},
+		metric: []string{"_all"},
+	}
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+	s.nodeId = append(s.nodeId, nodeId...)
+	return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+	s.metric = append(s.metric, metric...)
+	return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+		"node_id": strings.Join(s.nodeId, ","),
+		"metric":  strings.Join(s.metric, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesInfoService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(NodesInfoResponse)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
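A minimal sketch of driving this service (assumptions: client is an initialized *elastic.Client; the metric names are illustrative), matching the call pattern in nodes_info_test.go further below:

	info, err := client.NodesInfo().Metric("jvm", "os").Do(context.TODO())
	if err != nil {
		// handle error
	}
	fmt.Println(info.ClusterName)
	for id, node := range info.Nodes {
		fmt.Printf("%s: %s (version %s)\n", id, node.Name, node.Version)
	}

+
+// NodesInfoResponse is the response of NodesInfoService.Do.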
+type NodesInfoResponse struct {
+	ClusterName string                    `json:"cluster_name"`
+	Nodes       map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+	// Name of the node, e.g. "Mister Fear"
+	Name string `json:"name"`
+	// TransportAddress, e.g. "127.0.0.1:9300"
+	TransportAddress string `json:"transport_address"`
+	// Host is the host name, e.g. "macbookair"
+	Host string `json:"host"`
+	// IP is the IP address, e.g. "192.168.1.2"
+	IP string `json:"ip"`
+	// Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+	Version string `json:"version"`
+	// Build is the Elasticsearch build, e.g. "36a29a7"
+	Build string `json:"build"`
+	// HTTPAddress, e.g. "127.0.0.1:9200"
+	HTTPAddress string `json:"http_address"`
+	// HTTPSAddress, e.g. "127.0.0.1:9200"
+	HTTPSAddress string `json:"https_address"`
+
+	// Attributes of the node.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Settings of the node, e.g. paths and pidfile.
+	Settings map[string]interface{} `json:"settings"`
+
+	// OS information, e.g. CPU and memory.
+	OS *NodesInfoNodeOS `json:"os"`
+
+	// Process information, e.g. max file descriptors.
+	Process *NodesInfoNodeProcess `json:"process"`
+
+	// JVM information, e.g. VM version.
+	JVM *NodesInfoNodeJVM `json:"jvm"`
+
+	// ThreadPool information.
+	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+	// Network information.
+	Network *NodesInfoNodeNetwork `json:"network"`
+
+	// Transport information.
+	Transport *NodesInfoNodeTransport `json:"transport"`
+
+	// HTTP information.
+	HTTP *NodesInfoNodeHTTP `json:"http"`
+
+	// Plugins information.
+	Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	AvailableProcessors     int    `json:"available_processors"`       // e.g. 4
+
+	// CPU information
+	CPU struct {
+		Vendor           string `json:"vendor"`              // e.g. Intel
+		Model            string `json:"model"`               // e.g. iMac15,1
+		MHz              int    `json:"mhz"`                 // e.g. 3500
+		TotalCores       int    `json:"total_cores"`         // e.g. 4
+		TotalSockets     int    `json:"total_sockets"`       // e.g. 4
+		CoresPerSocket   int    `json:"cores_per_socket"`    // e.g. 16
+		CacheSizeInBytes int    `json:"cache_size_in_bytes"` // e.g. 256
+	} `json:"cpu"`
+
+	// Mem information
+	Mem struct {
+		Total        string `json:"total"`          // e.g. 16gb
+		TotalInBytes int    `json:"total_in_bytes"` // e.g. 17179869184
+	} `json:"mem"`
+
+	// Swap information
+	Swap struct {
+		Total        string `json:"total"`          // e.g. 1gb
+		TotalInBytes int    `json:"total_in_bytes"` // e.g. 1073741824
+	} `json:"swap"`
+}
+
+type NodesInfoNodeProcess struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	ID                      int    `json:"id"`                         // process id, e.g. 87079
+	MaxFileDescriptors      int    `json:"max_file_descriptors"`       // e.g. 32768
+	Mlockall                bool   `json:"mlockall"`                   // e.g. false
+}
+
+type NodesInfoNodeJVM struct {
+	PID       int       `json:"pid"`        // process id, e.g. 87079
+	Version   string    `json:"version"`    // e.g. "1.8.0_25"
+	VMName    string    `json:"vm_name"`    // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+	VMVersion string    `json:"vm_version"` // e.g. "25.25-b02"
+	VMVendor  string    `json:"vm_vendor"`  // e.g. "Oracle Corporation"
+	StartTime time.Time `json:"start_time"` // e.g.
"2015-01-03T15:18:30.982Z" + StartTimeInMillis int64 `json:"start_time_in_millis"` + + // Mem information + Mem struct { + HeapInit string `json:"heap_init"` // e.g. 1gb + HeapInitInBytes int `json:"heap_init_in_bytes"` + HeapMax string `json:"heap_max"` // e.g. 4gb + HeapMaxInBytes int `json:"heap_max_in_bytes"` + NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb + NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` + NonHeapMax string `json:"non_heap_max"` // e.g. 0b + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` + DirectMax string `json:"direct_max"` // e.g. 4gb + DirectMaxInBytes int `json:"direct_max_in_bytes"` + } `json:"mem"` + + GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"] + MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"] +} + +type NodesInfoNodeThreadPool struct { + Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` + Bench *NodesInfoNodeThreadPoolSection `json:"bench"` + Listener *NodesInfoNodeThreadPoolSection `json:"listener"` + Index *NodesInfoNodeThreadPoolSection `json:"index"` + Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` + Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` + Generic *NodesInfoNodeThreadPoolSection `json:"generic"` + Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` + Search *NodesInfoNodeThreadPoolSection `json:"search"` + Flush *NodesInfoNodeThreadPoolSection `json:"flush"` + Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` + Management *NodesInfoNodeThreadPoolSection `json:"management"` + Get *NodesInfoNodeThreadPoolSection `json:"get"` + Merge *NodesInfoNodeThreadPoolSection `json:"merge"` + Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` + Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` +} + +type NodesInfoNodeThreadPoolSection struct { + Type string `json:"type"` // e.g. fixed + Min int `json:"min"` // e.g. 4 + Max int `json:"max"` // e.g. 4 + KeepAlive string `json:"keep_alive"` // e.g. "5m" + QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 +} + +type NodesInfoNodeNetwork struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + PrimaryInterface struct { + Address string `json:"address"` // e.g. 192.168.1.2 + Name string `json:"name"` // e.g. en0 + MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66 + } `json:"primary_interface"` +} + +type NodesInfoNodeTransport struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` + Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` +} + +type NodesInfoNodeTransportProfile struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +type NodesInfoNodeHTTP struct { + BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] + PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" + MaxContentLength string `json:"max_content_length"` // e.g. "100mb" + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` +} + +type NodesInfoNodePlugin struct { + Name string `json:"name"` + Description string `json:"description"` + Site bool `json:"site"` + JVM bool `json:"jvm"` + URL string `json:"url"` // e.g. 
/_plugin/dummy/ +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_info_test.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_info_test.go new file mode 100644 index 000000000..626f6bfd4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_info_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestNodesInfo(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + info, err := client.NodesInfo().Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if info == nil { + t.Fatal("expected nodes info") + } + + if info.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", info.ClusterName) + } + if len(info.Nodes) == 0 { + t.Errorf("expected some nodes; got: %d", len(info.Nodes)) + } + for id, node := range info.Nodes { + if id == "" { + t.Errorf("expected node id; got: %q", id) + } + if node == nil { + t.Fatalf("expected node info; got: %v", node) + } + if node.IP == "" { + t.Errorf("expected node IP; got: %q", node.IP) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go new file mode 100644 index 000000000..9af56d56e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go @@ -0,0 +1,707 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// NodesStatsService returns node statistics. +// See http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html +// for details. +type NodesStatsService struct { + client *Client + pretty bool + metric []string + indexMetric []string + nodeId []string + completionFields []string + fielddataFields []string + fields []string + groups *bool + human *bool + level string + timeout string + types []string +} + +// NewNodesStatsService creates a new NodesStatsService. +func NewNodesStatsService(client *Client) *NodesStatsService { + return &NodesStatsService{ + client: client, + } +} + +// Metric limits the information returned to the specified metrics. +func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService { + s.metric = append(s.metric, metric...) + return s +} + +// IndexMetric limits the information returned for `indices` metric +// to the specific index metrics. Isn't used if `indices` (or `all`) +// metric isn't specified.. +func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService { + s.indexMetric = append(s.indexMetric, indexMetric...) + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// CompletionFields is a list of fields for `fielddata` and `suggest` +// index metric (supports wildcards). 
+func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService {
+	s.completionFields = append(s.completionFields, completionFields...)
+	return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService {
+	s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+	return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards).
+func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService {
+	s.fields = append(s.fields, fields...)
+	return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *NodesStatsService) Groups(groups bool) *NodesStatsService {
+	s.groups = &groups
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesStatsService) Human(human bool) *NodesStatsService {
+	s.human = &human
+	return s
+}
+
+// Level specifies whether to return indices stats aggregated at node, index or shard level.
+func (s *NodesStatsService) Level(level string) *NodesStatsService {
+	s.level = level
+	return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService {
+	s.timeout = timeout
+	return s
+}
+
+// Types is a list of document types for the `indexing` index metric.
+func (s *NodesStatsService) Types(types ...string) *NodesStatsService {
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *NodesStatsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+
+	if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
+		path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{
+			"index_metric": strings.Join(s.indexMetric, ","),
+			"node_id":      strings.Join(s.nodeId, ","),
+			"metric":       strings.Join(s.metric, ","),
+		})
+	} else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
+		path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{
+			"node_id": strings.Join(s.nodeId, ","),
+			"metric":  strings.Join(s.metric, ","),
+		})
+	} else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
+		path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{
+			"index_metric": strings.Join(s.indexMetric, ","),
+			"node_id":      strings.Join(s.nodeId, ","),
+		})
+	} else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
+		path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{
+			"node_id": strings.Join(s.nodeId, ","),
+		})
+	} else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
+		path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{
+			"index_metric": strings.Join(s.indexMetric, ","),
+			"metric":       strings.Join(s.metric, ","),
+		})
+	} else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
+		path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{
+			"metric": strings.Join(s.metric, ","),
+		})
+	} else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
+		path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{
+			"index_metric": strings.Join(s.indexMetric, ","),
+		})
+	} else { // len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0
+		path = "/_nodes/stats"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(s.completionFields) > 0 {
+		params.Set("completion_fields", strings.Join(s.completionFields, ","))
+	}
+	if len(s.fielddataFields) > 0 {
+		params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+	}
+	if len(s.fields) > 0 {
+		params.Set("fields", strings.Join(s.fields, ","))
+	}
+	if s.groups != nil {
+		params.Set("groups", fmt.Sprintf("%v", *s.groups))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if len(s.types) > 0 {
+		params.Set("types", strings.Join(s.types, ","))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesStatsService) Validate() error {
+	return nil
+}
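The branches above only select the URL shape. As a sketch (assumptions: client is an initialized *elastic.Client; node and metric names are illustrative), a call such as

	stats, err := client.NodesStats().
		NodeId("node1").
		Metric("indices", "jvm").
		IndexMetric("fielddata").
		Do(context.TODO())

resolves to the path /_nodes/node1/stats/indices%2Cjvm/fielddata, as exercised case by case in TestNodesStatsBuildURL further below.

+
+// Do executes the operation.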
+func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(NodesStatsResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// NodesStatsResponse is the response of NodesStatsService.Do.
+type NodesStatsResponse struct {
+	ClusterName string                     `json:"cluster_name"`
+	Nodes       map[string]*NodesStatsNode `json:"nodes"`
+}
+
+type NodesStatsNode struct {
+	// Timestamp when these stats were gathered.
+	Timestamp int64 `json:"timestamp"`
+	// Name of the node, e.g. "Mister Fear"
+	Name string `json:"name"`
+	// TransportAddress, e.g. "127.0.0.1:9300"
+	TransportAddress string `json:"transport_address"`
+	// Host is the host name, e.g. "macbookair"
+	Host string `json:"host"`
+	// IP is an IP address, e.g. "192.168.1.2"
+	IP string `json:"ip"`
+	// Roles is a list of the roles of the node, e.g. master, data, ingest.
+	Roles []string `json:"roles"`
+
+	// Attributes of the node.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Indices returns index information.
+	Indices *NodesStatsIndex `json:"indices"`
+
+	// OS information, e.g. CPU and memory.
+	OS *NodesStatsNodeOS `json:"os"`
+
+	// Process information, e.g. max file descriptors.
+	Process *NodesStatsNodeProcess `json:"process"`
+
+	// JVM information, e.g. VM version.
+	JVM *NodesStatsNodeJVM `json:"jvm"`
+
+	// ThreadPool information.
+	ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"`
+
+	// FS returns information about the filesystem.
+	FS *NodesStatsNodeFS `json:"fs"`
+
+	// Transport information.
+	Transport *NodesStatsNodeTransport `json:"transport"`
+
+	// HTTP information.
+	HTTP *NodesStatsNodeHTTP `json:"http"`
+
+	// Breaker contains information about circuit breakers.
+	Breaker map[string]*NodesStatsBreaker `json:"breaker"`
+
+	// ScriptStats information.
+	ScriptStats *NodesStatsScriptStats `json:"script"`
+
+	// Discovery information.
+ Discovery *NodesStatsDiscovery `json:"discovery"` + + // Ingest information + Ingest *NodesStatsIngest `json:"ingest"` +} + +type NodesStatsIndex struct { + Docs *NodesStatsDocsStats `json:"docs"` + Store *NodesStatsStoreStats `json:"store"` + Indexing *NodesStatsIndexingStats `json:"indexing"` + Get *NodesStatsGetStats `json:"get"` + Search *NodesStatsSearchStats `json:"search"` + Merges *NodesStatsMergeStats `json:"merges"` + Refresh *NodesStatsRefreshStats `json:"refresh"` + Flush *NodesStatsFlushStats `json:"flush"` + Warmer *NodesStatsWarmerStats `json:"warmer"` + QueryCache *NodesStatsQueryCacheStats `json:"query_cache"` + Fielddata *NodesStatsFielddataStats `json:"fielddata"` + Percolate *NodesStatsPercolateStats `json:"percolate"` + Completion *NodesStatsCompletionStats `json:"completion"` + Segments *NodesStatsSegmentsStats `json:"segments"` + Translog *NodesStatsTranslogStats `json:"translog"` + Suggest *NodesStatsSuggestStats `json:"suggest"` + RequestCache *NodesStatsRequestCacheStats `json:"request_cache"` + Recovery NodesStatsRecoveryStats `json:"recovery"` + + Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices + Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards +} + +type NodesStatsDocsStats struct { + Count int64 `json:"count"` + Deleted int64 `json:"deleted"` +} + +type NodesStatsStoreStats struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type NodesStatsIndexingStats struct { + IndexTotal int64 `json:"index_total"` + IndexTime string `json:"index_time"` + IndexTimeInMillis int64 `json:"index_time_in_millis"` + IndexCurrent int64 `json:"index_current"` + IndexFailed int64 `json:"index_failed"` + DeleteTotal int64 `json:"delete_total"` + DeleteTime string `json:"delete_time"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis"` + DeleteCurrent int64 `json:"delete_current"` + NoopUpdateTotal int64 `json:"noop_update_total"` + IsThrottled bool `json:"is_throttled"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` + + Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types +} + +type NodesStatsGetStats struct { + Total int64 `json:"total"` + Time string `json:"get_time"` + TimeInMillis int64 `json:"time_in_millis"` + Exists int64 `json:"exists"` + ExistsTime string `json:"exists_time"` + ExistsTimeInMillis int64 `json:"exists_in_millis"` + Missing int64 `json:"missing"` + MissingTime string `json:"missing_time"` + MissingTimeInMillis int64 `json:"missing_in_millis"` + Current int64 `json:"current"` +} + +type NodesStatsSearchStats struct { + OpenContexts int64 `json:"open_contexts"` + QueryTotal int64 `json:"query_total"` + QueryTime string `json:"query_time"` + QueryTimeInMillis int64 `json:"query_time_in_millis"` + QueryCurrent int64 `json:"query_current"` + FetchTotal int64 `json:"fetch_total"` + FetchTime string `json:"fetch_time"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis"` + FetchCurrent int64 `json:"fetch_current"` + ScrollTotal int64 `json:"scroll_total"` + ScrollTime string `json:"scroll_time"` + ScrollTimeInMillis int64 `json:"scroll_time_in_millis"` + ScrollCurrent int64 `json:"scroll_current"` + + Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups +} + +type NodesStatsMergeStats struct { + Current int64 `json:"current"` + CurrentDocs int64 
`json:"current_docs"` + CurrentSize string `json:"current_size"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes"` + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` + TotalDocs int64 `json:"total_docs"` + TotalSize string `json:"total_size"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` + TotalStoppedTime string `json:"total_stopped_time"` + TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"` + TotalThrottledTime string `json:"total_throttled_time"` + TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"` + TotalThrottleBytes string `json:"total_auto_throttle"` + TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"` +} + +type NodesStatsRefreshStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsFlushStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsWarmerStats struct { + Current int64 `json:"current"` + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsQueryCacheStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + TotalCount int64 `json:"total_count"` + HitCount int64 `json:"hit_count"` + MissCount int64 `json:"miss_count"` + CacheSize int64 `json:"cache_size"` + CacheCount int64 `json:"cache_count"` + Evictions int64 `json:"evictions"` +} + +type NodesStatsFielddataStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type NodesStatsPercolateStats struct { + Total int64 `json:"total"` + Time string `json:"time"` + TimeInMillis int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Queries int64 `json:"queries"` +} + +type NodesStatsCompletionStats struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type NodesStatsSegmentsStats struct { + Count int64 `json:"count"` + Memory string `json:"memory"` + MemoryInBytes int64 `json:"memory_in_bytes"` + TermsMemory string `json:"terms_memory"` + TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"` + StoredFieldsMemory string `json:"stored_fields_memory"` + StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"` + TermVectorsMemory string `json:"term_vectors_memory"` + TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"` + NormsMemory string `json:"norms_memory"` + NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"` + DocValuesMemory string `json:"doc_values_memory"` + DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + 
VersionMapMemory string `json:"version_map_memory"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type NodesStatsTranslogStats struct { + Operations int64 `json:"operations"` + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +type NodesStatsSuggestStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` + Current int64 `json:"current"` +} + +type NodesStatsRequestCacheStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + HitCount int64 `json:"hit_count"` + MissCount int64 `json:"miss_count"` +} + +type NodesStatsRecoveryStats struct { + CurrentAsSource int `json:"current_as_source"` + CurrentAsTarget int `json:"current_as_target"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type NodesStatsNodeOS struct { + Timestamp int64 `json:"timestamp"` + CPU *NodesStatsNodeOSCPU `json:"cpu"` + Mem *NodesStatsNodeOSMem `json:"mem"` + Swap *NodesStatsNodeOSSwap `json:"swap"` +} + +type NodesStatsNodeOSCPU struct { + Percent int `json:"percent"` + LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m +} + +type NodesStatsNodeOSMem struct { + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` + FreePercent int `json:"free_percent"` + UsedPercent int `json:"used_percent"` +} + +type NodesStatsNodeOSSwap struct { + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` +} + +type NodesStatsNodeProcess struct { + Timestamp int64 `json:"timestamp"` + OpenFileDescriptors int64 `json:"open_file_descriptors"` + MaxFileDescriptors int64 `json:"max_file_descriptors"` + CPU struct { + Percent int `json:"percent"` + Total string `json:"total"` + TotalInMillis int64 `json:"total_in_millis"` + } `json:"cpu"` + Mem struct { + TotalVirtual string `json:"total_virtual"` + TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"` + } `json:"mem"` +} + +type NodesStatsNodeJVM struct { + Timestamp int64 `json:"timestamp"` + Uptime string `json:"uptime"` + UptimeInMillis int64 `json:"uptime_in_millis"` + Mem *NodesStatsNodeJVMMem `json:"mem"` + Threads *NodesStatsNodeJVMThreads `json:"threads"` + GC *NodesStatsNodeJVMGC `json:"gc"` + BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"` + Classes *NodesStatsNodeJVMClasses `json:"classes"` +} + +type NodesStatsNodeJVMMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapUsedPercent int `json:"heap_used_percent"` + HeapCommitted string `json:"heap_committed"` + HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + NonHeapUsed string `json:"non_heap_used"` + NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` + NonHeapCommitted string `json:"non_heap_committed"` + NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` + 
Pools map[string]struct { + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` + Max string `json:"max"` + MaxInBytes int64 `json:"max_in_bytes"` + PeakUsed string `json:"peak_used"` + PeakUsedInBytes int64 `json:"peak_used_in_bytes"` + PeakMax string `json:"peak_max"` + PeakMaxInBytes int64 `json:"peak_max_in_bytes"` + } `json:"pools"` +} + +type NodesStatsNodeJVMThreads struct { + Count int64 `json:"count"` + PeakCount int64 `json:"peak_count"` +} + +type NodesStatsNodeJVMGC struct { + Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"` +} + +type NodesStatsNodeJVMGCCollector struct { + CollectionCount int64 `json:"collection_count"` + CollectionTime string `json:"collection_time"` + CollectionTimeInMillis int64 `json:"collection_time_in_millis"` +} + +type NodesStatsNodeJVMBufferPool struct { + Count int64 `json:"count"` + TotalCapacity string `json:"total_capacity"` + TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` +} + +type NodesStatsNodeJVMClasses struct { + CurrentLoadedCount int64 `json:"current_loaded_count"` + TotalLoadedCount int64 `json:"total_loaded_count"` + TotalUnloadedCount int64 `json:"total_unloaded_count"` +} + +type NodesStatsNodeThreadPool struct { + Threads int `json:"threads"` + Queue int `json:"queue"` + Active int `json:"active"` + Rejected int64 `json:"rejected"` + Largest int `json:"largest"` + Completed int64 `json:"completed"` +} + +type NodesStatsNodeFS struct { + Timestamp int64 `json:"timestamp"` + Total *NodesStatsNodeFSEntry `json:"total"` + Data []*NodesStatsNodeFSEntry `json:"data"` + IOStats *NodesStatsNodeFSIOStats `json:"io_stats"` +} + +type NodesStatsNodeFSEntry struct { + Path string `json:"path"` + Mount string `json:"mount"` + Type string `json:"type"` + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` + AvailableInBytes int64 `json:"available_in_bytes"` + Spins string `json:"spins"` +} + +type NodesStatsNodeFSIOStats struct { + Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"` + Total *NodesStatsNodeFSIOStatsEntry `json:"total"` +} + +type NodesStatsNodeFSIOStatsEntry struct { + DeviceName string `json:"device_name"` + Operations int64 `json:"operations"` + ReadOperations int64 `json:"read_operations"` + WriteOperations int64 `json:"write_operations"` + ReadKilobytes int64 `json:"read_kilobytes"` + WriteKilobytes int64 `json:"write_kilobytes"` +} + +type NodesStatsNodeTransport struct { + ServerOpen int `json:"server_open"` + RxCount int64 `json:"rx_count"` + RxSize string `json:"rx_size"` + RxSizeInBytes int64 `json:"rx_size_in_bytes"` + TxCount int64 `json:"tx_count"` + TxSize string `json:"tx_size"` + TxSizeInBytes int64 `json:"tx_size_in_bytes"` +} + +type NodesStatsNodeHTTP struct { + CurrentOpen int `json:"current_open"` + TotalOpened int `json:"total_opened"` +} + +type NodesStatsBreaker struct { + LimitSize string `json:"limit_size"` + LimitSizeInBytes int64 `json:"limit_size_in_bytes"` + EstimatedSize string `json:"estimated_size"` + EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"` + Overhead float64 `json:"overhead"` + Tripped int64 `json:"tripped"` +} + +type NodesStatsScriptStats struct { + Compilations int64 `json:"compilations"` + CacheEvictions int64 `json:"cache_evictions"` +} + +type NodesStatsDiscovery struct { + ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"` +} + +type NodesStatsDiscoveryStats struct { 
+ Total int64 `json:"total"` + Pending int64 `json:"pending"` + Committed int64 `json:"committed"` +} + +type NodesStatsIngest struct { + Total *NodesStatsIngestStats `json:"total"` + Pipelines interface{} `json:"pipelines"` +} + +type NodesStatsIngestStats struct { + Count int64 `json:"count"` + Time string `json:"time"` + TimeInMillis int64 `json:"time_in_millis"` + Current int64 `json:"current"` + Failed int64 `json:"failed"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats_test.go new file mode 100644 index 000000000..d74134243 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats_test.go @@ -0,0 +1,139 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestNodesStats(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + info, err := client.NodesStats().Human(true).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if info == nil { + t.Fatal("expected nodes stats") + } + + if info.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", info.ClusterName) + } + if len(info.Nodes) == 0 { + t.Errorf("expected some nodes; got: %d", len(info.Nodes)) + } + for id, node := range info.Nodes { + if id == "" { + t.Errorf("expected node id; got: %q", id) + } + if node == nil { + t.Fatalf("expected node info; got: %v", node) + } + if len(node.Name) == 0 { + t.Errorf("expected node name; got: %q", node.Name) + } + if node.Timestamp == 0 { + t.Errorf("expected timestamp; got: %q", node.Timestamp) + } + } +} + +func TestNodesStatsBuildURL(t *testing.T) { + tests := []struct { + NodeIds []string + Metrics []string + IndexMetrics []string + Expected string + }{ + { + NodeIds: nil, + Metrics: nil, + IndexMetrics: nil, + Expected: "/_nodes/stats", + }, + { + NodeIds: []string{"node1"}, + Metrics: nil, + IndexMetrics: nil, + Expected: "/_nodes/node1/stats", + }, + { + NodeIds: []string{"node1", "node2"}, + Metrics: nil, + IndexMetrics: nil, + Expected: "/_nodes/node1%2Cnode2/stats", + }, + { + NodeIds: nil, + Metrics: []string{"indices"}, + IndexMetrics: nil, + Expected: "/_nodes/stats/indices", + }, + { + NodeIds: nil, + Metrics: []string{"indices", "jvm"}, + IndexMetrics: nil, + Expected: "/_nodes/stats/indices%2Cjvm", + }, + { + NodeIds: []string{"node1"}, + Metrics: []string{"indices", "jvm"}, + IndexMetrics: nil, + Expected: "/_nodes/node1/stats/indices%2Cjvm", + }, + { + NodeIds: nil, + Metrics: nil, + IndexMetrics: []string{"fielddata"}, + Expected: "/_nodes/stats/_all/fielddata", + }, + { + NodeIds: []string{"node1"}, + Metrics: nil, + IndexMetrics: []string{"fielddata"}, + Expected: "/_nodes/node1/stats/_all/fielddata", + }, + { + NodeIds: nil, + Metrics: []string{"indices"}, + IndexMetrics: []string{"fielddata"}, + Expected: "/_nodes/stats/indices/fielddata", + }, + { + NodeIds: []string{"node1"}, + Metrics: []string{"indices"}, + IndexMetrics: []string{"fielddata"}, + Expected: "/_nodes/node1/stats/indices/fielddata", + }, + { + NodeIds: []string{"node1", "node2"}, + Metrics: []string{"indices", "jvm"}, + IndexMetrics: []string{"fielddata", "docs"}, + Expected: "/_nodes/node1%2Cnode2/stats/indices%2Cjvm/fielddata%2Cdocs", + }, + } + + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + for i, tt := range tests { + svc := 
client.NodesStats().NodeId(tt.NodeIds...).Metric(tt.Metrics...).IndexMetric(tt.IndexMetrics...) + path, _, err := svc.buildURL() + if err != nil { + t.Errorf("#%d: expected no error, got %v", i, err) + } else { + if want, have := tt.Expected, path; want != have { + t.Errorf("#%d: expected %q, got %q", i, want, have) + } + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go b/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go new file mode 100644 index 000000000..b847f02a8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestPercolate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + // Add a document + _, err := client.Index(). + Index(testIndexName). + Type("queries"). + Id("1"). + BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`). + Refresh("wait_for"). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Percolate should return our registered query + pq := NewPercolatorQuery(). + Field("query"). + DocumentType("doctype"). + Document(doctype{Message: "A new bonsai tree in the office"}) + res, err := client.Search(testIndexName).Query(pq).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected results != nil; got nil") + } + if res.Hits == nil { + t.Fatal("expected SearchResult.Hits != nil; got nil") + } + if got, want := res.Hits.TotalHits, int64(1); got != want { + t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) + } + if got, want := len(res.Hits.Hits), 1; got != want { + t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) + } + hit := res.Hits.Hits[0] + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + got := string(*hit.Source) + expected := `{"query":{"match":{"message":"bonsai tree"}}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ping.go b/vendor/gopkg.in/olivere/elastic.v5/ping.go new file mode 100644 index 000000000..5ec7f0f9a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ping.go @@ -0,0 +1,129 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/http" + "net/url" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// PingService checks if an Elasticsearch server on a given URL is alive. +// When asked for, it can also return various information about the +// Elasticsearch server, e.g. the Elasticsearch version number. +// +// Ping simply starts a HTTP GET request to the URL of the server. +// If the server responds with HTTP Status code 200 OK, the server is alive. +type PingService struct { + client *Client + url string + timeout string + httpHeadOnly bool + pretty bool +} + +// PingResult is the result returned from querying the Elasticsearch server. 
+type PingResult struct {
+	Name        string `json:"name"`
+	ClusterName string `json:"cluster_name"`
+	Version     struct {
+		Number         string `json:"number"`
+		BuildHash      string `json:"build_hash"`
+		BuildTimestamp string `json:"build_timestamp"`
+		BuildSnapshot  bool   `json:"build_snapshot"`
+		LuceneVersion  string `json:"lucene_version"`
+	} `json:"version"`
+	TagLine string `json:"tagline"`
+}
+
+func NewPingService(client *Client) *PingService {
+	return &PingService{
+		client:       client,
+		url:          DefaultURL,
+		httpHeadOnly: false,
+		pretty:       false,
+	}
+}
+
+func (s *PingService) URL(url string) *PingService {
+	s.url = url
+	return s
+}
+
+func (s *PingService) Timeout(timeout string) *PingService {
+	s.timeout = timeout
+	return s
+}
+
+// HttpHeadOnly makes the service only return the HTTP status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+	s.httpHeadOnly = httpHeadOnly
+	return s
+}
+
+func (s *PingService) Pretty(pretty bool) *PingService {
+	s.pretty = pretty
+	return s
+}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
+	s.client.mu.RLock()
+	basicAuth := s.client.basicAuth
+	basicAuthUsername := s.client.basicAuthUsername
+	basicAuthPassword := s.client.basicAuthPassword
+	s.client.mu.RUnlock()
+
+	url_ := s.url + "/"
+
+	params := make(url.Values)
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(params) > 0 {
+		url_ += "?" + params.Encode()
+	}
+
+	var method string
+	if s.httpHeadOnly {
+		method = "HEAD"
+	} else {
+		method = "GET"
+	}
+
+	// Notice: This service must NOT use PerformRequest!
+	req, err := NewRequest(method, url_)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if basicAuth {
+		req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+	}
+
+	res, err := ctxhttp.Do(ctx, s.client.c, (*http.Request)(req))
+	if err != nil {
+		return nil, 0, err
+	}
+	defer res.Body.Close()
+
+	var ret *PingResult
+	if !s.httpHeadOnly {
+		ret = new(PingResult)
+		if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+			return nil, res.StatusCode, err
+		}
+	}
+
+	return ret, res.StatusCode, nil
+}
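A short usage sketch (assumptions: client is an initialized *elastic.Client; DefaultURL is the package's default endpoint constant), mirroring the tests that follow:

	res, code, err := client.Ping(DefaultURL).Do(context.TODO())
	if err != nil {
		// handle error
	}
	fmt.Printf("%d: %s (Elasticsearch %s)\n", code, res.Name, res.Version.Number)

	// Status code only; res will be nil in this mode:
	_, code, err = client.Ping(DefaultURL).HttpHeadOnly(true).Do(context.TODO())

diff --git a/vendor/gopkg.in/olivere/elastic.v5/ping_test.go b/vendor/gopkg.in/olivere/elastic.v5/ping_test.go
new file mode 100644
index 000000000..1462b3585
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/ping_test.go
@@ -0,0 +1,66 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.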
+
+package elastic
+
+import (
+	"net/http"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestPingGet(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.Ping(DefaultURL).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if code != http.StatusOK {
+		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+	}
+	if res == nil {
+		t.Fatalf("expected to return result, got: %v", res)
+	}
+	if res.Name == "" {
+		t.Errorf("expected Name != \"\"; got %q", res.Name)
+	}
+	if res.Version.Number == "" {
+		t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
+	}
+}
+
+func TestPingHead(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if code != http.StatusOK {
+		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+	}
+	if res != nil {
+		t.Errorf("expected not to return result, got: %v", res)
+	}
+}
+
+func TestPingHeadFailure(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	res, code, err := client.
+		Ping("http://127.0.0.1:9299").
+		HttpHeadOnly(true).
+		Do(context.TODO())
+	if err == nil {
+		t.Error("expected error, got nil")
+	}
+	if code == http.StatusOK {
+		t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
+	}
+	if res != nil {
+		t.Errorf("expected not to return result, got: %v", res)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/plugins.go b/vendor/gopkg.in/olivere/elastic.v5/plugins.go
new file mode 100644
index 000000000..a46ac3748
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/plugins.go
@@ -0,0 +1,40 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "golang.org/x/net/context"
+
+// HasPlugin indicates whether the cluster has the named plugin.
+func (c *Client) HasPlugin(name string) (bool, error) {
+	plugins, err := c.Plugins()
+	if err != nil {
+		return false, err
+	}
+	for _, plugin := range plugins {
+		if plugin == name {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Plugins returns the list of all registered plugins.
+func (c *Client) Plugins() ([]string, error) {
+	stats, err := c.ClusterStats().Do(context.Background())
+	if err != nil {
+		return nil, err
+	}
+	if stats == nil {
+		return nil, err
+	}
+	if stats.Nodes == nil {
+		return nil, err
+	}
+	var plugins []string
+	for _, plugin := range stats.Nodes.Plugins {
+		plugins = append(plugins, plugin.Name)
+	}
+	return plugins, nil
+}
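A brief sketch of the helpers above (assumptions: client is an initialized *elastic.Client; the plugin name is illustrative):

	found, err := client.HasPlugin("analysis-icu")
	if err != nil {
		// handle error
	}
	if !found {
		// plugin is not installed on the cluster
	}

diff --git a/vendor/gopkg.in/olivere/elastic.v5/plugins_test.go b/vendor/gopkg.in/olivere/elastic.v5/plugins_test.go
new file mode 100644
index 000000000..969f0b0e5
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/plugins_test.go
@@ -0,0 +1,32 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.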
+ +package elastic + +import "testing" + +func TestClientPlugins(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + _, err = client.Plugins() + if err != nil { + t.Fatal(err) + } +} + +func TestClientHasPlugin(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + found, err := client.HasPlugin("no-such-plugin") + if err != nil { + t.Fatal(err) + } + if found { + t.Fatalf("expected to not find plugin %q", "no-such-plugin") + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/put_template.go b/vendor/gopkg.in/olivere/elastic.v5/put_template.go new file mode 100644 index 000000000..13635a052 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/put_template.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// PutTemplateService creates or updates a search template. +// The documentation can be found at +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type PutTemplateService struct { + client *Client + pretty bool + id string + opType string + version *int + versionType string + bodyJson interface{} + bodyString string +} + +// NewPutTemplateService creates a new PutTemplateService. +func NewPutTemplateService(client *Client) *PutTemplateService { + return &PutTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *PutTemplateService) Id(id string) *PutTemplateService { + s.id = id + return s +} + +// OpType is an explicit operation type. +func (s *PutTemplateService) OpType(opType string) *PutTemplateService { + s.opType = opType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *PutTemplateService) Version(version int) *PutTemplateService { + s.version = &version + return s +} + +// VersionType is a specific version type. +func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService { + s.versionType = versionType + return s +} + +// BodyJson is the document as a JSON serializable object. +func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is the document as a string. +func (s *PutTemplateService) BodyString(body string) *PutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *PutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *PutTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PutTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(AcknowledgedResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go new file mode 100644 index 000000000..ea7971958 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go @@ -0,0 +1,54 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestSearchTemplatesLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Template + tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` + + // Create template + cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if cresp == nil { + t.Fatalf("expected response != nil; got: %v", cresp) + } + if !cresp.Acknowledged { + t.Errorf("expected acknowledged = %v; got: %v", true, cresp.Acknowledged) + } + + // Get template + resp, err := client.GetTemplate().Id("elastic-test").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Template == "" { + t.Errorf("expected template != %q; got: %q", "", resp.Template) + } + + // Delete template + dresp, err := client.DeleteTemplate().Id("elastic-test").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if dresp == nil { + t.Fatalf("expected response != nil; got: %v", dresp) + } + if !dresp.Acknowledged { + t.Fatalf("expected acknowledged = %v; got: %v", true, dresp.Acknowledged) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/query.go b/vendor/gopkg.in/olivere/elastic.v5/query.go new file mode 100644 index 000000000..ad01354a0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/query.go @@ -0,0 +1,13 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Query represents the generic query interface. A query's sole purpose +// is to return the source of the query as a JSON-serializable object. +// Returning map[string]interface{} is the norm for queries. +type Query interface { + // Source returns the JSON-serializable query request. 
+	Source() (interface{}, error)
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex.go b/vendor/gopkg.in/olivere/elastic.v5/reindex.go
new file mode 100644
index 000000000..4fce544e7
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/reindex.go
@@ -0,0 +1,553 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ReindexService copies documents from one index to another.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-reindex.html.
+type ReindexService struct {
+	client              *Client
+	pretty              bool
+	refresh             string
+	timeout             string
+	waitForActiveShards string
+	waitForCompletion   *bool
+	requestsPerSecond   *int
+	body                interface{}
+	source              *ReindexSource
+	destination         *ReindexDestination
+	conflicts           string
+	size                *int
+	script              *Script
+}
+
+// NewReindexService creates a new ReindexService.
+func NewReindexService(client *Client) *ReindexService {
+	return &ReindexService{
+		client: client,
+	}
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before
+// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or
+// equal to the total number of copies for the shard (number of replicas + 1).
+func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService {
+	s.waitForActiveShards = waitForActiveShards
+	return s
+}
+
+// RequestsPerSecond specifies the throttle to set on this request in sub-requests per second.
+// -1 means set no throttle as does "unlimited" which is the only non-float this accepts.
+func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService {
+	s.requestsPerSecond = &requestsPerSecond
+	return s
+}
+
+// Refresh indicates whether Elasticsearch should refresh the affected indices
+// immediately.
+func (s *ReindexService) Refresh(refresh string) *ReindexService {
+	s.refresh = refresh
+	return s
+}
+
+// Timeout is the time each individual bulk request should wait for shards
+// that are unavailable.
+func (s *ReindexService) Timeout(timeout string) *ReindexService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForCompletion indicates whether Elasticsearch should block until the
+// reindex is complete.
+func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService {
+	s.waitForCompletion = &waitForCompletion
+	return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ReindexService) Pretty(pretty bool) *ReindexService {
+	s.pretty = pretty
+	return s
+}
+
+// Source specifies the source of the reindexing process.
+func (s *ReindexService) Source(source *ReindexSource) *ReindexService {
+	s.source = source
+	return s
+}
+
+// SourceIndex specifies the source index of the reindexing process.
+func (s *ReindexService) SourceIndex(index string) *ReindexService {
+	if s.source == nil {
+		s.source = NewReindexSource()
+	}
+	s.source = s.source.Index(index)
+	return s
+}
+
+// Destination specifies the destination of the reindexing process.
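+//
+// A minimal wiring sketch (for illustration; the index names and the ctx
+// variable are hypothetical, and client is assumed to be a configured *Client):
+//
+//	src := NewReindexSource().Index("old_index")
+//	dst := NewReindexDestination().Index("new_index")
+//	res, err := client.Reindex().Source(src).Destination(dst).Do(ctx)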
+func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService {
+	s.destination = destination
+	return s
+}
+
+// DestinationIndex specifies the destination index of the reindexing process.
+func (s *ReindexService) DestinationIndex(index string) *ReindexService {
+	if s.destination == nil {
+		s.destination = NewReindexDestination()
+	}
+	s.destination = s.destination.Index(index)
+	return s
+}
+
+// DestinationIndexAndType specifies both the destination index and type
+// of the reindexing process.
+func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService {
+	if s.destination == nil {
+		s.destination = NewReindexDestination()
+	}
+	s.destination = s.destination.Index(index)
+	s.destination = s.destination.Type(typ)
+	return s
+}
+
+// Conflicts indicates what to do when the process detects version conflicts.
+// Possible values are "proceed" and "abort".
+func (s *ReindexService) Conflicts(conflicts string) *ReindexService {
+	s.conflicts = conflicts
+	return s
+}
+
+// AbortOnVersionConflict aborts the request on version conflicts.
+// It is an alias to setting Conflicts("abort").
+func (s *ReindexService) AbortOnVersionConflict() *ReindexService {
+	s.conflicts = "abort"
+	return s
+}
+
+// ProceedOnVersionConflict proceeds with the request when it detects
+// version conflicts. It is an alias to setting Conflicts("proceed").
+func (s *ReindexService) ProceedOnVersionConflict() *ReindexService {
+	s.conflicts = "proceed"
+	return s
+}
+
+// Size sets an upper limit for the number of processed documents.
+func (s *ReindexService) Size(size int) *ReindexService {
+	s.size = &size
+	return s
+}
+
+// Script allows for modification of the documents as they are reindexed
+// from source to destination.
+func (s *ReindexService) Script(script *Script) *ReindexService {
+	s.script = script
+	return s
+}
+
+// Body specifies the body of the request to send to Elasticsearch.
+// It overrides settings specified with other setters, e.g. Query.
+func (s *ReindexService) Body(body interface{}) *ReindexService {
+	s.body = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ReindexService) buildURL() (string, url.Values, error) {
+	// Build URL path
+	path := "/_reindex"
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.refresh != "" {
+		params.Set("refresh", s.refresh)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.requestsPerSecond != nil {
+		params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
+	}
+	if s.waitForActiveShards != "" {
+		params.Set("wait_for_active_shards", s.waitForActiveShards)
+	}
+	if s.waitForCompletion != nil {
+		params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ReindexService) Validate() error {
+	var invalid []string
+	if s.body != nil {
+		return nil
+	}
+	if s.source == nil {
+		invalid = append(invalid, "Source")
+	} else {
+		if len(s.source.indices) == 0 {
+			invalid = append(invalid, "Source.Index")
+		}
+	}
+	if s.destination == nil {
+		invalid = append(invalid, "Destination")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// getBody returns the body part of the document request.
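+//
+// For example (mirroring the serialization tests in reindex_test.go), a
+// service configured with Conflicts("proceed"), source index "twitter" and
+// destination index "new_twitter" produces a body like:
+//
+//	{"conflicts":"proceed","dest":{"index":"new_twitter"},"source":{"index":"twitter"}}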
+func (s *ReindexService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil + } + + body := make(map[string]interface{}) + + if s.conflicts != "" { + body["conflicts"] = s.conflicts + } + if s.size != nil { + body["size"] = *s.size + } + if s.script != nil { + out, err := s.script.Source() + if err != nil { + return nil, err + } + body["script"] = out + } + + src, err := s.source.Source() + if err != nil { + return nil, err + } + body["source"] = src + + dst, err := s.destination.Source() + if err != nil { + return nil, err + } + body["dest"] = dst + + return body, nil +} + +// Do executes the operation. +func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.getBody() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Source of Reindex -- + +// ReindexSource specifies the source of a Reindex process. +type ReindexSource struct { + searchType string // default in ES is "query_then_fetch" + indices []string + types []string + routing *string + preference *string + requestCache *bool + scroll string + query Query + sorts []SortInfo + sorters []Sorter + searchSource *SearchSource +} + +// NewReindexSource creates a new ReindexSource. +func NewReindexSource() *ReindexSource { + return &ReindexSource{} +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (r *ReindexSource) SearchType(searchType string) *ReindexSource { + r.searchType = searchType + return r +} + +func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource { + return r.SearchType("dfs_query_then_fetch") +} + +func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource { + return r.SearchType("query_then_fetch") +} + +func (r *ReindexSource) Index(indices ...string) *ReindexSource { + r.indices = append(r.indices, indices...) + return r +} + +func (r *ReindexSource) Type(types ...string) *ReindexSource { + r.types = append(r.types, types...) + return r +} + +func (r *ReindexSource) Preference(preference string) *ReindexSource { + r.preference = &preference + return r +} + +func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource { + r.requestCache = &requestCache + return r +} + +func (r *ReindexSource) Scroll(scroll string) *ReindexSource { + r.scroll = scroll + return r +} + +func (r *ReindexSource) Query(query Query) *ReindexSource { + r.query = query + return r +} + +// Sort adds a sort order. +func (s *ReindexSource) Sort(field string, ascending bool) *ReindexSource { + s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending}) + return s +} + +// SortWithInfo adds a sort order. +func (s *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource { + s.sorts = append(s.sorts, info) + return s +} + +// SortBy adds a sort order. +func (s *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource { + s.sorters = append(s.sorters, sorter...) + return s +} + +// Source returns a serializable JSON request for the request. 
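+//
+// As in the tests below: multiple indices or types serialize as JSON arrays,
+// single values as plain strings, e.g.
+//
+//	{"index":["twitter","blog"],"type":["tweet","post"]}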
+func (r *ReindexSource) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+
+	if r.query != nil {
+		src, err := r.query.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["query"] = src
+	} else if r.searchSource != nil {
+		src, err := r.searchSource.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["source"] = src
+	}
+
+	if r.searchType != "" {
+		source["search_type"] = r.searchType
+	}
+
+	switch len(r.indices) {
+	case 0:
+	case 1:
+		source["index"] = r.indices[0]
+	default:
+		source["index"] = r.indices
+	}
+
+	switch len(r.types) {
+	case 0:
+	case 1:
+		source["type"] = r.types[0]
+	default:
+		source["type"] = r.types
+	}
+
+	if r.preference != nil && *r.preference != "" {
+		source["preference"] = *r.preference
+	}
+
+	if r.requestCache != nil {
+		source["request_cache"] = fmt.Sprintf("%v", *r.requestCache)
+	}
+
+	if r.scroll != "" {
+		source["scroll"] = r.scroll
+	}
+
+	if len(r.sorters) > 0 {
+		var sortarr []interface{}
+		for _, sorter := range r.sorters {
+			src, err := sorter.Source()
+			if err != nil {
+				return nil, err
+			}
+			sortarr = append(sortarr, src)
+		}
+		source["sort"] = sortarr
+	} else if len(r.sorts) > 0 {
+		var sortarr []interface{}
+		for _, sort := range r.sorts {
+			src, err := sort.Source()
+			if err != nil {
+				return nil, err
+			}
+			sortarr = append(sortarr, src)
+		}
+		source["sort"] = sortarr
+	}
+
+	return source, nil
+}
+
+// -- Destination of Reindex --
+
+// ReindexDestination is the destination of a Reindex API call.
+// It is basically the metadata of a BulkIndexRequest.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/docs-reindex.html
+// for details.
+type ReindexDestination struct {
+	index       string
+	typ         string
+	routing     string
+	parent      string
+	opType      string
+	version     int64  // default is MATCH_ANY
+	versionType string // default is "internal"
+}
+
+// NewReindexDestination returns a new ReindexDestination.
+func NewReindexDestination() *ReindexDestination {
+	return &ReindexDestination{}
+}
+
+// Index specifies the name of the Elasticsearch index to use as the destination
+// of a reindexing process.
+func (r *ReindexDestination) Index(index string) *ReindexDestination {
+	r.index = index
+	return r
+}
+
+// Type specifies the Elasticsearch type to use for reindexing.
+func (r *ReindexDestination) Type(typ string) *ReindexDestination {
+	r.typ = typ
+	return r
+}
+
+// Routing specifies a routing value for the reindexing request.
+// It can be "keep", "discard", or start with "=". The latter specifies
+// the routing on the bulk request.
+func (r *ReindexDestination) Routing(routing string) *ReindexDestination {
+	r.routing = routing
+	return r
+}
+
+// Keep sets the routing on the bulk request sent for each match to the routing
+// of the match (the default).
+func (r *ReindexDestination) Keep() *ReindexDestination {
+	r.routing = "keep"
+	return r
+}
+
+// Discard sets the routing on the bulk request sent for each match to null.
+func (r *ReindexDestination) Discard() *ReindexDestination {
+	r.routing = "discard"
+	return r
+}
+
+// Parent specifies the identifier of the parent document (if available).
+func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
+	r.parent = parent
+	return r
+}
+
+// OpType specifies if this request should follow create-only or upsert
+// behavior. This follows the OpType of the standard document index API.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#operation-type
+// for details.
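+//
+// Sketch (the index name is illustrative): with OpType("create") the reindex
+// only creates documents that are missing in the destination and reports
+// version conflicts for documents that already exist there:
+//
+//	dst := NewReindexDestination().Index("new_twitter").OpType("create")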
+func (r *ReindexDestination) OpType(opType string) *ReindexDestination { + r.opType = opType + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *ReindexDestination) Version(version int64) *ReindexDestination { + r.version = version + return r +} + +// VersionType specifies how versions are created. +func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination { + r.versionType = versionType + return r +} + +// Source returns a serializable JSON request for the request. +func (r *ReindexDestination) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.index != "" { + source["index"] = r.index + } + if r.typ != "" { + source["type"] = r.typ + } + if r.routing != "" { + source["routing"] = r.routing + } + if r.opType != "" { + source["op_type"] = r.opType + } + if r.parent != "" { + source["parent"] = r.parent + } + if r.version > 0 { + source["version"] = r.version + } + if r.versionType != "" { + source["version_type"] = r.versionType + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go b/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go new file mode 100644 index 000000000..5e7b8fe40 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go @@ -0,0 +1,292 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestReindexSourceWithBodyMap(t *testing.T) { + client := setupTestClient(t) + out, err := client.Reindex().Body(map[string]interface{}{ + "source": map[string]interface{}{ + "index": "twitter", + }, + "dest": map[string]interface{}{ + "index": "new_twitter", + }, + }).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithBodyString(t *testing.T) { + client := setupTestClient(t) + got, err := client.Reindex().Body(`{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`).getBody() + if err != nil { + t.Fatal(err) + } + want := `{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithSourceIndexAndDestinationIndex(t *testing.T) { + client := setupTestClient(t) + out, err := client.Reindex().SourceIndex("twitter").DestinationIndex("new_twitter").getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithSourceAndDestinationAndVersionType(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter") + dst := NewReindexDestination().Index("new_twitter").VersionType("external") + out, err := client.Reindex().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := 
`{"dest":{"index":"new_twitter","version_type":"external"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithSourceAndDestinationAndOpType(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter") + dst := NewReindexDestination().Index("new_twitter").OpType("create") + out, err := client.Reindex().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithConflictsProceed(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter") + dst := NewReindexDestination().Index("new_twitter").OpType("create") + out, err := client.Reindex().Conflicts("proceed").Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithProceedOnVersionConflict(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter") + dst := NewReindexDestination().Index("new_twitter").OpType("create") + out, err := client.Reindex().ProceedOnVersionConflict().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithQuery(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter").Type("tweet").Query(NewTermQuery("user", "olivere")) + dst := NewReindexDestination().Index("new_twitter") + out, err := client.Reindex().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","query":{"term":{"user":"olivere"}},"type":"tweet"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter", "blog").Type("tweet", "post") + dst := NewReindexDestination().Index("all_together") + out, err := client.Reindex().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"all_together"},"source":{"index":["twitter","blog"],"type":["tweet","post"]}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithSourceAndSize(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter").Sort("date", false) + dst := NewReindexDestination().Index("new_twitter") + out, err := client.Reindex().Size(10000).Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if 
err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter"},"size":10000,"source":{"index":"twitter","sort":[{"date":{"order":"desc"}}]}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithScript(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("twitter") + dst := NewReindexDestination().Index("new_twitter").VersionType("external") + scr := NewScriptInline("if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}") + out, err := client.Reindex().Source(src).Destination(dst).Script(scr).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"new_twitter","version_type":"external"},"script":{"inline":"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"},"source":{"index":"twitter"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindexSourceWithRouting(t *testing.T) { + client := setupTestClient(t) + src := NewReindexSource().Index("source").Query(NewMatchQuery("company", "cat")) + dst := NewReindexDestination().Index("dest").Routing("=cat") + out, err := client.Reindex().Source(src).Destination(dst).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"dest":{"index":"dest","routing":"=cat"},"source":{"index":"source","query":{"match":{"company":{"query":"cat"}}}}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestReindex(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0))) + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "2.3.0" { + t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion) + } + + sourceCount, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + // Simple copying + src := NewReindexSource().Index(testIndexName) + dst := NewReindexDestination().Index(testIndexName2) + res, err := client.Reindex().Source(src).Destination(dst).Refresh("true").Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result != nil") + } + if res.Total != sourceCount { + t.Errorf("expected %d, got %d", sourceCount, res.Total) + } + if res.Updated != 0 { + t.Errorf("expected %d, got %d", 0, res.Updated) + } + if res.Created != sourceCount { + t.Errorf("expected %d, got %d", sourceCount, res.Created) + } + + targetCount, err = client.Count(testIndexName2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/request.go b/vendor/gopkg.in/olivere/elastic.v5/request.go new file mode 100644 index 000000000..e0e71b5e3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/request.go @@ -0,0 +1,123 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"runtime"
+	"strings"
+)
+
+// Request is an Elasticsearch-specific HTTP request.
+type Request http.Request
+
+// NewRequest creates a new Request, wrapping http.Request and adding
+// features such as encoding the body.
+func NewRequest(method, url string) (*Request, error) {
+	req, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+	req.Header.Add("Accept", "application/json")
+	return (*Request)(req), nil
+}
+
+// SetBasicAuth wraps http.Request's SetBasicAuth.
+func (r *Request) SetBasicAuth(username, password string) {
+	((*http.Request)(r)).SetBasicAuth(username, password)
+}
+
+// SetBody encodes the body in the request. Optionally, it performs GZIP compression.
+func (r *Request) SetBody(body interface{}, gzipCompress bool) error {
+	switch b := body.(type) {
+	case string:
+		if gzipCompress {
+			return r.setBodyGzip(b)
+		} else {
+			return r.setBodyString(b)
+		}
+	default:
+		if gzipCompress {
+			return r.setBodyGzip(body)
+		} else {
+			return r.setBodyJson(body)
+		}
+	}
+}
+
+// setBodyJson encodes the body as a struct to be marshaled via json.Marshal.
+func (r *Request) setBodyJson(data interface{}) error {
+	body, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	r.Header.Set("Content-Type", "application/json")
+	r.setBodyReader(bytes.NewReader(body))
+	return nil
+}
+
+// setBodyString encodes the body as a string.
+func (r *Request) setBodyString(body string) error {
+	return r.setBodyReader(strings.NewReader(body))
+}
+
+// setBodyGzip gzips the body. It accepts both strings and structs as body.
+// The latter will be encoded via json.Marshal.
+func (r *Request) setBodyGzip(body interface{}) error {
+	switch b := body.(type) {
+	case string:
+		buf := new(bytes.Buffer)
+		w := gzip.NewWriter(buf)
+		if _, err := w.Write([]byte(b)); err != nil {
+			return err
+		}
+		if err := w.Close(); err != nil {
+			return err
+		}
+		r.Header.Add("Content-Encoding", "gzip")
+		r.Header.Add("Vary", "Accept-Encoding")
+		return r.setBodyReader(bytes.NewReader(buf.Bytes()))
+	default:
+		data, err := json.Marshal(b)
+		if err != nil {
+			return err
+		}
+		buf := new(bytes.Buffer)
+		w := gzip.NewWriter(buf)
+		if _, err := w.Write(data); err != nil {
+			return err
+		}
+		if err := w.Close(); err != nil {
+			return err
+		}
+		r.Header.Add("Content-Encoding", "gzip")
+		r.Header.Add("Vary", "Accept-Encoding")
+		r.Header.Set("Content-Type", "application/json")
+		return r.setBodyReader(bytes.NewReader(buf.Bytes()))
+	}
+}
+
+// setBodyReader writes the body from an io.Reader.
+func (r *Request) setBodyReader(body io.Reader) error {
+	rc, ok := body.(io.ReadCloser)
+	if !ok && body != nil {
+		rc = ioutil.NopCloser(body)
+	}
+	r.Body = rc
+	if body != nil {
+		switch v := body.(type) {
+		case *strings.Reader:
+			r.ContentLength = int64(v.Len())
+		case *bytes.Buffer:
+			r.ContentLength = int64(v.Len())
+		}
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/request_test.go b/vendor/gopkg.in/olivere/elastic.v5/request_test.go
new file mode 100644
index 000000000..2a2d229df
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/request_test.go
@@ -0,0 +1,107 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
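+//
+// (Illustrative sketch of the Request type exercised by these benchmarks;
+// the URL path is hypothetical. SetBody chooses string or JSON encoding and
+// optionally gzip-compresses the payload:)
+//
+//	req, _ := NewRequest("POST", "/my-index/_search")
+//	_ = req.SetBody(`{"query":{"match_all":{}}}`, true) // string body, gzipped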
+ +package elastic + +import "testing" + +var testReq *Request // used as a temporary variable to avoid compiler optimizations in tests/benchmarks + +func BenchmarkRequestSetBodyString(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := `{"query":{"match_all":{}}}` + err = req.SetBody(body, false) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} + +func BenchmarkRequestSetBodyStringGzip(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := `{"query":{"match_all":{}}}` + err = req.SetBody(body, true) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} + +func BenchmarkRequestSetBodyBytes(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := []byte(`{"query":{"match_all":{}}}`) + err = req.SetBody(body, false) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} + +func BenchmarkRequestSetBodyBytesGzip(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := []byte(`{"query":{"match_all":{}}}`) + err = req.SetBody(body, true) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} + +func BenchmarkRequestSetBodyMap(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := map[string]interface{}{ + "query": map[string]interface{}{ + "match_all": map[string]interface{}{}, + }, + } + err = req.SetBody(body, false) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} + +func BenchmarkRequestSetBodyMapGzip(b *testing.B) { + req, err := NewRequest("GET", "/") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + body := map[string]interface{}{ + "query": map[string]interface{}{ + "match_all": map[string]interface{}{}, + }, + } + err = req.SetBody(body, true) + if err != nil { + b.Fatal(err) + } + } + testReq = req +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/rescore.go b/vendor/gopkg.in/olivere/elastic.v5/rescore.go new file mode 100644 index 000000000..9b7eaee1d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/rescore.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
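+//
+// (Illustrative sketch of the types defined below: a Rescore wraps a Rescorer
+// such as QueryRescorer, which re-scores the top window_size hits of a query;
+// NewMatchQuery is defined elsewhere in this package:)
+//
+//	r := NewRescore().WindowSize(50).
+//		Rescorer(NewQueryRescorer(NewMatchQuery("title", "brown fox")).QueryWeight(0.7))
+//	src, err := r.Source()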
+ +package elastic + +type Rescore struct { + rescorer Rescorer + windowSize *int + defaultRescoreWindowSize *int +} + +func NewRescore() *Rescore { + return &Rescore{} +} + +func (r *Rescore) WindowSize(windowSize int) *Rescore { + r.windowSize = &windowSize + return r +} + +func (r *Rescore) IsEmpty() bool { + return r.rescorer == nil +} + +func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { + r.rescorer = rescorer + return r +} + +func (r *Rescore) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.windowSize != nil { + source["window_size"] = *r.windowSize + } else if r.defaultRescoreWindowSize != nil { + source["window_size"] = *r.defaultRescoreWindowSize + } + rescorerSrc, err := r.rescorer.Source() + if err != nil { + return nil, err + } + source[r.rescorer.Name()] = rescorerSrc + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/rescorer.go b/vendor/gopkg.in/olivere/elastic.v5/rescorer.go new file mode 100644 index 000000000..ccd4bb854 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/rescorer.go @@ -0,0 +1,64 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +type Rescorer interface { + Name() string + Source() (interface{}, error) +} + +// -- Query Rescorer -- + +type QueryRescorer struct { + query Query + rescoreQueryWeight *float64 + queryWeight *float64 + scoreMode string +} + +func NewQueryRescorer(query Query) *QueryRescorer { + return &QueryRescorer{ + query: query, + } +} + +func (r *QueryRescorer) Name() string { + return "query" +} + +func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { + r.rescoreQueryWeight = &rescoreQueryWeight + return r +} + +func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { + r.queryWeight = &queryWeight + return r +} + +func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { + r.scoreMode = scoreMode + return r +} + +func (r *QueryRescorer) Source() (interface{}, error) { + rescoreQuery, err := r.query.Source() + if err != nil { + return nil, err + } + + source := make(map[string]interface{}) + source["rescore_query"] = rescoreQuery + if r.queryWeight != nil { + source["query_weight"] = *r.queryWeight + } + if r.rescoreQueryWeight != nil { + source["rescore_query_weight"] = *r.rescoreQueryWeight + } + if r.scoreMode != "" { + source["score_mode"] = r.scoreMode + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/response.go b/vendor/gopkg.in/olivere/elastic.v5/response.go new file mode 100644 index 000000000..e7380d98a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/response.go @@ -0,0 +1,43 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// Response represents a response from Elasticsearch. +type Response struct { + // StatusCode is the HTTP status code, e.g. 200. + StatusCode int + // Header is the HTTP header from the HTTP response. + // Keys in the map are canonicalized (see http.CanonicalHeaderKey). + Header http.Header + // Body is the deserialized response body. + Body json.RawMessage +} + +// newResponse creates a new response from the HTTP response. 
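+// The body is kept as raw JSON (json.RawMessage) so that callers can decode
+// it into their own types later, e.g. (illustrative):
+//
+//	var out map[string]interface{}
+//	err := json.Unmarshal(resp.Body, &out)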
+func (c *Client) newResponse(res *http.Response) (*Response, error) {
+	r := &Response{
+		StatusCode: res.StatusCode,
+		Header:     res.Header,
+	}
+	if res.Body != nil {
+		slurp, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return nil, err
+		}
+		// HEAD requests may return no content; only decode non-empty bodies
+		if len(slurp) > 0 {
+			if err := c.decoder.Decode(slurp, &r.Body); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return r, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh
new file mode 100755
index 000000000..08c67ea17
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh
@@ -0,0 +1 @@
+docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-beta1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh
new file mode 100755
index 000000000..d4586acca
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh
@@ -0,0 +1 @@
+docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-rc1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh
new file mode 100755
index 000000000..e7a98c8fc
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh
@@ -0,0 +1 @@
+docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh
new file mode 100755
index 000000000..528670211
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh
@@ -0,0 +1 @@
+docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/script.go b/vendor/gopkg.in/olivere/elastic.v5/script.go
new file mode 100644
index 000000000..57b4d74fd
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/script.go
@@ -0,0 +1,131 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// Script holds all the parameters necessary to compile or find in cache
+// and then execute a script.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
+// for details of scripting.
+type Script struct {
+	script string
+	typ    string
+	lang   string
+	params map[string]interface{}
+}
+
+// NewScript creates and initializes a new Script.
+func NewScript(script string) *Script {
+	return &Script{
+		script: script,
+		typ:    "", // default type is "inline"
+		params: make(map[string]interface{}),
+	}
+}
+
+// NewScriptInline creates and initializes a new Script of type "inline".
+func NewScriptInline(script string) *Script {
+	return NewScript(script).Type("inline")
+}
+
+// NewScriptId creates and initializes a new Script of type "id".
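+//
+// An "id" script references a stored script rather than inline source; as in
+// script_test.go below, it serializes to (illustrative):
+//
+//	s := NewScriptId("script-with-id").Param("factor", 2.0)
+//	// Source() yields {"id":"script-with-id","params":{"factor":2}}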
+func NewScriptId(script string) *Script { + return NewScript(script).Type("id") +} + +// NewScriptFile creates and initializes a new Script of type "file". +func NewScriptFile(script string) *Script { + return NewScript(script).Type("file") +} + +// Script is either the cache key of the script to be compiled/executed +// or the actual script source code for inline scripts. For indexed +// scripts this is the id used in the request. For file scripts this is +// the file name. +func (s *Script) Script(script string) *Script { + s.script = script + return s +} + +// Type sets the type of script: "inline", "id", or "file". +func (s *Script) Type(typ string) *Script { + s.typ = typ + return s +} + +// Lang sets the language of the script. Permitted values are "groovy", +// "expression", "mustache", "mvel" (default), "javascript", "python". +// To use certain languages, you need to configure your server and/or +// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// for details. +func (s *Script) Lang(lang string) *Script { + s.lang = lang + return s +} + +// Param adds a key/value pair to the parameters that this script will be executed with. +func (s *Script) Param(name string, value interface{}) *Script { + if s.params == nil { + s.params = make(map[string]interface{}) + } + s.params[name] = value + return s +} + +// Params sets the map of parameters this script will be executed with. +func (s *Script) Params(params map[string]interface{}) *Script { + s.params = params + return s +} + +// Source returns the JSON serializable data for this Script. +func (s *Script) Source() (interface{}, error) { + if s.typ == "" && s.lang == "" && len(s.params) == 0 { + return s.script, nil + } + source := make(map[string]interface{}) + if s.typ == "" { + source["inline"] = s.script + } else { + source[s.typ] = s.script + } + if s.lang != "" { + source["lang"] = s.lang + } + if len(s.params) > 0 { + source["params"] = s.params + } + return source, nil +} + +// -- Script Field -- + +// ScriptField is a single script field. +type ScriptField struct { + FieldName string // name of the field + + script *Script +} + +// NewScriptField creates and initializes a new ScriptField. +func NewScriptField(fieldName string, script *Script) *ScriptField { + return &ScriptField{FieldName: fieldName, script: script} +} + +// Source returns the serializable JSON for the ScriptField. +func (f *ScriptField) Source() (interface{}, error) { + if f.script == nil { + return nil, errors.New("ScriptField expects script") + } + source := make(map[string]interface{}) + src, err := f.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/script_test.go b/vendor/gopkg.in/olivere/elastic.v5/script_test.go new file mode 100644 index 000000000..355e13a06 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/script_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
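+//
+// (Illustrative sketch of ScriptField from script.go; the field name and the
+// doc value are hypothetical. It attaches a script-computed value to each hit:)
+//
+//	f := NewScriptField("doubled", NewScriptInline("doc['num'].value * 2"))
+//	src, err := f.Source() // {"script":{"inline":"doc['num'].value * 2"}}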
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestScriptingDefault(t *testing.T) { + builder := NewScript("doc['field'].value * 2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `"doc['field'].value * 2"` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingInline(t *testing.T) { + builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inline":"doc['field'].value * factor","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingId(t *testing.T) { + builder := NewScriptId("script-with-id").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"id":"script-with-id","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingFile(t *testing.T) { + builder := NewScriptFile("script-file").Param("factor", 2.0).Lang("groovy") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"file":"script-file","lang":"groovy","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/scroll.go b/vendor/gopkg.in/olivere/elastic.v5/scroll.go new file mode 100644 index 000000000..feabb5cda --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/scroll.go @@ -0,0 +1,447 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "io" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +const ( + // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive. + DefaultScrollKeepAlive = "5m" +) + +// ScrollService iterates over pages of search results from Elasticsearch. +type ScrollService struct { + client *Client + indices []string + types []string + keepAlive string + body interface{} + ss *SearchSource + size *int + pretty bool + routing string + preference string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + + mu sync.RWMutex + scrollId string +} + +// NewScrollService initializes and returns a new ScrollService. +func NewScrollService(client *Client) *ScrollService { + builder := &ScrollService{ + client: client, + ss: NewSearchSource(), + keepAlive: DefaultScrollKeepAlive, + } + return builder +} + +// Index sets the name of one or more indices to iterate over. +func (s *ScrollService) Index(indices ...string) *ScrollService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type sets the name of one or more types to iterate over. 
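+//
+// Typical iteration (a sketch; assumes a configured *Client named client, a
+// context.Context named ctx, and hypothetical index/type names):
+//
+//	svc := client.Scroll("my-index").Type("my-type").Size(100)
+//	for {
+//		res, err := svc.Do(ctx)
+//		if err == io.EOF {
+//			break // no more pages
+//		}
+//		if err != nil {
+//			// handle error
+//		}
+//		// consume res.Hits.Hits ...
+//	}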
+func (s *ScrollService) Type(types ...string) *ScrollService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// KeepAlive sets the maximum time after which the cursor will expire.
+// It is "5m" by default (DefaultScrollKeepAlive).
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// Size specifies the number of documents Elasticsearch should return
+// from each shard, per page.
+func (s *ScrollService) Size(size int) *ScrollService {
+	s.size = &size
+	return s
+}
+
+// Body sets the raw body to send to Elasticsearch. This can be e.g. a string,
+// a map[string]interface{} or anything that can be serialized into JSON.
+// Notice that setting the body disables the use of SearchSource and many
+// other properties of the ScrollService.
+func (s *ScrollService) Body(body interface{}) *ScrollService {
+	s.body = body
+	return s
+}
+
+// SearchSource sets the search source builder to use with this iterator.
+// Notice that only a certain number of properties can be used when scrolling,
+// e.g. query and sorting.
+func (s *ScrollService) SearchSource(searchSource *SearchSource) *ScrollService {
+	s.ss = searchSource
+	if s.ss == nil {
+		s.ss = NewSearchSource()
+	}
+	return s
+}
+
+// Query sets the query to perform, e.g. a MatchAllQuery.
+func (s *ScrollService) Query(query Query) *ScrollService {
+	s.ss = s.ss.Query(query)
+	return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
+// for details.
+func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
+	s.ss = s.ss.PostFilter(postFilter)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService {
+	s.ss = s.ss.FetchSource(fetchSource)
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScrollService {
+	s.ss = s.ss.FetchSourceContext(fetchSourceContext)
+	return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
+func (s *ScrollService) Version(version bool) *ScrollService {
+	s.ss = s.ss.Version(version)
+	return s
+}
+
+// Sort adds a sort order. This can have negative effects on the performance
+// of the scroll operation as Elasticsearch needs to sort first.
+func (s *ScrollService) Sort(field string, ascending bool) *ScrollService {
+	s.ss = s.ss.Sort(field, ascending)
+	return s
+}
+
+// SortWithInfo specifies a sort order. Notice that sorting can have a
+// negative impact on scroll performance.
+func (s *ScrollService) SortWithInfo(info SortInfo) *ScrollService {
+	s.ss = s.ss.SortWithInfo(info)
+	return s
+}
+
+// SortBy specifies a sort order. Notice that sorting can have a
+// negative impact on scroll performance.
+func (s *ScrollService) SortBy(sorter ...Sorter) *ScrollService {
+	s.ss = s.ss.SortBy(sorter...)
+ return s +} + +// Pretty asks Elasticsearch to pretty-print the returned JSON. +func (s *ScrollService) Pretty(pretty bool) *ScrollService { + s.pretty = pretty + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *ScrollService) Routing(routings ...string) *ScrollService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards ("random"). Can be set to "_local" to prefer +// local shards, "_primary" to execute on primary shards only, +// or a custom value which guarantees that the same order will be used +// across different requests. +func (s *ScrollService) Preference(preference string) *ScrollService { + s.preference = preference + return s +} + +// IgnoreUnavailable indicates whether the specified concrete indices +// should be ignored when unavailable (missing or closed). +func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string +// or when no indices have been specified). +func (s *ScrollService) AllowNoIndices(allowNoIndices bool) *ScrollService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *ScrollService) ExpandWildcards(expandWildcards string) *ScrollService { + s.expandWildcards = expandWildcards + return s +} + +// ScrollId specifies the identifier of a scroll in action. +func (s *ScrollService) ScrollId(scrollId string) *ScrollService { + s.mu.Lock() + s.scrollId = scrollId + s.mu.Unlock() + return s +} + +// Do returns the next search result. It will return io.EOF as error if there +// are no more search results. +func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) { + s.mu.RLock() + nextScrollId := s.scrollId + s.mu.RUnlock() + if len(nextScrollId) == 0 { + return s.first(ctx) + } + return s.next(ctx) +} + +// Clear cancels the current scroll operation. If you don't do this manually, +// the scroll will be expired automatically by Elasticsearch. You can control +// how long a scroll cursor is kept alive with the KeepAlive func. +func (s *ScrollService) Clear(ctx context.Context) error { + s.mu.RLock() + scrollId := s.scrollId + s.mu.RUnlock() + if len(scrollId) == 0 { + return nil + } + + path := "/_search/scroll" + params := url.Values{} + body := struct { + ScrollId []string `json:"scroll_id,omitempty"` + }{ + ScrollId: []string{scrollId}, + } + + _, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) + if err != nil { + return err + } + + return nil +} + +// -- First -- + +// first takes the first page of search results. 
+func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) { + // Get URL and parameters for request + path, params, err := s.buildFirstURL() + if err != nil { + return nil, err + } + + // Get HTTP request body + body, err := s.bodyFirst() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + s.mu.Lock() + s.scrollId = ret.ScrollId + s.mu.Unlock() + if ret.Hits == nil || len(ret.Hits.Hits) == 0 { + return nil, io.EOF + } + return ret, nil +} + +// buildFirstURL builds the URL for retrieving the first page. +func (s *ScrollService) buildFirstURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.indices) == 0 && len(s.types) == 0 { + path = "/_search" + } else if len(s.indices) > 0 && len(s.types) == 0 { + path, err = uritemplates.Expand("/{index}/_search", map[string]string{ + "index": strings.Join(s.indices, ","), + }) + } else if len(s.indices) == 0 && len(s.types) > 0 { + path, err = uritemplates.Expand("/_all/{typ}/_search", map[string]string{ + "typ": strings.Join(s.types, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/{typ}/_search", map[string]string{ + "index": strings.Join(s.indices, ","), + "typ": strings.Join(s.types, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.keepAlive) > 0 { + params.Set("scroll", s.keepAlive) + } + if len(s.routing) > 0 { + params.Set("routing", s.routing) + } + if len(s.preference) > 0 { + params.Set("preference", s.preference) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if len(s.expandWildcards) > 0 { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// bodyFirst returns the request to fetch the first batch of results. 
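+// When neither a raw body nor a sort order has been set, it falls back to
+// sorting by _doc, the cheapest order for scrolling (see below).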
+func (s *ScrollService) bodyFirst() (interface{}, error) {
+	var err error
+	var body interface{}
+
+	if s.body != nil {
+		body = s.body
+	} else {
+		// Use _doc sort by default if none is specified
+		if !s.ss.hasSort() {
+			// Use efficient sorting when no user-defined query/body is specified
+			s.ss = s.ss.SortBy(SortByDoc{})
+		}
+
+		// Body from search source
+		body, err = s.ss.Source()
+		if err != nil {
+			return nil, err
+		}
+
+		// Slicing (in ES 5.x+)
+		/*
+			if s.slice != nil {
+				src, err := s.slice.Source()
+				if err != nil {
+					return nil, err
+				}
+				body["slice"] = src
+			}
+		*/
+	}
+
+	return body, nil
+}
+
+// -- Next --
+
+func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) {
+	// Get URL for request
+	path, params, err := s.buildNextURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	body, err := s.bodyNext()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(SearchResult)
+	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+		return nil, err
+	}
+	s.mu.Lock()
+	s.scrollId = ret.ScrollId
+	s.mu.Unlock()
+	if ret.Hits == nil || len(ret.Hits.Hits) == 0 {
+		return nil, io.EOF
+	}
+	return ret, nil
+}
+
+// buildNextURL builds the URL for the operation.
+func (s *ScrollService) buildNextURL() (string, url.Values, error) {
+	path := "/_search/scroll"
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+
+	return path, params, nil
+}
+
+// bodyNext returns the request to fetch the next batch of results.
+func (s *ScrollService) bodyNext() (interface{}, error) {
+	s.mu.RLock()
+	body := struct {
+		Scroll   string `json:"scroll"`
+		ScrollId string `json:"scroll_id,omitempty"`
+	}{
+		Scroll:   s.keepAlive,
+		ScrollId: s.scrollId,
+	}
+	s.mu.RUnlock()
+	return body, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go b/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go
new file mode 100644
index 000000000..24ebbf4f7
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go
@@ -0,0 +1,328 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"io"
+	_ "net/http"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestScroll(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Should return all documents. Just don't call Do yet!
+	svc := client.Scroll(testIndexName).Size(1)
+
+	pages := 0
+	docs := 0
+
+	for {
+		res, err := svc.Do(context.TODO())
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res == nil {
+			t.Fatal("expected results != nil; got nil")
+		}
+		if res.Hits == nil {
+			t.Fatal("expected results.Hits != nil; got nil")
+		}
+		if want, have := int64(3), res.Hits.TotalHits; want != have {
+			t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have)
+		}
+		if want, have := 1, len(res.Hits.Hits); want != have {
+			t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have)
+		}
+
+		pages++
+
+		for _, hit := range res.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			docs++
+		}
+
+		if len(res.ScrollId) == 0 {
+			t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
+		}
+	}
+
+	if want, have := 3, pages; want != have {
+		t.Fatalf("expected to retrieve %d pages; got %d", want, have)
+	}
+	if want, have := 3, docs; want != have {
+		t.Fatalf("expected to retrieve %d hits; got %d", want, have)
+	}
+
+	err = svc.Clear(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = svc.Do(context.TODO())
+	if err == nil {
+		t.Fatal("expected error when scrolling after Clear; got nil")
+	}
+}
+
+func TestScrollWithQueryAndSort(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+	// client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a scroll service that returns tweets from user olivere
+	// and returns them sorted by "message", in reverse order.
+	//
+	// Just don't call Do yet!
+	svc := client.Scroll(testIndexName).
+		Query(NewTermQuery("user", "olivere")).
+		Sort("message", false).
+ Size(1) + + docs := 0 + pages := 0 + for { + res, err := svc.Do(context.TODO()) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected results != nil; got nil") + } + if res.Hits == nil { + t.Fatal("expected results.Hits != nil; got nil") + } + if want, have := int64(2), res.Hits.TotalHits; want != have { + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have) + } + if want, have := 1, len(res.Hits.Hits); want != have { + t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have) + } + + pages++ + + for _, hit := range res.Hits.Hits { + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + docs++ + } + } + + if want, have := 2, pages; want != have { + t.Fatalf("expected to retrieve %d pages; got %d", want, have) + } + if want, have := 2, docs; want != have { + t.Fatalf("expected to retrieve %d hits; got %d", want, have) + } +} + +func TestScrollWithBody(t *testing.T) { + // client := setupTestClientAndCreateIndexAndLog(t) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Test with simple strings and a map + var tests = []struct { + Body interface{} + ExpectedTotalHits int64 + ExpectedDocs int + ExpectedPages int + }{ + { + Body: `{"query":{"match_all":{}}}`, + ExpectedTotalHits: 3, + ExpectedDocs: 3, + ExpectedPages: 3, + }, + { + Body: `{"query":{"term":{"user":"olivere"}},"sort":["_doc"]}`, + ExpectedTotalHits: 2, + ExpectedDocs: 2, + ExpectedPages: 2, + }, + { + Body: `{"query":{"term":{"user":"olivere"}},"sort":[{"retweets":"desc"}]}`, + ExpectedTotalHits: 2, + ExpectedDocs: 2, + ExpectedPages: 2, + }, + { + Body: map[string]interface{}{ + "query": map[string]interface{}{ + "term": map[string]interface{}{ + "user": "olivere", + }, + }, + "sort": []interface{}{"_doc"}, + }, + ExpectedTotalHits: 2, + ExpectedDocs: 2, + ExpectedPages: 2, + }, + } + + for i, tt := range tests { + // Should return all documents. Just don't call Do yet! 
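+		// Editorial annotation: Body accepts a raw JSON string or any
+		// JSON-serializable value (the table above uses both); a non-nil
+		// body bypasses the SearchSource in bodyFirst and is sent verbatim.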
+ svc := client.Scroll(testIndexName).Size(1).Body(tt.Body) + + pages := 0 + docs := 0 + + for { + res, err := svc.Do(context.TODO()) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("#%d: expected results != nil; got nil", i) + } + if res.Hits == nil { + t.Fatalf("#%d: expected results.Hits != nil; got nil", i) + } + if want, have := tt.ExpectedTotalHits, res.Hits.TotalHits; want != have { + t.Fatalf("#%d: expected results.Hits.TotalHits = %d; got %d", i, want, have) + } + if want, have := 1, len(res.Hits.Hits); want != have { + t.Fatalf("#%d: expected len(results.Hits.Hits) = %d; got %d", i, want, have) + } + + pages++ + + for _, hit := range res.Hits.Hits { + if hit.Index != testIndexName { + t.Fatalf("#%d: expected SearchResult.Hits.Hit.Index = %q; got %q", i, testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + docs++ + } + + if len(res.ScrollId) == 0 { + t.Fatalf("#%d: expected scrollId in results; got %q", i, res.ScrollId) + } + } + + if want, have := tt.ExpectedPages, pages; want != have { + t.Fatalf("#%d: expected to retrieve %d pages; got %d", i, want, have) + } + if want, have := tt.ExpectedDocs, docs; want != have { + t.Fatalf("#%d: expected to retrieve %d hits; got %d", i, want, have) + } + + err = svc.Clear(context.TODO()) + if err != nil { + t.Fatalf("#%d: failed to clear scroll context: %v", i, err) + } + + _, err = svc.Do(context.TODO()) + if err == nil { + t.Fatalf("#%d: failed to clear scroll context: %v", i, err) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search.go b/vendor/gopkg.in/olivere/elastic.v5/search.go new file mode 100644 index 000000000..3017cd090 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search.go @@ -0,0 +1,488 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// Search for documents in Elasticsearch. +type SearchService struct { + client *Client + searchSource *SearchSource + source interface{} + pretty bool + searchType string + index []string + typ []string + routing string + preference string + requestCache *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewSearchService creates a new service for searching in Elasticsearch. +func NewSearchService(client *Client) *SearchService { + builder := &SearchService{ + client: client, + searchSource: NewSearchSource(), + } + return builder +} + +// SearchSource sets the search source builder to use with this service. +func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource() + } + return s +} + +// Source allows the user to set the request body manually without using +// any of the structs and interfaces in Elastic. +func (s *SearchService) Source(source interface{}) *SearchService { + s.source = source + return s +} + +// Index sets the names of the indices to use for search. +func (s *SearchService) Index(index ...string) *SearchService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) 
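+	// Editorial annotation: index names accumulate across calls and are
+	// joined with commas when buildURL assembles the request path.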
+ return s +} + +// Types adds search restrictions for a list of types. +func (s *SearchService) Type(typ ...string) *SearchService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *SearchService) Pretty(pretty bool) *SearchService { + s.pretty = pretty + return s +} + +// Timeout sets the timeout to use, e.g. "1s" or "1000ms". +func (s *SearchService) Timeout(timeout string) *SearchService { + s.searchSource = s.searchSource.Timeout(timeout) + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { + s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) + return s +} + +// SearchType sets the search operation type. Valid values are: +// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", +// "dfs_query_and_fetch", "count", "scan". +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-type.html +// for details. +func (s *SearchService) SearchType(searchType string) *SearchService { + s.searchType = searchType + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *SearchService) Routing(routings ...string) *SearchService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards ("random"). Can be set to "_local" to prefer +// local shards, "_primary" to execute on primary shards only, +// or a custom value which guarantees that the same order will be used +// across different requests. +func (s *SearchService) Preference(preference string) *SearchService { + s.preference = preference + return s +} + +// RequestCache indicates whether the cache should be used for this +// request or not, defaults to index level setting. +func (s *SearchService) RequestCache(requestCache bool) *SearchService { + s.requestCache = &requestCache + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *SearchService) Query(query Query) *SearchService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. +func (s *SearchService) PostFilter(postFilter Query) *SearchService { + s.searchSource = s.searchSource.PostFilter(postFilter) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *SearchService) FetchSource(fetchSource bool) *SearchService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + +// Highlight adds highlighting to the search. +func (s *SearchService) Highlight(highlight *Highlight) *SearchService { + s.searchSource = s.searchSource.Highlight(highlight) + return s +} + +// GlobalSuggestText defines the global text to use with all suggesters. +// This avoids repetition. 
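+//
+// Editorial sketch (mySuggester stands for any Suggester implementation):
+//
+//	svc := NewSearchService(client).Index("tweets").
+//		GlobalSuggestText("golang").
+//		Suggester(mySuggester)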
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService { + s.searchSource = s.searchSource.GlobalSuggestText(globalText) + return s +} + +// Suggester adds a suggester to the search. +func (s *SearchService) Suggester(suggester Suggester) *SearchService { + s.searchSource = s.searchSource.Suggester(suggester) + return s +} + +// Aggregation adds an aggreation to perform as part of the search. +func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService { + s.searchSource = s.searchSource.Aggregation(name, aggregation) + return s +} + +// MinScore sets the minimum score below which docs will be filtered out. +func (s *SearchService) MinScore(minScore float64) *SearchService { + s.searchSource = s.searchSource.MinScore(minScore) + return s +} + +// From index to start the search from. Defaults to 0. +func (s *SearchService) From(from int) *SearchService { + s.searchSource = s.searchSource.From(from) + return s +} + +// Size is the number of search hits to return. Defaults to 10. +func (s *SearchService) Size(size int) *SearchService { + s.searchSource = s.searchSource.Size(size) + return s +} + +// Explain indicates whether each search hit should be returned with +// an explanation of the hit (ranking). +func (s *SearchService) Explain(explain bool) *SearchService { + s.searchSource = s.searchSource.Explain(explain) + return s +} + +// Version indicates whether each search hit should be returned with +// a version associated to it. +func (s *SearchService) Version(version bool) *SearchService { + s.searchSource = s.searchSource.Version(version) + return s +} + +// Sort adds a sort order. +func (s *SearchService) Sort(field string, ascending bool) *SearchService { + s.searchSource = s.searchSource.Sort(field, ascending) + return s +} + +// SortWithInfo adds a sort order. +func (s *SearchService) SortWithInfo(info SortInfo) *SearchService { + s.searchSource = s.searchSource.SortWithInfo(info) + return s +} + +// SortBy adds a sort order. +func (s *SearchService) SortBy(sorter ...Sorter) *SearchService { + s.searchSource = s.searchSource.SortBy(sorter...) + return s +} + +// NoStoredFields indicates that no stored fields should be loaded, resulting in only +// id and type to be returned per field. +func (s *SearchService) NoStoredFields() *SearchService { + s.searchSource = s.searchSource.NoStoredFields() + return s +} + +// StoredField adds a single field to load and return (note, must be stored) as +// part of the search request. If none are specified, the source of the +// document will be returned. +func (s *SearchService) StoredField(fieldName string) *SearchService { + s.searchSource = s.searchSource.StoredField(fieldName) + return s +} + +// StoredFields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchService) StoredFields(fields ...string) *SearchService { + s.searchSource = s.searchSource.StoredFields(fields...) + return s +} + +// IgnoreUnavailable indicates whether the specified concrete indices +// should be ignored when unavailable (missing or closed). +func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string +// or when no indices have been specified). 
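+// (Editorial note: when set, this is sent as the allow_no_indices
+// query-string parameter; see buildURL below.)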
+func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService { + s.expandWildcards = expandWildcards + return s +} + +// buildURL builds the URL for the operation. +func (s *SearchService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_search" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *SearchService) Validate() error { + return nil +} + +// Do executes the search and returns a SearchResult. +func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Perform request + var body interface{} + if s.source != nil { + body = s.source + } else { + src, err := s.searchSource.Source() + if err != nil { + return nil, err + } + body = src + } + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return search results + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// SearchResult is the result of a search in Elasticsearch. 
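+//
+// Editorial sketch of end-to-end usage (names are illustrative):
+//
+//	res, err := NewSearchService(client).Index("tweets").
+//		Query(NewTermQuery("user", "olivere")).
+//		Sort("message", false).
+//		From(0).Size(10).
+//		Do(ctx)
+//	if err == nil && res.Hits != nil {
+//		for _, hit := range res.Hits.Hits {
+//			var tw tweet // tweet: a caller-defined struct
+//			_ = json.Unmarshal(*hit.Source, &tw)
+//		}
+//	}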
+type SearchResult struct {
+	TookInMillis int64         `json:"took"`         // search time in milliseconds
+	ScrollId     string        `json:"_scroll_id"`   // only used with Scroll and Scan operations
+	Hits         *SearchHits   `json:"hits"`         // the actual search hits
+	Suggest      SearchSuggest `json:"suggest"`      // results from suggesters
+	Aggregations Aggregations  `json:"aggregations"` // results from aggregations
+	TimedOut     bool          `json:"timed_out"`    // true if the search timed out
+	//Error string `json:"error,omitempty"` // used in MultiSearch only
+	// TODO double-check that MultiGet now returns detailed error information
+	Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
+}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+	if r.Hits != nil {
+		return r.Hits.TotalHits
+	}
+	return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Note that Each silently ignores errors when
+// deserializing the JSON of a hit.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+	if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+		return nil
+	}
+	var slice []interface{}
+	for _, hit := range r.Hits.Hits {
+		v := reflect.New(typ).Elem()
+		if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+			slice = append(slice, v.Interface())
+		}
+	}
+	return slice
+}
+
+// SearchHits specifies the list of search hits.
+type SearchHits struct {
+	TotalHits int64        `json:"total"`     // total number of hits found
+	MaxScore  *float64     `json:"max_score"` // maximum score of all hits
+	Hits      []*SearchHit `json:"hits"`      // the actual hits returned
+}
+
+// SearchHit is a single hit.
+type SearchHit struct {
+	Score          *float64                       `json:"_score"`          // computed score
+	Index          string                         `json:"_index"`          // index name
+	Type           string                         `json:"_type"`           // type meta field
+	Id             string                         `json:"_id"`             // external or internal
+	Uid            string                         `json:"_uid"`            // uid meta field (see MapperService.java for all meta fields)
+	Routing        string                         `json:"_routing"`        // routing meta field
+	Parent         string                         `json:"_parent"`         // parent meta field
+	Version        *int64                         `json:"_version"`        // version number, when Version is set to true in SearchService
+	Sort           []interface{}                  `json:"sort"`            // sort information
+	Highlight      SearchHitHighlight             `json:"highlight"`       // highlighter information
+	Source         *json.RawMessage               `json:"_source"`         // stored document source
+	Fields         map[string]interface{}         `json:"fields"`          // returned (stored) fields
+	Explanation    *SearchExplanation             `json:"_explanation"`    // explains how the score was computed
+	MatchedQueries []string                       `json:"matched_queries"` // matched queries
+	InnerHits      map[string]*SearchHitInnerHits `json:"inner_hits"`      // inner hits with ES >= 1.5.0
+
+	// Shard
+	// HighlightFields
+	// SortValues
+	// MatchedFilters
+}
+
+// SearchHitInnerHits contains the inner hits of a search hit.
+type SearchHitInnerHits struct {
+	Hits *SearchHits `json:"hits"`
+}
+
+// SearchExplanation explains how the score for a hit was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html.
+type SearchExplanation struct {
+	Value       float64             `json:"value"`             // e.g. 1.0
+	Description string              `json:"description"`       // e.g. "boost" or "ConstantScore(*:*), product of:"
+	Details     []SearchExplanation `json:"details,omitempty"` // recursive details
+}
+
+// Suggest
+
+// SearchSuggest is a map of suggestions.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
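+//
+// Editorial sketch ("my-suggester" is an illustrative suggester name):
+//
+//	for _, sugg := range res.Suggest["my-suggester"] {
+//		for _, opt := range sugg.Options {
+//			fmt.Println(opt.Text, opt.Score)
+//		}
+//	}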
+type SearchSuggest map[string][]SearchSuggestion + +// SearchSuggestion is a single search suggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []SearchSuggestionOption `json:"options"` +} + +// SearchSuggestionOption is an option of a SearchSuggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestionOption struct { + Text string `json:"text"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Score float64 `json:"_score"` + Source *json.RawMessage `json:"_source"` +} + +// Aggregations (see search_aggs.go) + +// Highlighting + +// SearchHitHighlight is the highlight information of a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +// for a general discussion of highlighting. +type SearchHitHighlight map[string][]string diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go new file mode 100644 index 000000000..73dc6a268 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go @@ -0,0 +1,1274 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" +) + +// Aggregations can be seen as a unit-of-work that build +// analytic information over a set of documents. It is +// (in many senses) the follow-up of facets in Elasticsearch. +// For more details about aggregations, visit: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html +type Aggregation interface { + // Source returns a JSON-serializable aggregation that is a fragment + // of the request sent to Elasticsearch. + Source() (interface{}, error) +} + +// Aggregations is a list of aggregations that are part of a search result. +type Aggregations map[string]*json.RawMessage + +// Min returns min aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Max returns max aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sum returns sum aggregation results. 
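+// Editorial sketch (the aggregation name is illustrative): every
+// single-value metric accessor here follows the same pattern, e.g.
+//
+//	if agg, found := res.Aggregations.Sum("total_retweets"); found && agg.Value != nil {
+//		total := *agg.Value
+//		_ = total
+//	}
+//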
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Avg returns average aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ValueCount returns value-count aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Cardinality returns cardinality aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Stats returns stats aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ExtendedStats returns extended stats aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationExtendedStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Percentiles returns percentiles results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// PercentileRanks returns percentile ranks results. 
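+// Editorial sketch (name and key format are illustrative; keys are the
+// requested percentiles rendered as strings):
+//
+//	if agg, found := res.Aggregations.Percentiles("load_time"); found {
+//		p99 := agg.Values["99.0"]
+//		_ = p99
+//	}
+//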
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// TopHits returns top-hits aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html +func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationTopHitsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Global returns global results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filter returns filter results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filters returns filters results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketFilters) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Missing returns missing results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Nested returns nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html +func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ReverseNested returns reverse-nested results. 
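+// Editorial sketch (names are illustrative): single-bucket results embed
+// Aggregations, so sub-aggregations are read from the bucket itself:
+//
+//	if nested, found := res.Aggregations.Nested("comments"); found {
+//		if avg, ok := nested.Avg("avg_length"); ok && avg.Value != nil {
+//			_ = *avg.Value
+//		}
+//	}
+//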
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Children returns children results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Terms returns terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SignificantTerms returns significant terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketSignificantTerms) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sampler returns sampler aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-sampler-aggregation.html +func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Range returns range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// KeyedRange returns keyed range aggregation results. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html. +func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyedRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateRange returns date range aggregation results. 
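+// Editorial sketch (aggregation name is illustrative): multi-bucket
+// results are walked via their Buckets slice:
+//
+//	if terms, found := res.Aggregations.Terms("by_user"); found {
+//		for _, b := range terms.Buckets {
+//			fmt.Println(b.Key, b.DocCount)
+//		}
+//	}
+//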
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html +func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// IPv4Range returns IPv4 range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html +func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Histogram returns histogram aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateHistogram returns date histogram aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoBounds returns geo-bounds aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationGeoBoundsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoHash returns geo-hash aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html +func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoDistance returns geo distance aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html +func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// AvgBucket returns average bucket pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SumBucket returns sum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MaxBucket returns maximum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MinBucket returns minimum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html +func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MovAvg returns moving average pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Derivative returns derivative pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineDerivative) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// CumulativeSum returns a cumulative sum pipeline aggregation results. 
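+// Editorial sketch (names are illustrative): pipeline values are usually
+// read per parent bucket, since each histogram bucket embeds Aggregations:
+//
+//	if csum, found := bucket.CumulativeSum("sales_total"); found && csum.Value != nil {
+//		_ = *csum.Value
+//	}
+//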
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// BucketScript returns bucket script pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SerialDiff returns serial differencing pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// -- Single value metric -- + +// AggregationValueMetric is a single-value metric, returned e.g. by a +// Min or Max aggregation. +type AggregationValueMetric struct { + Aggregations + + Value *float64 //`json:"value"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. +func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Stats metric -- + +// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. +type AggregationStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. 
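+// Editorial sketch: the field set above corresponds to a payload such as
+// (shape assumed from the Stats aggregation)
+//
+//	{"count":3,"min":3,"max":10,"avg":5.67,"sum":17}
+//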
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Extended stats metric -- + +// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. +type AggregationExtendedStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` + Variance *float64 //`json:"variance,omitempty"` + StdDeviation *float64 //`json:"std_deviation,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. +func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_of_squares"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfSquares) + } + if v, ok := aggs["variance"]; ok && v != nil { + json.Unmarshal(*v, &a.Variance) + } + if v, ok := aggs["std_deviation"]; ok && v != nil { + json.Unmarshal(*v, &a.StdDeviation) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Percentiles metric -- + +// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. +type AggregationPercentilesMetric struct { + Aggregations + + Values map[string]float64 // `json:"values"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. +func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["values"]; ok && v != nil { + json.Unmarshal(*v, &a.Values) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Top-hits metric -- + +// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. +type AggregationTopHitsMetric struct { + Aggregations + + Hits *SearchHits //`json:"hits"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. 
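+// Editorial sketch (aggregation name is illustrative): top-hits results
+// reuse SearchHits, so hits are read exactly as for a normal search:
+//
+//	if th, found := res.Aggregations.TopHits("recent"); found && th.Hits != nil {
+//		for _, hit := range th.Hits.Hits {
+//			_ = hit.Id
+//		}
+//	}
+//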
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + a.Aggregations = aggs + a.Hits = new(SearchHits) + if v, ok := aggs["hits"]; ok && v != nil { + json.Unmarshal(*v, &a.Hits) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + return nil +} + +// -- Geo-bounds metric -- + +// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. +type AggregationGeoBoundsMetric struct { + Aggregations + + Bounds struct { + TopLeft struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"top_left"` + BottomRight struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"bottom_right"` + } `json:"bounds"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. +func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["bounds"]; ok && v != nil { + json.Unmarshal(*v, &a.Bounds) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Single bucket -- + +// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. +type AggregationSingleBucket struct { + Aggregations + + DocCount int64 // `json:"doc_count"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. +func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket range items -- + +// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned +// with a range aggregation. +type AggregationBucketRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned +// with a keyed range aggregation. 
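+// Editorial sketch (name and key are illustrative; the key format is
+// assumed): keyed ranges index their buckets by range key rather than by
+// position:
+//
+//	if ranges, found := res.Aggregations.KeyedRange("prices"); found {
+//		bucket := ranges.Buckets["*-100.0"]
+//		_ = bucket
+//	}
+//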
+type AggregationBucketKeyedRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. +type AggregationBucketRangeItem struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + From *float64 //`json:"from"` + FromAsString string //`json:"from_as_string"` + To *float64 //`json:"to"` + ToAsString string //`json:"to_as_string"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. +func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["from"]; ok && v != nil { + json.Unmarshal(*v, &a.From) + } + if v, ok := aggs["from_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.FromAsString) + } + if v, ok := aggs["to"]; ok && v != nil { + json.Unmarshal(*v, &a.To) + } + if v, ok := aggs["to_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ToAsString) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket key items -- + +// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned +// with a terms aggregation. +type AggregationBucketKeyItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. +func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. 
+type AggregationBucketKeyItem struct { + Aggregations + + Key interface{} //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + KeyNumber json.Number + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. +func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + json.Unmarshal(*v, &a.KeyNumber) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket types for significant terms -- + +// AggregationBucketSignificantTerms is a bucket aggregation returned +// with a significant terms aggregation. +type AggregationBucketSignificantTerms struct { + Aggregations + + DocCount int64 //`json:"doc_count"` + Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. +func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. +type AggregationBucketSignificantTerm struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + BgCount int64 //`json:"bg_count"` + Score float64 //`json:"score"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. +func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["bg_count"]; ok && v != nil { + json.Unmarshal(*v, &a.BgCount) + } + if v, ok := aggs["score"]; ok && v != nil { + json.Unmarshal(*v, &a.Score) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket filters -- + +// AggregationBucketFilters is a multi-bucket aggregation that is returned +// with a filters aggregation. +type AggregationBucketFilters struct { + Aggregations + + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. 
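+// Editorial note: "buckets" is deliberately decoded twice below, into the
+// Buckets slice when the filters aggregation is anonymous (a JSON array)
+// and into NamedBuckets when it is keyed (a JSON object); only the
+// matching form unmarshals successfully and the other stays nil.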
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + json.Unmarshal(*v, &a.NamedBuckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket histogram items -- + +// AggregationBucketHistogramItems is a bucket aggregation that is returned +// with a date histogram aggregation. +type AggregationBucketHistogramItems struct { + Aggregations + + Buckets []*AggregationBucketHistogramItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. +func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. +type AggregationBucketHistogramItem struct { + Aggregations + + Key float64 //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. +func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineSimpleValue is a simple value, returned e.g. by a +// MovAvg aggregation. +type AggregationPipelineSimpleValue struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure. +func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineBucketMetricValue is a value returned e.g. by a +// MaxBucket aggregation. +type AggregationPipelineBucketMetricValue struct { + Aggregations + + Keys []interface{} // `json:"keys"` + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure. 
+func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["keys"]; ok && v != nil { + json.Unmarshal(*v, &a.Keys) + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline derivative -- + +// AggregationPipelineDerivative is the value returned by a +// Derivative aggregation. +type AggregationPipelineDerivative struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + NormalizedValue *float64 // `json:"normalized_value"` + NormalizedValueAsString string // `json:"normalized_value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure. +func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["normalized_value"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValue) + } + if v, ok := aggs["normalized_value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go new file mode 100644 index 000000000..d3521388a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go @@ -0,0 +1,76 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ChildrenAggregation is a special single bucket aggregation that enables +// aggregating from buckets on parent document types to buckets on child documents. +// It is available from 1.4.0.Beta1 upwards. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +type ChildrenAggregation struct { + typ string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewChildrenAggregation() *ChildrenAggregation { + return &ChildrenAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { + a.typ = typ + return a +} + +func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
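Usage sketch (illustrative, not part of the vendored files; the remaining ChildrenAggregation methods, Meta and Source, continue below): wiring the children aggregation into a search might look like the following. The cluster address and index name are hypothetical, and it assumes the response-side Children helper is present in this version of the package.

package main

import (
	"context"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	// Hypothetical cluster address and index name.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}

	// Bucket the child documents of type "answer", then aggregate on them.
	agg := elastic.NewChildrenAggregation().Type("answer").
		SubAggregation("top-names",
			elastic.NewTermsAggregation().Field("owner.display_name").Size(10))

	resp, err := client.Search().
		Index("stackoverflow").
		Aggregation("to-answers", agg).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
	if children, found := resp.Aggregations.Children("to-answers"); found {
		fmt.Println("answer docs:", children.DocCount)
	}
}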
+func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *ChildrenAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//   "aggs" : {
+	//     "to-answers" : {
+	//       "children": {
+	//         "type" : "answer"
+	//       }
+	//     }
+	//   }
+	// }
+	// This method returns only the { "children" : { "type" : ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["children"] = opts
+	opts["type"] = a.typ
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children_test.go
new file mode 100644
index 000000000..0486079a9
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestChildrenAggregation(t *testing.T) {
+	agg := NewChildrenAggregation().Type("answer")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"children":{"type":"answer"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestChildrenAggregationWithSubAggregation(t *testing.T) {
+	subAgg := NewTermsAggregation().Field("owner.display_name").Size(10)
+	agg := NewChildrenAggregation().Type("answer")
+	agg = agg.SubAggregation("top-names", subAgg)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go
new file mode 100644
index 000000000..029f0cd8c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go
@@ -0,0 +1,285 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DateHistogramAggregation is a multi-bucket aggregation similar to the
+// histogram except it can only be applied on date values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +type DateHistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval string + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin interface{} + extendedBoundsMax interface{} + timeZone string + format string + offset string +} + +// NewDateHistogramAggregation creates a new DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + return &DateHistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Field on which the aggregation is processed. +func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { + a.field = field + return a +} + +func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { + a.missing = missing + return a +} + +func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { + a.meta = metaData + return a +} + +// Interval by which the aggregation gets processed. +// Allowed values are: "year", "quarter", "month", "week", "day", +// "hour", "minute". It also supports time settings like "1.5h" +// (up to "w" for weeks). +func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. +func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation { + return a.OrderByCount(true) +} + +func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation { + return a.OrderByCount(false) +} + +func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation { + return a.OrderByKey(true) +} + +func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. 
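Usage sketch (illustrative, not part of the vendored files; the ordering helpers continue right below): a minimal date histogram built with the methods above. The expected output in the trailing comment follows from the Source implementation further down, with map keys sorted by encoding/json.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewDateHistogramAggregation().
		Field("date").
		Interval("month").
		MinDocCount(1).
		OrderByCountDesc()

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"date_histogram":{"field":"date","interval":"month","min_doc_count":1,"order":{"_count":"desc"}}}
}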
+func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +// MinDocCount sets the minimum document count per bucket. +// Buckets with less documents than this min value will not be returned. +func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +// TimeZone sets the timezone in which to translate dates before computing buckets. +func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation { + a.timeZone = timeZone + return a +} + +// Format sets the format to use for dates. +func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation { + a.format = format + return a +} + +// Offset sets the offset of time intervals in the histogram, e.g. "+6h". +func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation { + a.offset = offset + return a +} + +// ExtendedBounds accepts int, int64, string, or time.Time values. +// In case the lower value in the histogram would be greater than min or the +// upper value would be less than max, empty buckets will be generated. +func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + a.extendedBoundsMax = max + return a +} + +// ExtendedBoundsMin accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + return a +} + +// ExtendedBoundsMax accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { + a.extendedBoundsMax = max + return a +} + +func (a *DateHistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "articles_over_time" : { + // "date_histogram" : { + // "field" : "date", + // "interval" : "month" + // } + // } + // } + // } + // + // This method returns only the { "date_histogram" : { ... } } part. 
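Usage sketch (illustrative, not part of the vendored files; the serialization body continues below): ordering buckets by a sub-aggregation metric, which is what the "calc get" wording in the comments above refers to, a single- or multi-valued metric computed per bucket. StatsAggregation is another builder defined in this package.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	// Sort monthly buckets by the average price computed inside each bucket.
	agg := elastic.NewDateHistogramAggregation().
		Field("date").
		Interval("month").
		OrderByAggregationAndMetric("price_stats", "avg", false).
		SubAggregation("price_stats", elastic.NewStatsAggregation().Field("price"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"aggregations":{"price_stats":{"stats":{"field":"price"}}},"date_histogram":{"field":"date","interval":"month","order":{"price_stats.avg":"desc"}}}
}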
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } + if a.offset != "" { + opts["offset"] = a.offset + } + if a.format != "" { + opts["format"] = a.format + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram_test.go new file mode 100644 index 000000000..ddf790834 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram_test.go @@ -0,0 +1,49 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateHistogramAggregation(t *testing.T) { + agg := NewDateHistogramAggregation(). + Field("date"). + Interval("month"). + Format("YYYY-MM"). + TimeZone("UTC"). 
+		Offset("+6h")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestDateHistogramAggregationWithMissing(t *testing.T) {
+	agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go
new file mode 100644
index 000000000..4f29b14dc
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go
@@ -0,0 +1,233 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated to
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+	field           string
+	script          *Script
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	keyed           *bool
+	unmapped        *bool
+	format          string
+	entries         []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewDateRangeAggregation() *DateRangeAggregation {
+	return &DateRangeAggregation{
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]DateRangeAggregationEntry, 0),
+	}
+}
+
+func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
+	a.field = field
+	return a
+}
+
+func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
+	a.script = script
+	return a
+}
+
+func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation {
+	a.keyed = &keyed
+	return a
+}
+
+func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation {
+	a.unmapped = &unmapped
+	return a
+}
+
+func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation {
+	a.format = format
+	return a
+}
+
+func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "range" : {
+	//             "date_range": {
+	//                 "field": "date",
+	//                 "format": "MM-yyy",
+	//                 "ranges": [
+	//                     { "to": "now-10M/M" },
+	//                     { "from": "now-10M/M" }
+	//                 ]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "date_range" : { ... } } part.
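Usage sketch (illustrative, not part of the vendored files; the serialization body continues below and formats time.Time bounds as RFC3339 strings):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	cutoff := time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC)
	agg := elastic.NewDateRangeAggregation().
		Field("created_at").
		AddUnboundedFrom(cutoff). // no lower bound: everything before the cutoff
		AddUnboundedTo(cutoff)    // no upper bound: everything from the cutoff on

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"date_range":{"field":"created_at","ranges":[{"to":"2014-01-01T00:00:00Z"},{"from":"2014-01-01T00:00:00Z"}]}}
}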
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + if a.format != "" { + opts["format"] = a.format + } + + var ranges []interface{} + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go new file mode 100644 index 000000000..c9ceaec8e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateRangeAggregation(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at") + agg = agg.AddRange(nil, "2012-12-31") + agg = agg.AddRange("2013-01-01", "2013-12-31") + agg = agg.AddRange("2014-01-01", nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithUnbounded(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddUnboundedFrom("2012-12-31"). + AddRange("2013-01-01", "2013-12-31"). + AddUnboundedTo("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). 
+ Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeys(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + LtWithKey("pre-2012", "2012-12-31"). + BetweenWithKey("2013", "2013-01-01", "2013-12-31"). + GtWithKey("post-2013", "2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithSpecialNames(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddRange("now-10M/M", "now+10M/M") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go new file mode 100644 index 000000000..2e04dea5a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go @@ -0,0 +1,77 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FilterAggregation defines a single bucket of all the documents +// in the current document set context that match a specified filter. +// Often this will be used to narrow down the current aggregation context +// to a specific set of documents. 
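Usage sketch (illustrative, not part of the vendored files; the FilterAggregation doc comment continues below): on the response side, a filter aggregation comes back as a single bucket carrying its sub-aggregations. This assumes a reachable local cluster, a hypothetical products index, and that the v5 response helpers Filter and Avg are present.

package main

import (
	"context"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a cluster on 127.0.0.1:9200
	if err != nil {
		panic(err)
	}

	agg := elastic.NewFilterAggregation().
		Filter(elastic.NewRangeQuery("stock").Gt(0)).
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

	resp, err := client.Search().
		Index("products").
		Aggregation("in_stock", agg).
		Do(context.Background())
	if err != nil {
		panic(err)
	}

	// The filter bucket is a single bucket whose sub-aggregations nest inside it.
	if bucket, found := resp.Aggregations.Filter("in_stock"); found {
		if avg, ok := bucket.Avg("avg_price"); ok && avg.Value != nil {
			fmt.Printf("%d in stock, avg price %.2f\n", bucket.DocCount, *avg.Value)
		}
	}
}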
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +type FilterAggregation struct { + filter Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFilterAggregation() *FilterAggregation { + return &FilterAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { + a.meta = metaData + return a +} + +func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { + a.filter = filter + return a +} + +func (a *FilterAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "in_stock_products" : { + // "filter" : { "range" : { "stock" : { "gt" : 0 } } } + // } + // } + // } + // This method returns only the { "filter" : {} } part. + + src, err := a.filter.Source() + if err != nil { + return nil, err + } + source := make(map[string]interface{}) + source["filter"] = src + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter_test.go new file mode 100644 index 000000000..6aa4fbb7c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter_test.go @@ -0,0 +1,66 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFilterAggregation(t *testing.T) { + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFilterAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter). 
+ SubAggregation("avg_price", avgPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFilterAggregationWithMeta(t *testing.T) { + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go new file mode 100644 index 000000000..2fcb17998 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go @@ -0,0 +1,138 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// FiltersAggregation defines a multi bucket aggregations where each bucket +// is associated with a filter. Each bucket will collect all documents that +// match its associated filter. +// +// Notice that the caller has to decide whether to add filters by name +// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot +// use both named and unnamed filters. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +type FiltersAggregation struct { + unnamedFilters []Query + namedFilters map[string]Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewFiltersAggregation initializes a new FiltersAggregation. +func NewFiltersAggregation() *FiltersAggregation { + return &FiltersAggregation{ + unnamedFilters: make([]Query, 0), + namedFilters: make(map[string]Query), + subAggregations: make(map[string]Aggregation), + } +} + +// Filter adds an unnamed filter. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation { + a.unnamedFilters = append(a.unnamedFilters, filter) + return a +} + +// Filters adds one or more unnamed filters. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation { + if len(filters) > 0 { + a.unnamedFilters = append(a.unnamedFilters, filters...) + } + return a +} + +// FilterWithName adds a filter with a specific name. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation { + a.namedFilters[name] = filter + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. 
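Usage sketch (illustrative, not part of the vendored files; the remaining FiltersAggregation methods continue below): on the response side, keyed filters land in NamedBuckets rather than Buckets, as this decoding sketch shows.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	// A keyed filters aggregation as it appears inside a search response.
	data := []byte(`{"buckets":{
		"errors":   {"doc_count": 34},
		"warnings": {"doc_count": 439}}}`)

	var agg elastic.AggregationBucketFilters
	if err := json.Unmarshal(data, &agg); err != nil {
		panic(err)
	}
	// Named filters populate NamedBuckets; unnamed filters would populate
	// the Buckets slice instead (see the UnmarshalJSON decoder earlier).
	for name, b := range agg.NamedBuckets {
		fmt.Printf("%s: %d\n", name, b.DocCount)
	}
}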
+func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation { + a.meta = metaData + return a +} + +// Source returns the a JSON-serializable interface. +// If the aggregation is invalid, an error is returned. This may e.g. happen +// if you mixed named and unnamed filters. +func (a *FiltersAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "messages" : { + // "filters" : { + // "filters" : { + // "errors" : { "term" : { "body" : "error" }}, + // "warnings" : { "term" : { "body" : "warning" }} + // } + // } + // } + // } + // } + // This method returns only the (outer) { "filters" : {} } part. + + source := make(map[string]interface{}) + filters := make(map[string]interface{}) + source["filters"] = filters + + if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 { + return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both") + } + + if len(a.unnamedFilters) > 0 { + arr := make([]interface{}, len(a.unnamedFilters)) + for i, filter := range a.unnamedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + arr[i] = src + } + filters["filters"] = arr + } else { + dict := make(map[string]interface{}) + for key, filter := range a.namedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + dict[key] = src + } + filters["filters"] = dict + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters_test.go new file mode 100644 index 000000000..95cc8d7c3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters_test.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFiltersAggregationFilters(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFiltersAggregationFilterWithName(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation(). + FilterWithName("f1", f1). 
+		FilterWithName("f2", f2)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"filters":{"filters":{"f1":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"f2":{"term":{"symbol":"GOOG"}}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFiltersAggregationWithKeyedAndNonKeyedFilters(t *testing.T) {
+	agg := NewFiltersAggregation().
+		Filter(NewTermQuery("symbol", "MSFT")).                // unnamed
+		FilterWithName("one", NewTermQuery("symbol", "GOOG")) // named filter
+	_, err := agg.Source()
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+}
+
+func TestFiltersAggregationWithSubAggregation(t *testing.T) {
+	avgPriceAgg := NewAvgAggregation().Field("price")
+	f1 := NewRangeQuery("stock").Gt(0)
+	f2 := NewTermQuery("symbol", "GOOG")
+	agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFiltersAggregationWithMetaData(t *testing.T) {
+	f1 := NewRangeQuery("stock").Gt(0)
+	f2 := NewTermQuery("symbol", "GOOG")
+	agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"})
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go
new file mode 100644
index 000000000..00afbab09
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go
@@ -0,0 +1,194 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similarly to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluates the distance of each document value from
+// the origin point and determines the buckets it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
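Usage sketch (illustrative, not part of the vendored files; the See: link and the builder follow below): one naming subtlety worth showing is that AddUnboundedTo takes a from value (a ring with no upper bound), while AddUnboundedFrom takes a to value (no lower bound). The expected output follows from the Source method further down.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewGeoDistanceAggregation().
		Field("location").
		Point("52.3760, 4.894").
		Unit("km").
		AddUnboundedFrom(100). // {"to":100}: ring with no lower bound
		AddRange(100, 300).    // {"from":100,"to":300}
		AddUnboundedTo(300)    // {"from":300}: ring with no upper bound

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}],"unit":"km"}}
}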
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+	field           string
+	unit            string
+	distanceType    string
+	point           string
+	ranges          []geoDistAggRange
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+}
+
+type geoDistAggRange struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewGeoDistanceAggregation() *GeoDistanceAggregation {
+	return &GeoDistanceAggregation{
+		subAggregations: make(map[string]Aggregation),
+		ranges:          make([]geoDistAggRange, 0),
+	}
+}
+
+func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
+	a.field = field
+	return a
+}
+
+func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
+	a.unit = unit
+	return a
+}
+
+func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
+	a.distanceType = distanceType
+	return a
+}
+
+func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
+	a.point = latLon
+	return a
+}
+
+func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
+	a.meta = metaData
+	return a
+}
+func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "rings_around_amsterdam" : {
+	//             "geo_distance" : {
+	//                 "field" : "location",
+	//                 "origin" : "52.3760, 4.894",
+	//                 "ranges" : [
+	//                     { "to" : 100 },
+	//                     { "from" : 100, "to" : 300 },
+	//                     { "from" : 300 }
+	//                 ]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "geo_distance" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_distance"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.unit != "" { + opts["unit"] = a.unit + } + if a.distanceType != "" { + opts["distance_type"] = a.distanceType + } + if a.point != "" { + opts["origin"] = a.point + } + + var ranges []interface{} + for _, ent := range a.ranges { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case *int, *int16, *int32, *int64, *float32, *float64: + r["from"] = from + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case *int, *int16, *int32, *int64, *float32, *float64: + r["to"] = to + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance_test.go new file mode 100644 index 000000000..0466dca21 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance_test.go @@ -0,0 +1,71 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoDistanceAggregation(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithUnbounded(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddUnboundedFrom(100) + agg = agg.AddRange(100, 300) + agg = agg.AddUnboundedTo(300) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithMetaData(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go new file mode 100644 index 000000000..07f61b331 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go @@ -0,0 +1,102 @@ +package elastic + +type GeoHashGridAggregation struct { + field string + precision int + size int + shardSize int + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoHashGridAggregation() *GeoHashGridAggregation { + return &GeoHashGridAggregation{ + subAggregations: make(map[string]Aggregation), + precision: -1, + size: -1, + shardSize: -1, + } +} + +func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation { + a.field = field + return a +} + +func (a *GeoHashGridAggregation) Precision(precision int) *GeoHashGridAggregation { + a.precision = precision + return a +} + +func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation { + a.size = size + return a +} + +func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation { + a.shardSize = shardSize + return a +} + +func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation { + a.subAggregations[name] = subAggregation + return a +} + +func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation { + a.meta = metaData + return a 
+} + +func (a *GeoHashGridAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs": { + // "new_york": { + // "geohash_grid": { + // "field": "location", + // "precision": 5 + // } + // } + // } + // } + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geohash_grid"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + if a.precision != -1 { + opts["precision"] = a.precision + } + + if a.size != -1 { + opts["size"] = a.size + } + + if a.shardSize != -1 { + opts["shard_size"] = a.shardSize + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid_test.go new file mode 100644 index 000000000..044e211eb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid_test.go @@ -0,0 +1,84 @@ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoHashGridAggregation(t *testing.T) { + agg := NewGeoHashGridAggregation().Field("location").Precision(5) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("Marshalling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geohash_grid":{"field":"location","precision":5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoHashGridAggregationWithMetaData(t *testing.T) { + agg := NewGeoHashGridAggregation().Field("location").Precision(5) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("Marshalling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geohash_grid":{"field":"location","precision":5},"meta":{"name":"Oliver"}}` + + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoHashGridAggregationWithSize(t *testing.T) { + agg := NewGeoHashGridAggregation().Field("location").Precision(5).Size(5) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("Marshalling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geohash_grid":{"field":"location","precision":5,"size":5},"meta":{"name":"Oliver"}}` + + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoHashGridAggregationWithShardSize(t *testing.T) { + agg := NewGeoHashGridAggregation().Field("location").Precision(5).ShardSize(5) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("Marshalling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geohash_grid":{"field":"location","precision":5,"shard_size":5},"meta":{"name":"Oliver"}}` + + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git 
a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go new file mode 100644 index 000000000..c96e3c82f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go @@ -0,0 +1,71 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GlobalAggregation defines a single bucket of all the documents within +// the search execution context. This context is defined by the indices +// and the document types you’re searching on, but is not influenced +// by the search query itself. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +type GlobalAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGlobalAggregation() *GlobalAggregation { + return &GlobalAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { + a.meta = metaData + return a +} + +func (a *GlobalAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "all_products" : { + // "global" : {}, + // "aggs" : { + // "avg_price" : { "avg" : { "field" : "price" } } + // } + // } + // } + // } + // This method returns only the { "global" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["global"] = opts + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global_test.go new file mode 100644 index 000000000..5f1e5e6cb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
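Usage sketch (illustrative, not part of the vendored files; the tests follow below): because the global bucket ignores the search query, it can compare query-scoped hits against corpus-wide statistics. This assumes a reachable local cluster, a hypothetical products index, and that the v5 Global response helper is present.

package main

import (
	"context"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient() // assumes a cluster on 127.0.0.1:9200
	if err != nil {
		panic(err)
	}

	// The term query narrows the hits, but the global bucket still spans
	// the whole index, so avg_price covers all products.
	agg := elastic.NewGlobalAggregation().
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

	resp, err := client.Search().
		Index("products").
		Query(elastic.NewTermQuery("on_sale", true)).
		Aggregation("all_products", agg).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
	if all, found := resp.Aggregations.Global("all_products"); found {
		fmt.Println("all products:", all.DocCount)
	}
}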
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGlobalAggregation(t *testing.T) { + agg := NewGlobalAggregation() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGlobalAggregationWithMetaData(t *testing.T) { + agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go new file mode 100644 index 000000000..ac42fe98e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go @@ -0,0 +1,253 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HistogramAggregation is a multi-bucket values source based aggregation +// that can be applied on numeric values extracted from the documents. +// It dynamically builds fixed size (a.k.a. interval) buckets over the +// values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +type HistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval int64 + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin *int64 + extendedBoundsMax *int64 + offset *int64 +} + +func NewHistogramAggregation() *HistogramAggregation { + return &HistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *HistogramAggregation) Field(field string) *HistogramAggregation { + a.field = field + return a +} + +func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { + a.missing = missing + return a +} + +func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { + a.meta = metaData + return a +} + +func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. 
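Usage sketch (illustrative, not part of the vendored files; the Order method itself follows below): a numeric histogram built with the methods above, with the expected Source output derived from the implementation further down.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewHistogramAggregation().
		Field("price").
		Interval(50).
		MinDocCount(0).
		ExtendedBounds(0, 500) // force empty buckets across the full range

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"histogram":{"extended_bounds":{"max":500,"min":0},"field":"price","interval":50,"min_doc_count":0}}
}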
+func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation {
+	return a.OrderByCount(true)
+}
+
+func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation {
+	return a.OrderByCount(false)
+}
+
+func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation {
+	// "order" : { "_key" : "asc" }
+	a.order = "_key"
+	a.orderAsc = asc
+	return a
+}
+
+func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation {
+	return a.OrderByKey(true)
+}
+
+func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation {
+	return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued metric sub-aggregation.
+func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued metric sub-aggregation.
+func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation {
+	a.extendedBoundsMin = &min
+	a.extendedBoundsMax = &max
+	return a
+}
+
+func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation {
+	a.extendedBoundsMin = &min
+	return a
+}
+
+func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation {
+	a.extendedBoundsMax = &max
+	return a
+}
+
+func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation {
+	a.offset = &offset
+	return a
+}
+
+func (a *HistogramAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "prices" : {
+	//             "histogram" : {
+	//                 "field" : "price",
+	//                 "interval" : 50
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "histogram" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.offset != nil { + opts["offset"] = *a.offset + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram_test.go new file mode 100644 index 000000000..aeb7eec54 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
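// Usage sketch (not part of the upstream file): attaching the histogram to a
// search request. `client`, `ctx`, and the "products"/"price" names below are
// assumptions for illustration. MinDocCount(0) plus ExtendedBounds makes
// Elasticsearch return empty buckets across the whole bounds range:
//
//	agg := elastic.NewHistogramAggregation().
//		Field("price").
//		Interval(50).
//		MinDocCount(0).
//		ExtendedBounds(0, 500)
//	res, err := client.Search().Index("products").Size(0).
//		Aggregation("prices", agg).Do(ctx)
//	if err == nil {
//		if hist, found := res.Aggregations.Histogram("prices"); found {
//			for _, bucket := range hist.Buckets {
//				// bucket.Key is the lower bound of the interval,
//				// bucket.DocCount the number of matching documents.
//			}
//		}
//	}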
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHistogramAggregation(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMetaData(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMissing(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go new file mode 100644 index 000000000..82f6de707 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go @@ -0,0 +1,81 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MissingAggregation is a field data based single bucket aggregation, +// that creates a bucket of all documents in the current document set context +// that are missing a field value (effectively, missing a field or having +// the configured NULL value set). This aggregator will often be used in +// conjunction with other field data bucket aggregators (such as ranges) +// to return information for all the documents that could not be placed +// in any of the other buckets due to missing field data values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +type MissingAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMissingAggregation() *MissingAggregation { + return &MissingAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MissingAggregation) Field(field string) *MissingAggregation { + a.field = field + return a +} + +func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { + a.meta = metaData + return a +} + +func (a *MissingAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "products_without_a_price" : { + // "missing" : { "field" : "price" } + // } + // } + // } + // This method returns only the { "missing" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["missing"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing_test.go new file mode 100644 index 000000000..179c3084f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMissingAggregation(t *testing.T) { + agg := NewMissingAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMissingAggregationWithMetaData(t *testing.T) { + agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go new file mode 100644 index 000000000..3da1b99bf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go @@ -0,0 +1,82 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedAggregation is a special single bucket aggregation that enables +// aggregating nested documents. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html +type NestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewNestedAggregation() *NestedAggregation { + return &NestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { + a.meta = metaData + return a +} + +func (a *NestedAggregation) Path(path string) *NestedAggregation { + a.path = path + return a +} + +func (a *NestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "name" : "led tv" } + // } + // "aggs" : { + // "resellers" : { + // "nested" : { + // "path" : "resellers" + // }, + // "aggs" : { + // "min_price" : { "min" : { "field" : "resellers.price" } } + // } + // } + // } + // } + // This method returns only the { "nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["nested"] = opts + + opts["path"] = a.path + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested_test.go new file mode 100644 index 000000000..219943e3d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested_test.go @@ -0,0 +1,62 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
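// Usage sketch (not part of the upstream file): `client`/`ctx` are assumed and
// the "resellers" nested mapping is hypothetical. Metric sub-aggregations
// inside the nested scope must address fields by their full path:
//
//	agg := elastic.NewNestedAggregation().
//		Path("resellers").
//		SubAggregation("min_price", elastic.NewMinAggregation().Field("resellers.price"))
//	res, err := client.Search().Index("products").Size(0).
//		Aggregation("resellers", agg).Do(ctx)
//	if err == nil {
//		if nested, found := res.Aggregations.Nested("resellers"); found {
//			if min, ok := nested.Min("min_price"); ok && min.Value != nil {
//				_ = *min.Value // lowest reseller price
//			}
//		}
//	}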
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestNestedAggregation(t *testing.T) {
+	agg := NewNestedAggregation().Path("resellers")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"nested":{"path":"resellers"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestNestedAggregationWithSubAggregation(t *testing.T) {
+	minPriceAgg := NewMinAggregation().Field("resellers.price")
+	agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestNestedAggregationWithMetaData(t *testing.T) {
+	agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"})
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go
new file mode 100644
index 000000000..2a8fd138a
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go
@@ -0,0 +1,232 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// will be checked against each bucket range, "bucketing" the
+// relevant/matching document. Note that this aggregation includes the
+// from value and excludes the to value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+	field           string
+	script          *Script
+	missing         interface{}
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	keyed           *bool
+	unmapped        *bool
+	entries         []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewRangeAggregation() *RangeAggregation {
+	return &RangeAggregation{
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]rangeAggregationEntry, 0),
+	}
+}
+
+func (a *RangeAggregation) Field(field string) *RangeAggregation {
+	a.field = field
+	return a
+}
+
+func (a *RangeAggregation) Script(script *Script) *RangeAggregation {
+	a.script = script
+	return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { + a.missing = missing + return a +} + +func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { + a.meta = metaData + return a +} + +func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { + a.keyed = &keyed + return a +} + +func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "price_ranges" : { + // "range" : { + // "field" : "price", + // "ranges" : [ + // { "to" : 50 }, + // { "from" : 50, "to" : 100 }, + // { "from" : 100 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + + var ranges []interface{} + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range_test.go new file mode 100644 index 000000000..361840ae1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range_test.go @@ -0,0 +1,156 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRangeAggregation(t *testing.T) { + agg := NewRangeAggregation().Field("price") + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithUnbounded(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + AddUnboundedFrom(50). + AddRange(20, 70). + AddRange(70, 120). + AddUnboundedTo(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Lt(50). + Between(20, 70). + Between(70, 120). 
+ Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + Lt(50). + Between(20, 70). + Between(70, 120). + Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeys(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + LtWithKey("cheap", 50). + BetweenWithKey("affordable", 20, 70). + BetweenWithKey("average", 70, 120). + GtWithKey("expensive", 150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMetaData(t *testing.T) { + agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMissing(t *testing.T) { + agg := NewRangeAggregation().Field("price").Missing(0) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go new file mode 100644 index 000000000..89c7531d2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go @@ -0,0 +1,86 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
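// Usage sketch (not part of the upstream file) for the RangeAggregation
// above; `client`/`ctx` are assumed and the index/field names are
// hypothetical. Per the doc comment, each range includes `from` and
// excludes `to`:
//
//	agg := elastic.NewRangeAggregation().Field("price").
//		AddRange(nil, 50).  // { "to" : 50 }
//		AddRange(50, 100).  // { "from" : 50, "to" : 100 }
//		AddRange(100, nil)  // { "from" : 100 }
//	res, err := client.Search().Index("products").Size(0).
//		Aggregation("price_ranges", agg).Do(ctx)
//	if err == nil {
//		if ranges, found := res.Aggregations.Range("price_ranges"); found {
//			for _, bucket := range ranges.Buckets {
//				// bucket.Key (e.g. "50.0-100.0") and bucket.DocCount
//			}
//		}
//	}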
+ +package elastic + +// ReverseNestedAggregation defines a special single bucket aggregation +// that enables aggregating on parent docs from nested documents. +// Effectively this aggregation can break out of the nested block +// structure and link to other nested structures or the root document, +// which allows nesting other aggregations that aren’t part of +// the nested object in a nested aggregation. +// +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +type ReverseNestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewReverseNestedAggregation initializes a new ReverseNestedAggregation +// bucket aggregation. +func NewReverseNestedAggregation() *ReverseNestedAggregation { + return &ReverseNestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Path set the path to use for this nested aggregation. The path must match +// the path to a nested object in the mappings. If it is not specified +// then this aggregation will go back to the root document. +func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation { + a.path = path + return a +} + +func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation { + a.meta = metaData + return a +} + +func (a *ReverseNestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "reverse_nested" : { + // "path": "..." + // } + // } + // } + // This method returns only the { "reverse_nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["reverse_nested"] = opts + + if a.path != "" { + opts["path"] = a.path + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested_test.go new file mode 100644 index 000000000..dc50bbc28 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested_test.go @@ -0,0 +1,83 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
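// Usage sketch (not part of the upstream file): `client`/`ctx` are assumed
// and the "issues" index with a "comments" nested mapping is hypothetical.
// The reverse_nested step jumps from the nested comment scope back to the
// parent issue documents:
//
//	topTags := elastic.NewTermsAggregation().Field("tags")
//	backToIssues := elastic.NewReverseNestedAggregation().
//		SubAggregation("top_tags", topTags)
//	byAuthor := elastic.NewTermsAggregation().Field("comments.author").
//		SubAggregation("issues", backToIssues)
//	agg := elastic.NewNestedAggregation().Path("comments").
//		SubAggregation("by_author", byAuthor)
//	_, err := client.Search().Index("issues").Size(0).
//		Aggregation("comments", agg).Do(ctx)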
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestReverseNestedAggregation(t *testing.T) { + agg := NewReverseNestedAggregation() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"reverse_nested":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestReverseNestedAggregationWithPath(t *testing.T) { + agg := NewReverseNestedAggregation().Path("comments") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"reverse_nested":{"path":"comments"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestReverseNestedAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + agg := NewReverseNestedAggregation(). + Path("a_path"). + SubAggregation("avg_price", avgPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"reverse_nested":{"path":"a_path"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestReverseNestedAggregationWithMeta(t *testing.T) { + agg := NewReverseNestedAggregation(). + Path("a_path"). + Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"reverse_nested":{"path":"a_path"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go new file mode 100644 index 000000000..8fb61b771 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go @@ -0,0 +1,110 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SamplerAggregation is a filtering aggregation used to limit any +// sub aggregations' processing to a sample of the top-scoring documents. +// Optionally, diversity settings can be used to limit the number of matches +// that share a common value such as an "author". +// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html +type SamplerAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} + + shardSize int + maxDocsPerValue int + executionHint string +} + +func NewSamplerAggregation() *SamplerAggregation { + return &SamplerAggregation{ + shardSize: -1, + maxDocsPerValue: -1, + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation {
+	a.meta = metaData
+	return a
+}
+
+// ShardSize sets the maximum number of docs returned from each shard.
+func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation {
+	a.shardSize = shardSize
+	return a
+}
+
+func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation {
+	a.maxDocsPerValue = maxDocsPerValue
+	return a
+}
+
+func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation {
+	a.executionHint = hint
+	return a
+}
+
+func (a *SamplerAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "sample" : {
+	//             "sampler" : {
+	//                 "shard_size" : 200
+	//             },
+	//             "aggs": {
+	//                 "keywords": {
+	//                     "significant_terms": {
+	//                         "field": "text"
+	//                     }
+	//                 }
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "sampler" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["sampler"] = opts
+
+	if a.shardSize >= 0 {
+		opts["shard_size"] = a.shardSize
+	}
+	if a.maxDocsPerValue >= 0 {
+		opts["max_docs_per_value"] = a.maxDocsPerValue
+	}
+	if a.executionHint != "" {
+		opts["execution_hint"] = a.executionHint
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler_test.go
new file mode 100644
index 000000000..c4dc1c7cc
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSamplerAggregation(t *testing.T) {
+	keywordsAgg := NewSignificantTermsAggregation().Field("text")
+	agg := NewSamplerAggregation().
+		ShardSize(200).
+		SubAggregation("keywords", keywordsAgg)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go
new file mode 100644
index 000000000..c6b24f929
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go
@@ -0,0 +1,389 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +type SignificantTermsAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} + + minDocCount *int + shardMinDocCount *int + requiredSize *int + shardSize *int + filter Query + executionHint string + significanceHeuristic SignificanceHeuristic +} + +func NewSignificantTermsAggregation() *SignificantTermsAggregation { + return &SignificantTermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + } +} + +func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation { + a.field = field + return a +} + +func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation { + a.meta = metaData + return a +} + +func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation { + a.filter = filter + return a +} + +func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation { + a.executionHint = hint + return a +} + +func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation { + a.significanceHeuristic = heuristic + return a +} + +func (a *SignificantTermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "terms" : {"force" : [ "British Transport Police" ]} + // }, + // "aggregations" : { + // "significantCrimeTypes" : { + // "significant_terms" : { "field" : "crime_type" } + // } + // } + // } + // + // This method returns only the + // { "significant_terms" : { "field" : "crime_type" } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["significant_terms"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.requiredSize != nil { + opts["size"] = *a.requiredSize // not a typo! 
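+		// The REST API expects the JSON key "size" here even though the
+		// builder option is named RequiredSize; hence the remark above.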
+ } + if a.shardSize != nil { + opts["shard_size"] = *a.shardSize + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + if a.filter != nil { + src, err := a.filter.Source() + if err != nil { + return nil, err + } + opts["background_filter"] = src + } + if a.significanceHeuristic != nil { + name := a.significanceHeuristic.Name() + src, err := a.significanceHeuristic.Source() + if err != nil { + return nil, err + } + opts[name] = src + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Significance heuristics -- + +type SignificanceHeuristic interface { + Name() string + Source() (interface{}, error) +} + +// -- Chi Square -- + +// ChiSquareSignificanceHeuristic implements Chi square as described +// in "Information Retrieval", Manning et al., Chapter 13.5.2. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_chi_square +// for details. +type ChiSquareSignificanceHeuristic struct { + backgroundIsSuperset *bool + includeNegatives *bool +} + +// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic. +func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic { + return &ChiSquareSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *ChiSquareSignificanceHeuristic) Name() string { + return "chi_square" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// IncludeNegatives indicates whether to filter out the terms that appear +// much less in the subset than in the background without the subset. +func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- GND -- + +// GNDSignificanceHeuristic implements the "Google Normalized Distance" +// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, +// 2007. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance +// for details. 
+type GNDSignificanceHeuristic struct {
+	backgroundIsSuperset *bool
+}
+
+// NewGNDSignificanceHeuristic initializes a new GNDSignificanceHeuristic.
+func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic {
+	return &GNDSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *GNDSignificanceHeuristic) Name() string {
+	return "gnd"
+}
+
+// BackgroundIsSuperset indicates whether you defined a custom background
+// filter that represents a difference set of documents that you want to
+// compare to.
+func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic {
+	sh.backgroundIsSuperset = &backgroundIsSuperset
+	return sh
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	if sh.backgroundIsSuperset != nil {
+		source["background_is_superset"] = *sh.backgroundIsSuperset
+	}
+	return source, nil
+}
+
+// -- JLH Score --
+
+// JLHScoreSignificanceHeuristic implements the JLH score as described in
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score.
+type JLHScoreSignificanceHeuristic struct{}
+
+// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic.
+func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic {
+	return &JLHScoreSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *JLHScoreSignificanceHeuristic) Name() string {
+	return "jlh"
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	return source, nil
+}
+
+// -- Mutual Information --
+
+// MutualInformationSignificanceHeuristic implements Mutual information
+// as described in "Information Retrieval", Manning et al., Chapter 13.5.1.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information
+// for details.
+type MutualInformationSignificanceHeuristic struct {
+	backgroundIsSuperset *bool
+	includeNegatives     *bool
+}
+
+// NewMutualInformationSignificanceHeuristic initializes a new instance of
+// MutualInformationSignificanceHeuristic.
+func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic {
+	return &MutualInformationSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *MutualInformationSignificanceHeuristic) Name() string {
+	return "mutual_information"
+}
+
+// BackgroundIsSuperset indicates whether you defined a custom background
+// filter that represents a difference set of documents that you want to
+// compare to.
+func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic {
+	sh.backgroundIsSuperset = &backgroundIsSuperset
+	return sh
+}
+
+// IncludeNegatives indicates whether to filter out the terms that appear
+// much less in the subset than in the background without the subset.
+func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- Percentage Score -- + +// PercentageScoreSignificanceHeuristic implements the algorithm described +// in https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_percentage. +type PercentageScoreSignificanceHeuristic struct{} + +// NewPercentageScoreSignificanceHeuristic initializes a new instance of +// PercentageScoreSignificanceHeuristic. +func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic { + return &PercentageScoreSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *PercentageScoreSignificanceHeuristic) Name() string { + return "percentage" +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + return source, nil +} + +// -- Script -- + +// ScriptSignificanceHeuristic implements a scripted significance heuristic. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +type ScriptSignificanceHeuristic struct { + script *Script +} + +// NewScriptSignificanceHeuristic initializes a new instance of +// ScriptSignificanceHeuristic. +func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic { + return &ScriptSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *ScriptSignificanceHeuristic) Name() string { + return "script_heuristic" +} + +// Script specifies the script to use to get custom scores. The following +// parameters are available in the script: `_subset_freq`, `_superset_freq`, +// `_subset_size`, and `_superset_size`. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic { + sh.script = script + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.script != nil { + src, err := sh.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go new file mode 100644 index 000000000..2f87373d7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go @@ -0,0 +1,211 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSignificantTermsAggregation(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithArgs(t *testing.T) { + agg := NewSignificantTermsAggregation(). + Field("crime_type"). + ExecutionHint("map"). + ShardSize(5). + MinDocCount(10). + BackgroundFilter(NewTermQuery("city", "London")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationSubAggregation(t *testing.T) { + crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type") + agg := NewTermsAggregation().Field("force") + agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithMetaData(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithChiSquare(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewChiSquareSignificanceHeuristic(). + BackgroundIsSuperset(true). 
+ IncludeNegatives(false), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"chi_square":{"background_is_superset":true,"include_negatives":false},"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithGND(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewGNDSignificanceHeuristic(), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type","gnd":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithJLH(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewJLHScoreSignificanceHeuristic(), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type","jlh":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithMutualInformation(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewMutualInformationSignificanceHeuristic(). + BackgroundIsSuperset(false). + IncludeNegatives(true), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type","mutual_information":{"background_is_superset":false,"include_negatives":true}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithPercentageScore(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewPercentageScoreSignificanceHeuristic(), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type","percentage":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithScript(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.SignificanceHeuristic( + NewScriptSignificanceHeuristic(). 
+ Script(NewScript("_subset_freq/(_superset_freq - _subset_freq + 1)")), + ) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type","script_heuristic":{"script":"_subset_freq/(_superset_freq - _subset_freq + 1)"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go new file mode 100644 index 000000000..7c72d1ab0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go @@ -0,0 +1,341 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsAggregation is a multi-bucket value source based aggregation +// where buckets are dynamically built - one per unique value. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +type TermsAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + size *int + shardSize *int + requiredSize *int + minDocCount *int + shardMinDocCount *int + valueType string + order string + orderAsc bool + includePattern string + includeFlags *int + excludePattern string + excludeFlags *int + executionHint string + collectionMode string + showTermDocCountError *bool + includeTerms []string + excludeTerms []string +} + +func NewTermsAggregation() *TermsAggregation { + return &TermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + includeTerms: make([]string, 0), + excludeTerms: make([]string, 0), + } +} + +func (a *TermsAggregation) Field(field string) *TermsAggregation { + a.field = field + return a +} + +func (a *TermsAggregation) Script(script *Script) *TermsAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { + a.missing = missing + return a +} + +func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { + a.meta = metaData + return a +} + +func (a *TermsAggregation) Size(size int) *TermsAggregation { + a.size = &size + return a +} + +func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *TermsAggregation) Include(regexp string) *TermsAggregation { + a.includePattern = regexp + return a +} + +func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { + a.includePattern = regexp + a.includeFlags = &flags + return a +} + +func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { + a.excludePattern = regexp + return a +} + +func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { + a.excludePattern = regexp + a.excludeFlags = &flags + return a +} + +// ValueType can be string, long, or double. +func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { + a.valueType = valueType + return a +} + +func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { + return a.OrderByCount(true) +} + +func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { + return a.OrderByCount(false) +} + +func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { + // "order" : { "_term" : "asc" } + a.order = "_term" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { + return a.OrderByTerm(true) +} + +func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { + return a.OrderByTerm(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on the value of a single-valued metric sub-aggregation. +func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a single metric of a multi-valued metric sub-aggregation. +func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { + a.executionHint = hint + return a +} + +// CollectionMode can be depth_first or breadth_first as of 1.4.0.
+func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { + a.collectionMode = collectionMode + return a +} + +func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { + a.showTermDocCountError = &showTermDocCountError + return a +} + +func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { + a.includeTerms = append(a.includeTerms, terms...) + return a +} + +func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { + a.excludeTerms = append(a.excludeTerms, terms...) + return a +} + +func (a *TermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "genders" : { + // "terms" : { "field" : "gender" } + // } + // } + // } + // This method returns only the { "terms" : { "field" : "gender" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["terms"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + // TermsBuilder + if a.size != nil && *a.size >= 0 { + opts["size"] = *a.size + } + if a.shardSize != nil && *a.shardSize >= 0 { + opts["shard_size"] = *a.shardSize + } + if a.requiredSize != nil && *a.requiredSize >= 0 { + opts["required_size"] = *a.requiredSize + } + if a.minDocCount != nil && *a.minDocCount >= 0 { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.showTermDocCountError != nil { + opts["show_term_doc_count_error"] = *a.showTermDocCountError + } + if a.collectionMode != "" { + opts["collect_mode"] = a.collectionMode + } + if a.valueType != "" { + opts["value_type"] = a.valueType + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if len(a.includeTerms) > 0 { + opts["include"] = a.includeTerms + } + if a.includePattern != "" { + if a.includeFlags == nil || *a.includeFlags == 0 { + opts["include"] = a.includePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.includePattern + p["flags"] = *a.includeFlags + opts["include"] = p + } + } + if len(a.excludeTerms) > 0 { + opts["exclude"] = a.excludeTerms + } + if a.excludePattern != "" { + if a.excludeFlags == nil || *a.excludeFlags == 0 { + opts["exclude"] = a.excludePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.excludePattern + p["flags"] = *a.excludeFlags + opts["exclude"] = p + } + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms_test.go new file mode 100644 index 000000000..e84f51a15 --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms_test.go @@ -0,0 +1,104 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermsAggregation(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithSubAggregation(t *testing.T) { + subAgg := NewAvgAggregation().Field("height") + agg := NewTermsAggregation().Field("gender").Size(10). + OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) { + subAgg1 := NewAvgAggregation().Field("height") + subAgg2 := NewAvgAggregation().Field("width") + agg := NewTermsAggregation().Field("gender").Size(10). + OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg1) + agg = agg.SubAggregation("avg_width", subAgg2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMetaData(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMissing(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go new file mode 100644 index 000000000..e09ba347a --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go @@ -0,0 +1,101 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgAggregation is a single-value metrics aggregation that computes +// the average of numeric values that are extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +type AvgAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewAvgAggregation() *AvgAggregation { + return &AvgAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *AvgAggregation) Field(field string) *AvgAggregation { + a.field = field + return a +} + +func (a *AvgAggregation) Script(script *Script) *AvgAggregation { + a.script = script + return a +} + +func (a *AvgAggregation) Format(format string) *AvgAggregation { + a.format = format + return a +} + +func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { + a.meta = metaData + return a +} + +func (a *AvgAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "avg_grade" : { "avg" : { "field" : "grade" } } + // } + // } + // This method returns only the { "avg" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["avg"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg_test.go new file mode 100644 index 000000000..784ff45dd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
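A minimal usage sketch of how the TermsAggregation and AvgAggregation above compose; the index and field names are illustrative, and the snippet only builds and prints the request body (the printed JSON matches the expected string in TestTermsAggregationWithSubAggregation above):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Bucket by gender, order buckets by the avg_height sub-aggregation, descending.
	agg := elastic.NewTermsAggregation().Field("gender").Size(10).
		OrderByAggregation("avg_height", false).
		SubAggregation("avg_height", elastic.NewAvgAggregation().Field("height"))
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}
}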
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgAggregation(t *testing.T) { + agg := NewAvgAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithFormat(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithMetaData(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go new file mode 100644 index 000000000..c21d6c8b1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CardinalityAggregation is a single-value metrics aggregation that +// calculates an approximate count of distinct values. +// Values can be extracted either from specific fields in the document +// or generated by a script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +type CardinalityAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + precisionThreshold *int64 + rehash *bool +} + +func NewCardinalityAggregation() *CardinalityAggregation { + return &CardinalityAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { + a.field = field + return a +} + +func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { + a.script = script + return a +} + +func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { + a.format = format + return a +} + +func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { + a.meta = metaData + return a +} + +func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { + a.precisionThreshold = &threshold + return a +} + +func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { + a.rehash = &rehash + return a +} + +func (a *CardinalityAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "author_count" : { + // "cardinality" : { "field" : "author" } + // } + // } + // } + // This method returns only the "cardinality" : { "field" : "author" } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["cardinality"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + if a.precisionThreshold != nil { + opts["precision_threshold"] = *a.precisionThreshold + } + if a.rehash != nil { + opts["rehash"] = *a.rehash + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality_test.go new file mode 100644 index 000000000..b5f8490b5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
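In the same spirit, a sketch for the CardinalityAggregation above; precision_threshold is the knob that trades memory for accuracy of the approximate distinct count, and the field name is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Approximate distinct count of author.hash; counts up to the threshold
	// stay near-exact, beyond it the HyperLogLog++ estimate takes over.
	agg := elastic.NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100)
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"cardinality":{"field":"author.hash","precision_threshold":100}}
}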
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCardinalityAggregation(t *testing.T) { + agg := NewCardinalityAggregation().Field("author.hash") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cardinality":{"field":"author.hash"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCardinalityAggregationWithOptions(t *testing.T) { + agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCardinalityAggregationWithFormat(t *testing.T) { + agg := NewCardinalityAggregation().Field("author.hash").Format("00000") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cardinality":{"field":"author.hash","format":"00000"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCardinalityAggregationWithMetaData(t *testing.T) { + agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go new file mode 100644 index 000000000..b2147bd9f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ExtendedStatsAggregation is a multi-value metrics aggregation that +// computes stats over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +type ExtendedStatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewExtendedStatsAggregation() *ExtendedStatsAggregation { + return &ExtendedStatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { + a.field = field + return a +} + +func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { + a.script = script + return a +} + +func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { + a.format = format + return a +} + +func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { + a.meta = metaData + return a +} + +func (a *ExtendedStatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "extended_stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "extended_stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["extended_stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats_test.go new file mode 100644 index 000000000..76489630d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
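The ExtendedStatsAggregation above can also take its input from a script instead of a field. A sketch, where the script body is an illustrative assumption rather than something from this patch:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Extended stats over a computed value rather than a stored field.
	agg := elastic.NewExtendedStatsAggregation().
		Script(elastic.NewScript("doc['grade'].value * 1.2")) // illustrative script
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}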
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestExtendedStatsAggregation(t *testing.T) { + agg := NewExtendedStatsAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"extended_stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestExtendedStatsAggregationWithFormat(t *testing.T) { + agg := NewExtendedStatsAggregation().Field("grade").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"extended_stats":{"field":"grade","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go new file mode 100644 index 000000000..f675cbdb4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go @@ -0,0 +1,105 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoBoundsAggregation is a metric aggregation that computes the +// bounding box containing all geo_point values for a field. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +type GeoBoundsAggregation struct { + field string + script *Script + wrapLongitude *bool + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoBoundsAggregation() *GeoBoundsAggregation { + return &GeoBoundsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { + a.field = field + return a +} + +func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { + a.script = script + return a +} + +func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { + a.wrapLongitude = &wrapLongitude + return a +} + +func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { + a.meta = metaData + return a +} + +func (a *GeoBoundsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "business_type" : "shop" } + // }, + // "aggs" : { + // "viewport" : { + // "geo_bounds" : { + // "field" : "location", + // "wrap_longitude" : true + // } + // } + // } + // } + // + // This method returns only the { "geo_bounds" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_bounds"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.wrapLongitude != nil { + opts["wrap_longitude"] = *a.wrapLongitude + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds_test.go new file mode 100644 index 000000000..ea713c604 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundsAggregation(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithMetaData(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go new file mode 100644 index 000000000..7d7de53d1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxAggregation is a single-value metrics aggregation that keeps track and +// returns the maximum value among the numeric values extracted from +// the aggregated documents. 
These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +type MaxAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMaxAggregation() *MaxAggregation { + return &MaxAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MaxAggregation) Field(field string) *MaxAggregation { + a.field = field + return a +} + +func (a *MaxAggregation) Script(script *Script) *MaxAggregation { + a.script = script + return a +} + +func (a *MaxAggregation) Format(format string) *MaxAggregation { + a.format = format + return a +} + +func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { + a.meta = metaData + return a +} +func (a *MaxAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "max_price" : { "max" : { "field" : "price" } } + // } + // } + // This method returns only the { "max" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["max"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max_test.go new file mode 100644 index 000000000..773cc2e4b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
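A corresponding sketch for the MaxAggregation above, adding a format string; it mirrors TestMaxAggregationWithFormat below, so the printed JSON matches that test's expected value (field and format are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewMaxAggregation().Field("price").Format("00000.00")
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"max":{"field":"price","format":"00000.00"}}
}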
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMaxAggregation(t *testing.T) { + agg := NewMaxAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithFormat(t *testing.T) { + agg := NewMaxAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithMetaData(t *testing.T) { + agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go new file mode 100644 index 000000000..3a2578d7f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go @@ -0,0 +1,100 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MinAggregation is a single-value metrics aggregation that keeps track and +// returns the minimum value among numeric values extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by a +// provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +type MinAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMinAggregation() *MinAggregation { + return &MinAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MinAggregation) Field(field string) *MinAggregation { + a.field = field + return a +} + +func (a *MinAggregation) Script(script *Script) *MinAggregation { + a.script = script + return a +} + +func (a *MinAggregation) Format(format string) *MinAggregation { + a.format = format + return a +} + +func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { + a.meta = metaData + return a +} + +func (a *MinAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "min_price" : { "min" : { "field" : "price" } } + // } + // } + // This method returns only the { "min" : { "field" : "price" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["min"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min_test.go new file mode 100644 index 000000000..fcde3817c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinAggregation(t *testing.T) { + agg := NewMinAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithFormat(t *testing.T) { + agg := NewMinAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithMetaData(t *testing.T) { + agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go new file mode 100644 index 000000000..41623a9c3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
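To show where these builders plug in end to end, a sketch that attaches the MinAggregation above to a search request. It assumes a reachable Elasticsearch node on the default http://127.0.0.1:9200 and an index named "sales"; both names are illustrative and error handling is kept minimal:

package main

import (
	"context"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	ctx := context.Background()
	client, err := elastic.NewClient() // assumes a node on the default URL
	if err != nil {
		panic(err)
	}
	res, err := client.Search().
		Index("sales").
		Size(0). // aggregation-only; no hits needed
		Aggregation("min_price", elastic.NewMinAggregation().Field("price")).
		Do(ctx)
	if err != nil {
		panic(err)
	}
	if min, found := res.Aggregations.Min("min_price"); found && min.Value != nil {
		fmt.Println("min price:", *min.Value)
	}
}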
+ +package elastic + +// PercentileRanksAggregation is a multi-value metrics aggregation +// that calculates one or more percentile ranks over numeric values +// extracted from the aggregated documents. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +type PercentileRanksAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + values []float64 + compression *float64 + estimator string +} + +func NewPercentileRanksAggregation() *PercentileRanksAggregation { + return &PercentileRanksAggregation{ + subAggregations: make(map[string]Aggregation), + values: make([]float64, 0), + } +} + +func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { + a.field = field + return a +} + +func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { + a.script = script + return a +} + +func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { + a.format = format + return a +} + +func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { + a.meta = metaData + return a +} + +func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { + a.values = append(a.values, values...) + return a +} + +func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { + a.compression = &compression + return a +} + +func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { + a.estimator = estimator + return a +} + +func (a *PercentileRanksAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentile_ranks" : { + // "field" : "load_time", + // "values" : [15, 30] + // } + // } + // } + // } + // This method returns only the + // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } + // part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentile_ranks"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.values) > 0 { + opts["values"] = a.values + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks_test.go new file mode 100644 index 000000000..a4bac02b5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentileRanksAggregation(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithCustomValues(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithFormat(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithMetaData(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff 
--git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go new file mode 100644 index 000000000..0f7f77db9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentilesAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +type PercentilesAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + percentiles []float64 + compression *float64 + estimator string +} + +func NewPercentilesAggregation() *PercentilesAggregation { + return &PercentilesAggregation{ + subAggregations: make(map[string]Aggregation), + percentiles: make([]float64, 0), + } +} + +func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { + a.field = field + return a +} + +func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { + a.script = script + return a +} + +func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { + a.format = format + return a +} + +func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { + a.meta = metaData + return a +} + +func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { + a.percentiles = append(a.percentiles, percentiles...) + return a +} + +func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { + a.compression = &compression + return a +} + +func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { + a.estimator = estimator + return a +} + +func (a *PercentilesAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentiles" : { + // "field" : "load_time" + // } + // } + // } + // } + // This method returns only the + // { "percentiles" : { "field" : "load_time" } } + // part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentiles"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.percentiles) > 0 { + opts["percents"] = a.percentiles + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles_test.go new file mode 100644 index 000000000..93df1dd29 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentilesAggregation(t *testing.T) { + agg := NewPercentilesAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithCustomPercents(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithFormat(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithMetaData(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git 
a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go new file mode 100644 index 000000000..0a27f2e65 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsAggregation is a multi-value metrics aggregation that computes stats +// over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +type StatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewStatsAggregation() *StatsAggregation { + return &StatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *StatsAggregation) Field(field string) *StatsAggregation { + a.field = field + return a +} + +func (a *StatsAggregation) Script(script *Script) *StatsAggregation { + a.script = script + return a +} + +func (a *StatsAggregation) Format(format string) *StatsAggregation { + a.format = format + return a +} + +func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { + a.meta = metaData + return a +} + +func (a *StatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats_test.go new file mode 100644 index 000000000..5cff372d4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
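Finally, a sketch combining the StatsAggregation above with the terms bucketing from earlier in this patch, ordering buckets by one metric of the multi-valued stats result; this mirrors the height_stats example in the OrderByAggregationAndMetric comment, and the names are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Bucket by gender, order buckets by the avg metric of the
	// multi-valued height_stats sub-aggregation.
	agg := elastic.NewTermsAggregation().Field("gender").
		OrderByAggregationAndMetric("height_stats", "avg", false).
		SubAggregation("height_stats", elastic.NewStatsAggregation().Field("height"))
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"aggregations":{"height_stats":{"stats":{"field":"height"}}},"terms":{"field":"gender","order":{"height_stats.avg":"desc"}}}
}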
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestStatsAggregation(t *testing.T) { + agg := NewStatsAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithFormat(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithMetaData(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go new file mode 100644 index 000000000..9eb74d61a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumAggregation is a single-value metrics aggregation that sums up +// numeric values that are extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +type SumAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewSumAggregation() *SumAggregation { + return &SumAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SumAggregation) Field(field string) *SumAggregation { + a.field = field + return a +} + +func (a *SumAggregation) Script(script *Script) *SumAggregation { + a.script = script + return a +} + +func (a *SumAggregation) Format(format string) *SumAggregation { + a.format = format + return a +} + +func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { + a.meta = metaData + return a +} + +func (a *SumAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "intraday_return" : { "sum" : { "field" : "change" } } + // } + // } + // This method returns only the { "sum" : { "field" : "change" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sum"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum_test.go new file mode 100644 index 000000000..ff0e42545 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumAggregation(t *testing.T) { + agg := NewSumAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithFormat(t *testing.T) { + agg := NewSumAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithMetaData(t *testing.T) { + agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go new file mode 100644 index 000000000..9d84790b2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go @@ -0,0 +1,143 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TopHitsAggregation keeps track of the most relevant document +// being aggregated. This aggregator is intended to be used as a +// sub aggregator, so that the top matching documents +// can be aggregated per bucket. +// +// It can effectively be used to group result sets by certain fields via +// a bucket aggregator. 
One or more bucket aggregators determine the
+// properties by which a result set is sliced.
+//
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+	searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() *TopHitsAggregation {
+	return &TopHitsAggregation{
+		searchSource: NewSearchSource(),
+	}
+}
+
+func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.From(from)
+	return a
+}
+
+func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Size(size)
+	return a
+}
+
+func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.TrackScores(trackScores)
+	return a
+}
+
+func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Explain(explain)
+	return a
+}
+
+func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Version(version)
+	return a
+}
+
+func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation {
+	a.searchSource = a.searchSource.NoStoredFields()
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSource(fetchSource)
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+	return a
+}
+
+func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.DocvalueFields(docvalueFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.DocvalueField(docvalueField)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptField(scriptField)
+	return a
+}
+
+func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Sort(field, ascending)
+	return a
+}
+
+func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortWithInfo(info)
+	return a
+}
+
+func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortBy(sorter...)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Highlight(highlight)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlighter() *Highlight {
+	return a.searchSource.Highlighter()
+}
+
+func (a *TopHitsAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//   "aggs": {
+	//     "top_tag_hits": {
+	//       "top_hits": {
+	//         "sort": [
+	//           {
+	//             "last_activity_date": {
+	//               "order": "desc"
+	//             }
+	//           }
+	//         ],
+	//         "_source": {
+	//           "include": [
+	//             "title"
+	//           ]
+	//         },
+	//         "size" : 1
+	//       }
+	//     }
+	//   }
+	// }
+	// This method returns only the { "top_hits" : { ... } } part.
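+	//
+	// Unlike the metrics aggregations above, this type does not assemble
+	// its options itself: it delegates to the embedded SearchSource and
+	// wraps the result under "top_hits". A sketch (the field name is
+	// illustrative):
+	//
+	//	NewTopHitsAggregation().Sort("last_activity_date", false).Size(1)
+	//	// serializes to {"top_hits":{"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}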
+ + source := make(map[string]interface{}) + src, err := a.searchSource.Source() + if err != nil { + return nil, err + } + source["top_hits"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits_test.go new file mode 100644 index 000000000..ff238ee62 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTopHitsAggregation(t *testing.T) { + fsc := NewFetchSourceContext(true).Include("title") + agg := NewTopHitsAggregation(). + Sort("last_activity_date", false). + FetchSourceContext(fsc). + Size(1) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go new file mode 100644 index 000000000..772555523 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go @@ -0,0 +1,102 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ValueCountAggregation is a single-value metrics aggregation that counts +// the number of values that are extracted from the aggregated documents. +// These values can be extracted either from specific fields in the documents, +// or be generated by a provided script. Typically, this aggregator will be +// used in conjunction with other single-value aggregations. +// For example, when computing the avg one might be interested in the +// number of values the average is computed over. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +type ValueCountAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewValueCountAggregation() *ValueCountAggregation { + return &ValueCountAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { + a.field = field + return a +} + +func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { + a.script = script + return a +} + +func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { + a.format = format + return a +} + +func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
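+// The meta data is serialized as a top-level "meta" object next to
+// "value_count", e.g. (the values are illustrative):
+//
+//	NewValueCountAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"})
+//	// serializes to {"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}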
+func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { + a.meta = metaData + return a +} + +func (a *ValueCountAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_count" : { "value_count" : { "field" : "grade" } } + // } + // } + // This method returns only the { "value_count" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["value_count"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count_test.go new file mode 100644 index 000000000..18d2ba119 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestValueCountAggregation(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithFormat(t *testing.T) { + // Format comes with 1.5.0+ + agg := NewValueCountAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithMetaData(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go new file mode 100644 index 000000000..56c5aab5b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-present 
Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgBucketAggregation is a sibling pipeline aggregation which calculates +// the (mean) average value of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +type AvgBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. +func NewAvgBucketAggregation() *AvgBucketAggregation { + return &AvgBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *AvgBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["avg_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket_test.go new file mode 100644 index 000000000..019b8f1ad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgBucketAggregation(t *testing.T) { + agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go new file mode 100644 index 000000000..ddce02ebf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go @@ -0,0 +1,132 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketScriptAggregation is a parent pipeline aggregation which executes +// a script which can perform per bucket computations on specified metrics +// in the parent multi-bucket aggregation. The specified metric must be +// numeric and the script must return a numeric value. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +type BucketScriptAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. 
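+// Use the constructor rather than a zero value so that the internal maps
+// are initialized. A sketch (the paths and script are illustrative):
+//
+//	NewBucketScriptAggregation().
+//		AddBucketsPath("tShirtSales", "t-shirts>sales").
+//		AddBucketsPath("totalSales", "total_sales").
+//		Script(NewScript("tShirtSales / totalSales * 100"))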
+func NewBucketScriptAggregation() *BucketScriptAggregation { + return &BucketScriptAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketScriptAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_script"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go new file mode 100644 index 000000000..b4e6bf1c0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketScriptAggregation(t *testing.T) { + agg := NewBucketScriptAggregation(). + AddBucketsPath("tShirtSales", "t-shirts>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("tShirtSales / totalSales * 100")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":"tShirtSales / totalSales * 100"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go new file mode 100644 index 000000000..3e074b600 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go @@ -0,0 +1,134 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketSelectorAggregation is a parent pipeline aggregation which +// determines whether the current bucket will be retained in the parent +// multi-bucket aggregation. The specific metric must be numeric and +// the script must return a boolean value. If the script language is +// expression then a numeric return value is permitted. In this case 0.0 +// will be evaluated as false and all other values will evaluate to true. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html +type BucketSelectorAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + return &BucketSelectorAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. 
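+// Sub-aggregations are serialized under a separate top-level
+// "aggregations" key next to "bucket_selector". A sketch (the names are
+// illustrative; NewAvgAggregation is defined elsewhere in this package):
+//
+//	NewBucketSelectorAggregation().
+//		AddBucketsPath("totalSales", "total_sales").
+//		Script(NewScript("totalSales >= 1000")).
+//		SubAggregation("avg_height", NewAvgAggregation().Field("height"))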
+func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketSelectorAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_selector"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go new file mode 100644 index 000000000..dd276a867 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketSelectorAggregation(t *testing.T) { + agg := NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("totalSales >= 1000")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":"totalSales \u003e= 1000"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go new file mode 100644 index 000000000..4a3d4b6ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go @@ -0,0 +1,90 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CumulativeSumAggregation is a parent pipeline aggregation which calculates +// the cumulative sum of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +type CumulativeSumAggregation struct { + format string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. +func NewCumulativeSumAggregation() *CumulativeSumAggregation { + return &CumulativeSumAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { + a.format = format + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *CumulativeSumAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["cumulative_sum"] = params + + if a.format != "" { + params["format"] = a.format + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum_test.go new file mode 100644 index 000000000..69a215d43 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCumulativeSumAggregation(t *testing.T) { + agg := NewCumulativeSumAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cumulative_sum":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go new file mode 100644 index 000000000..7f6f7327a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go @@ -0,0 +1,124 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DerivativeAggregation is a parent pipeline aggregation which calculates +// the derivative of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +type DerivativeAggregation struct { + format string + gapPolicy string + unit string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. +func NewDerivativeAggregation() *DerivativeAggregation { + return &DerivativeAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { + a.gapPolicy = "skip" + return a +} + +// Unit sets the unit provided, e.g. "1d" or "1y". +// It is only useful when calculating the derivative using a date_histogram. +func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { + a.unit = unit + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. 
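+// A single path is serialized as a plain string, several paths as an
+// array (the path names below are illustrative):
+//
+//	NewDerivativeAggregation().BucketsPath("sales")          // "buckets_path":"sales"
+//	NewDerivativeAggregation().BucketsPath("sales", "costs") // "buckets_path":["sales","costs"]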
+func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *DerivativeAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["derivative"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.unit != "" { + params["unit"] = a.unit + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative_test.go new file mode 100644 index 000000000..7e7b26749 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDerivativeAggregation(t *testing.T) { + agg := NewDerivativeAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"derivative":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go new file mode 100644 index 000000000..6eb13aa10 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxBucketAggregation is a sibling pipeline aggregation which identifies +// the bucket(s) with the maximum value of a specified metric in a sibling +// aggregation and outputs both the value and the key(s) of the bucket(s). +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +type MaxBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. 
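+// A typical configuration points the aggregation at a sibling metric
+// and chooses a gap policy (the path below is illustrative):
+//
+//	NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+//	// serializes to {"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}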
+func NewMaxBucketAggregation() *MaxBucketAggregation { + return &MaxBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MaxBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["max_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket_test.go new file mode 100644 index 000000000..aa9bf2f6d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestMaxBucketAggregation(t *testing.T) {
+	agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
new file mode 100644
index 000000000..c70f1bc78
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+	format    string
+	gapPolicy string
+
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	bucketsPaths    []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
+func NewMinBucketAggregation() *MinBucketAggregation {
+	return &MinBucketAggregation{
+		subAggregations: make(map[string]Aggregation),
+		bucketsPaths:    make([]string, 0),
+	}
+}
+
+func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation {
+	a.format = format
+	return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
+func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation {
+	a.gapPolicy = gapPolicy
+	return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation {
+	a.gapPolicy = "insert_zeros"
+	return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation {
+	a.gapPolicy = "skip"
+	return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation {
+	a.meta = metaData
+	return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation {
+	a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a +} + +func (a *MinBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["min_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket_test.go new file mode 100644 index 000000000..ff4abf2b2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinBucketAggregation(t *testing.T) { + agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go new file mode 100644 index 000000000..017e8b1e0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go @@ -0,0 +1,393 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MovAvgAggregation operates on a series of data. It will slide a window +// across the data and emit the average value of that window. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +type MovAvgAggregation struct { + format string + gapPolicy string + model MovAvgModel + window *int + predict *int + minimize *bool + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. +func NewMovAvgAggregation() *MovAvgAggregation { + return &MovAvgAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". 
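+// The GapSkip and GapInsertZeros helpers below set the same field, so
+// the following calls are equivalent:
+//
+//	NewMovAvgAggregation().GapPolicy("skip")
+//	NewMovAvgAggregation().GapSkip()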
+func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { + a.gapPolicy = "skip" + return a +} + +// Model is used to define what type of moving average you want to use +// in the series. +func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { + a.model = model + return a +} + +// Window sets the window size for the moving average. This window will +// "slide" across the series, and the values inside that window will +// be used to calculate the moving avg value. +func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { + a.window = &window + return a +} + +// Predict sets the number of predictions that should be returned. +// Each prediction will be spaced at the intervals in the histogram. +// E.g. a predict of 2 will return two new buckets at the end of the +// histogram with the predicted values. +func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { + a.predict = &numPredictions + return a +} + +// Minimize determines if the model should be fit to the data using a +// cost minimizing algorithm. +func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { + a.minimize = &minimize + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *MovAvgAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["moving_avg"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.model != nil { + params["model"] = a.model.Name() + settings := a.model.Settings() + if len(settings) > 0 { + params["settings"] = settings + } + } + if a.window != nil { + params["window"] = *a.window + } + if a.predict != nil { + params["predict"] = *a.predict + } + if a.minimize != nil { + params["minimize"] = *a.minimize + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Models for moving averages -- +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_models + +// MovAvgModel specifies the model to use with the MovAvgAggregation. +type MovAvgModel interface { + Name() string + Settings() map[string]interface{} +} + +// -- EWMA -- + +// EWMAMovAvgModel calculates an exponentially weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted +type EWMAMovAvgModel struct { + alpha *float64 +} + +// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. +func NewEWMAMovAvgModel() *EWMAMovAvgModel { + return &EWMAMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { + m.alpha = &alpha + return m +} + +// Name of the model. +func (m *EWMAMovAvgModel) Name() string { + return "ewma" +} + +// Settings of the model. +func (m *EWMAMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + return settings +} + +// -- Holt linear -- + +// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear +type HoltLinearMovAvgModel struct { + alpha *float64 + beta *float64 +} + +// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. +func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { + return &HoltLinearMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. 
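+// Only values that have been set explicitly end up in the serialized
+// model settings, e.g. (the values are illustrative):
+//
+//	NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)
+//	// model "holt" with settings {"alpha":0.5,"beta":0.4}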
+func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel {
+	m.alpha = &alpha
+	return m
+}
+
+// Beta is equivalent to Alpha but controls the smoothing of the trend
+// instead of the data.
+func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel {
+	m.beta = &beta
+	return m
+}
+
+// Name of the model.
+func (m *HoltLinearMovAvgModel) Name() string {
+	return "holt"
+}
+
+// Settings of the model.
+func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} {
+	settings := make(map[string]interface{})
+	if m.alpha != nil {
+		settings["alpha"] = *m.alpha
+	}
+	if m.beta != nil {
+		settings["beta"] = *m.beta
+	}
+	return settings
+}
+
+// -- Holt Winters --
+
+// HoltWintersMovAvgModel calculates a triple exponential weighted moving average.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters
+type HoltWintersMovAvgModel struct {
+	alpha           *float64
+	beta            *float64
+	gamma           *float64
+	period          *int
+	seasonalityType string
+	pad             *bool
+}
+
+// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel.
+func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel {
+	return &HoltWintersMovAvgModel{}
+}
+
+// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
+// of past values (e.g. a random walk), while alpha = 0 retains infinite
+// memory of past values (e.g. the series mean). Useful values are somewhere
+// in between. Defaults to 0.5.
+func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel {
+	m.alpha = &alpha
+	return m
+}
+
+// Beta is equivalent to Alpha but controls the smoothing of the trend
+// instead of the data.
+func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel {
+	m.beta = &beta
+	return m
+}
+
+// Gamma is equivalent to Alpha but controls the smoothing of the
+// seasonal component instead of the data.
+func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel {
+	m.gamma = &gamma
+	return m
+}
+
+// Period sets the number of buckets that one period of the seasonality
+// spans.
+func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel {
+	m.period = &period
+	return m
+}
+
+// SeasonalityType sets how seasonality is modeled; it is serialized as
+// the "type" entry of the model settings.
+func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel {
+	m.seasonalityType = typ
+	return m
+}
+
+// Pad sets whether the series should be padded, which can help when the
+// data contains values at or near zero; it is serialized as the "pad"
+// entry of the model settings.
+func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel {
+	m.pad = &pad
+	return m
+}
+
+// Name of the model.
+func (m *HoltWintersMovAvgModel) Name() string {
+	return "holt_winters"
+}
+
+// Settings of the model.
+func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} {
+	settings := make(map[string]interface{})
+	if m.alpha != nil {
+		settings["alpha"] = *m.alpha
+	}
+	if m.beta != nil {
+		settings["beta"] = *m.beta
+	}
+	if m.gamma != nil {
+		settings["gamma"] = *m.gamma
+	}
+	if m.period != nil {
+		settings["period"] = *m.period
+	}
+	if m.pad != nil {
+		settings["pad"] = *m.pad
+	}
+	if m.seasonalityType != "" {
+		settings["type"] = m.seasonalityType
+	}
+	return settings
+}
+
+// -- Linear --
+
+// LinearMovAvgModel calculates a linearly weighted moving average, such
+// that older values are linearly less important. "Time" is determined
+// by position in collection.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_linear
+type LinearMovAvgModel struct {
+}
+
+// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel.
+func NewLinearMovAvgModel() *LinearMovAvgModel {
+	return &LinearMovAvgModel{}
+}
+
+// Name of the model.
+func (m *LinearMovAvgModel) Name() string { + return "linear" +} + +// Settings of the model. +func (m *LinearMovAvgModel) Settings() map[string]interface{} { + return nil +} + +// -- Simple -- + +// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_simple +type SimpleMovAvgModel struct { +} + +// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. +func NewSimpleMovAvgModel() *SimpleMovAvgModel { + return &SimpleMovAvgModel{} +} + +// Name of the model. +func (m *SimpleMovAvgModel) Name() string { + return "simple" +} + +// Settings of the model. +func (m *SimpleMovAvgModel) Settings() map[string]interface{} { + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg_test.go new file mode 100644 index 000000000..af2fc7c27 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg_test.go @@ -0,0 +1,132 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMovAvgAggregation(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSimpleModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithEWMAModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30). 
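+		// Holt-linear ("holt") extends EWMA with a beta setting that smooths the trend in addition to alpha.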
+ Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true). + Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSubAggs(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go new file mode 100644 index 000000000..590375ebb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go @@ -0,0 +1,124 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SerialDiffAggregation implements serial differencing. +// Serial differencing is a technique where values in a time series are +// subtracted from itself at different time lags or periods. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +type SerialDiffAggregation struct { + format string + gapPolicy string + lag *int + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. +func NewSerialDiffAggregation() *SerialDiffAggregation { + return &SerialDiffAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. 
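+// It is a convenience for GapPolicy("insert_zeros").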
+func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { + a.gapPolicy = "skip" + return a +} + +// Lag specifies the historical bucket to subtract from the current value. +// E.g. a lag of 7 will subtract the current value from the value 7 buckets +// ago. Lag must be a positive, non-zero integer. +func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { + a.lag = &lag + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *SerialDiffAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["serial_diff"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.lag != nil { + params["lag"] = *a.lag + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff_test.go new file mode 100644 index 000000000..6d336a2ee --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSerialDiffAggregation(t *testing.T) { + agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go new file mode 100644 index 000000000..4cd204369 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumBucketAggregation is a sibling pipeline aggregation which calculates +// the sum across all buckets of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +type SumBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + return &SumBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
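+	// Paths accumulate across calls; Source() serializes a single path as a string and several paths as an array.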
+ return a +} + +func (a *SumBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["sum_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket_test.go new file mode 100644 index 000000000..be8275c81 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumBucketAggregation(t *testing.T) { + agg := NewSumBucketAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum_bucket":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go new file mode 100644 index 000000000..bf8049a2c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go @@ -0,0 +1,1003 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestAggsIntegrationAvgBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
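+		// The monthly histogram below sums prices per bucket; the avg_bucket sibling then averages those sums via the path "sales_per_month>sales".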
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.AvgBucket("avg_monthly_sales") + if !found { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected avg_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(939.2); got != want { + t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationDerivative(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].Derivative("sales_deriv") + if found { + t.Fatal("expected no sales_deriv aggregation") + } + if d != nil { + t.Fatal("expected no sales_deriv aggregation") + } + + d, found = agg.Buckets[1].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv 
aggregation") + } + if d.Value != nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[2].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value != nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[3].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMaxBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatal("expected max_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected max_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-04-01"; got != want { + t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected max_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(2448); got != want { + t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMinBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
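+		// min_bucket mirrors max_bucket, reporting the smallest "sales" value and the matching bucket key(s).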
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatal("expected min_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected min_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-06-01"; got != want { + t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected min_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(68); got != want { + t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationSumBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected sum_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(4696.0); got != want { + t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMovAvg(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
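+		// moving_avg runs inside the histogram; the first buckets carry no value yet, which the assertions below expect.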
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("the_sum", NewSumAggregation().Field("price")) + h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum")) + builder = builder.Aggregation("my_date_histo", h) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("my_date_histo") + if !found { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected sum_monthly_sales aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + d, found := agg.Buckets[0].MovAvg("the_movavg") + if found { + t.Fatal("expected no the_movavg aggregation") + } + if d != nil { + t.Fatal("expected no the_movavg aggregation") + } + + d, found = agg.Buckets[1].MovAvg("the_movavg") + if found { + t.Fatal("expected no the_movavg aggregation") + } + if d != nil { + t.Fatal("expected no the_movavg aggregation") + } + + d, found = agg.Buckets[2].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[3].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(695.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[4].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1279.3333333333333); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[5].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1157.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } +} + +func TestAggsIntegrationCumulativeSum(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
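+		// cumulative_sum runs as a sub-aggregation of the histogram and accumulates the "sales" sums bucket by bucket.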
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[1].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[2].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1390.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(3838.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != 
nil") + } + if got, want := *d.Value, float64(4628.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(4696.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketScript(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple")) + appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("apple_sales", appleFilter) + h = h.SubAggregation("apple_percentage", + NewBucketScriptAggregation(). + GapPolicy("insert_zeros"). + AddBucketsPath("appleSales", "apple_sales>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("params.appleSales / params.totalSales * 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Pretty(true).Do(context.TODO()) + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(100.0); got != want { + t.Fatalf("expected 
apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[1].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value != nil { + t.Fatal("expected apple_percentage value == nil") + } + + d, found = agg.Buckets[2].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(34.64052287581699); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketSelector(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_bucket_filter", + NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). 
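+			// Buckets for which the script below evaluates to false are dropped from the response.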
+ Script(NewScript("params.totalSales <= 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 2; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } +} + +func TestAggsIntegrationSerialDiff(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1)) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[1].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = 
agg.Buckets[2].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[3].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go new file mode 100644 index 000000000..6b6a54018 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go @@ -0,0 +1,3054 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestAggs(t *testing.T) { + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", + Retweets: 0, + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", + Retweets: 12, + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + // Terms 
Aggregate by user name + globalAgg := NewGlobalAggregation() + usersAgg := NewTermsAggregation().Field("user").Size(10).OrderByCountDesc() + retweetsAgg := NewTermsAggregation().Field("retweets").Size(10) + avgRetweetsAgg := NewAvgAggregation().Field("retweets") + avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true}) + minRetweetsAgg := NewMinAggregation().Field("retweets") + maxRetweetsAgg := NewMaxAggregation().Field("retweets") + sumRetweetsAgg := NewSumAggregation().Field("retweets") + statsRetweetsAgg := NewStatsAggregation().Field("retweets") + extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets") + valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets") + percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets") + percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75) + cardinalityAgg := NewCardinalityAggregation().Field("user") + significantTermsAgg := NewSignificantTermsAggregation().Field("message") + samplerAgg := NewSamplerAggregation().SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) + retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100) + retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100) + dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01") + missingTagsAgg := NewMissingAggregation().Field("tags") + retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100) + dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year") + retweetsFilterAgg := NewFilterAggregation().Filter( + NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")). 
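+		// A filter aggregation can carry sub-aggregations that only see the filtered documents.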
+ SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets")) + queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang")) + topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true) + topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) + geoBoundsAgg := NewGeoBoundsAggregation().Field("location") + geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5) + + // Run query + builder := client.Search().Index(testIndexName).Query(all).Pretty(true) + builder = builder.Aggregation("global", globalAgg) + builder = builder.Aggregation("users", usersAgg) + builder = builder.Aggregation("retweets", retweetsAgg) + builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) + if esversion >= "2.0" { + builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) + } + builder = builder.Aggregation("minRetweets", minRetweetsAgg) + builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) + builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) + builder = builder.Aggregation("statsRetweets", statsRetweetsAgg) + builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg) + builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg) + builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg) + builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg) + builder = builder.Aggregation("usersCardinality", cardinalityAgg) + builder = builder.Aggregation("significantTerms", significantTermsAgg) + builder = builder.Aggregation("sample", samplerAgg) + builder = builder.Aggregation("retweetsRange", retweetsRangeAgg) + builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg) + builder = builder.Aggregation("dateRange", dateRangeAgg) + builder = builder.Aggregation("missingTags", missingTagsAgg) + builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg) + builder = builder.Aggregation("dateHisto", dateHistoAgg) + builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg) + builder = builder.Aggregation("queryFilter", queryFilterAgg) + builder = builder.Aggregation("top-tags", topTagsAgg) + builder = builder.Aggregation("viewport", geoBoundsAgg) + builder = builder.Aggregation("geohashed", geoHashAgg) + if esversion >= "1.4" { + // Unnamed filters + countByUserAgg := NewFiltersAggregation(). + Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser", countByUserAgg) + // Named filters + countByUserAgg2 := NewFiltersAggregation(). + FilterWithName("olivere", NewTermQuery("user", "olivere")). 
+ FilterWithName("sandrae", NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser2", countByUserAgg2) + } + if esversion >= "2.0" { + // AvgBucket + dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("avgBucketDateHisto", dateHisto) + builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) + // MinBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("minBucketDateHisto", dateHisto) + builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) + // MaxBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("maxBucketDateHisto", dateHisto) + builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) + // SumBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("sumBucketDateHisto", dateHisto) + builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) + // MovAvg + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) + builder = builder.Aggregation("movingAvgDateHisto", dateHisto) + } + searchResult, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits)) + } + agg := searchResult.Aggregations + if agg == nil { + t.Fatalf("expected Aggregations != nil; got: nil") + } + + // Search for non-existent aggregate should return (nil, false) + unknownAgg, found := agg.Terms("no-such-aggregate") + if found { + t.Errorf("expected unknown aggregation to not be found; got: %v", found) + } + if unknownAgg != nil { + t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg) + } + + // Global + globalAggRes, found := agg.Global("global") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if globalAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if globalAggRes.DocCount != 3 { + t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount) + } + + // Search for existent aggregate (by name) should return (aggregate, true) + termsAggRes, found := agg.Terms("users") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if termsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(termsAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, 
len(termsAggRes.Buckets)) + } + if termsAggRes.Buckets[0].Key != "olivere" { + t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key) + } + if termsAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount) + } + if termsAggRes.Buckets[1].Key != "sandrae" { + t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key) + } + if termsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount) + } + + // A terms aggregate with keys that are not strings + retweetsAggRes, found := agg.Terms("retweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if retweetsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(retweetsAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets)) + } + + if retweetsAggRes.Buckets[0].Key != float64(0) { + t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key) + } + if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key) + } else if got != 0 { + t.Errorf("expected %d; got: %d", 0, got) + } + if retweetsAggRes.Buckets[0].KeyNumber != "0" { + t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber) + } + if retweetsAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount) + } + + if retweetsAggRes.Buckets[1].Key != float64(12) { + t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key) + } + if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber) + } else if got != 12 { + t.Errorf("expected %d; got: %d", 12, got) + } + if retweetsAggRes.Buckets[1].KeyNumber != "12" { + t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber) + } + if retweetsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount) + } + + if retweetsAggRes.Buckets[2].Key != float64(108) { + t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key) + } + if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber) + } else if got != 108 { + t.Errorf("expected %d; got: %d", 108, got) + } + if retweetsAggRes.Buckets[2].KeyNumber != "108" { + t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber) + } + if retweetsAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount) + } + + // avgRetweets + avgAggRes, found := agg.Avg("avgRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *avgAggRes.Value) + } + if *avgAggRes.Value != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value) + } + + // avgRetweetsWithMeta + if esversion >= "2.0" { + avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgMetaAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgMetaAggRes.Meta == nil { + t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) + } + metaDataValue, found := avgMetaAggRes.Meta["meta"] + if !found { + t.Fatalf("expected to return meta 
data key %q; got: %v", "meta", found) + } + if flag, ok := metaDataValue.(bool); !ok { + t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) + } else if flag != true { + t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) + } + } + + // minRetweets + minAggRes, found := agg.Min("minRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if minAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if minAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", minAggRes.Value) + } + if *minAggRes.Value != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value) + } + + // maxRetweets + maxAggRes, found := agg.Max("maxRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if maxAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if maxAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", maxAggRes.Value) + } + if *maxAggRes.Value != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value) + } + + // sumRetweets + sumAggRes, found := agg.Sum("sumRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if sumAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if sumAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", sumAggRes.Value) + } + if *sumAggRes.Value != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value) + } + + // statsRetweets + statsAggRes, found := agg.Stats("statsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if statsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if statsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, statsAggRes.Count) + } + if statsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", statsAggRes.Min) + } + if *statsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min) + } + if statsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", statsAggRes.Max) + } + if *statsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max) + } + if statsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", statsAggRes.Avg) + } + if *statsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg) + } + if statsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", statsAggRes.Sum) + } + if *statsAggRes.Sum != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum) + } + + // extstatsRetweets + extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if extStatsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if extStatsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count) + } + if extStatsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", extStatsAggRes.Min) + } + if *extStatsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min) + } + if extStatsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", extStatsAggRes.Max) + } + if *extStatsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max) + } + if extStatsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", extStatsAggRes.Avg) + } + if *extStatsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg) + } + if extStatsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", extStatsAggRes.Sum) + } + if *extStatsAggRes.Sum != 120.0 { + 
t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum) + } + if extStatsAggRes.SumOfSquares == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares) + } + if *extStatsAggRes.SumOfSquares != 11808.0 { + t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares) + } + if extStatsAggRes.Variance == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance) + } + if *extStatsAggRes.Variance != 2336.0 { + t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance) + } + if extStatsAggRes.StdDeviation == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation) + } + if *extStatsAggRes.StdDeviation != 48.33218389437829 { + t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation) + } + + // valueCountRetweets + valueCountAggRes, found := agg.ValueCount("valueCountRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if valueCountAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if valueCountAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value) + } + if *valueCountAggRes.Value != 3.0 { + t.Errorf("expected %v; got: %v", 3.0, *valueCountAggRes.Value) + } + + // percentilesRetweets + percentilesAggRes, found := agg.Percentiles("percentilesRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentilesAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + // ES 1.4.x returns 7: {"1.0":...} + // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...} + // So we're relaxing the test here. + if len(percentilesAggRes.Values) == 0 { + t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values) + } + if _, found := percentilesAggRes.Values["0.0"]; found { + t.Errorf("expected %v; got: %v", false, found) + } + if percentilesAggRes.Values["1.0"] != 0.24 { + t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"]) + } + if percentilesAggRes.Values["25.0"] != 6.0 { + t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"]) + } + if percentilesAggRes.Values["99.0"] != 106.08 { + t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"]) + } + + // percentileRanksRetweets + percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentileRanksAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(percentileRanksAggRes.Values) == 0 { + t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values) + } + if _, found := percentileRanksAggRes.Values["0.0"]; found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 { + t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"]) + } + if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 { + t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"]) + } + if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 { + t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"]) + } + + // usersCardinality + cardAggRes, found := agg.Cardinality("usersCardinality") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if cardAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if cardAggRes.Value == 
nil { + t.Fatalf("expected != nil; got: %v", cardAggRes.Value) + } + if *cardAggRes.Value != 2 { + t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value) + } + + // retweetsFilter + filterAggRes, found := agg.Filter("retweetsFilter") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if filterAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if filterAggRes.DocCount != 2 { + t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount) + } + + // Retrieve sub-aggregation + avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub") + if !found { + t.Error("expected sub-aggregation \"avgRetweetsSub\" to be found; got false") + } + if avgRetweetsAggRes == nil { + t.Fatal("expected sub-aggregation \"avgRetweetsSub\"; got nil") + } + if avgRetweetsAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value) + } + if *avgRetweetsAggRes.Value != 54.0 { + t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value) + } + + // queryFilter + queryFilterAggRes, found := agg.Filter("queryFilter") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if queryFilterAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if queryFilterAggRes.DocCount != 2 { + t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount) + } + + // significantTerms + stAggRes, found := agg.SignificantTerms("significantTerms") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if stAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if stAggRes.DocCount != 3 { + t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount) + } + if len(stAggRes.Buckets) != 0 { + t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets)) + } + + // sampler + samplerAggRes, found := agg.Sampler("sample") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if samplerAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if samplerAggRes.DocCount != 3 { + t.Errorf("expected %v; got: %v", 3, samplerAggRes.DocCount) + } + sub, found := samplerAggRes.Aggregations["tagged_with"] + if !found { + t.Fatalf("expected sub aggregation %q", "tagged_with") + } + if sub == nil { + t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub) + } + + // retweetsRange + rangeAggRes, found := agg.Range("retweetsRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if rangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(rangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets)) + } + if rangeAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount) + } + if rangeAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount) + } + if rangeAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount) + } + + // retweetsKeyedRange + keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if keyedRangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(keyedRangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets)) + } + _, found = keyedRangeAggRes.Buckets["no-such-key"] + if found { + t.Fatalf("expected bucket to not be found; got: %v", found) + } + bucket, found := keyedRangeAggRes.Buckets["*-10.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + 
t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["100.0-*"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + + // dateRange + dateRangeRes, found := agg.DateRange("dateRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateRangeRes == nil { + t.Fatal("expected != nil; got: nil") + } + if dateRangeRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount) + } + if dateRangeRes.Buckets[0].From != nil { + t.Fatal("expected From to be nil") + } + if dateRangeRes.Buckets[0].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[0].To != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To) + } + if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString) + } + if dateRangeRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount) + } + if dateRangeRes.Buckets[1].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[1].From != 1.325376e+12 { + t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From) + } + if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString) + } + if dateRangeRes.Buckets[1].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[1].To != 1.3569984e+12 { + t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To) + } + if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString) + } + if dateRangeRes.Buckets[2].DocCount != 0 { + t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount) + } + if dateRangeRes.Buckets[2].To != nil { + t.Fatal("expected To to be nil") + } + if dateRangeRes.Buckets[2].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[2].From != 1.3569984e+12 { + t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From) + } + if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString) + } + + // missingTags + missingRes, found := agg.Missing("missingTags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if missingRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if missingRes.DocCount != 0 { + t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount) + } + + // retweetsHisto + histoRes, found := agg.Histogram("retweetsHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if histoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(histoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets)) + } + if histoRes.Buckets[0].DocCount != 2 
{ + t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount) + } + if histoRes.Buckets[0].Key != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key) + } + if histoRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount) + } + if histoRes.Buckets[1].Key != 100.0 { + t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key) + } + + // dateHisto + dateHistoRes, found := agg.DateHistogram("dateHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateHistoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(dateHistoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets)) + } + if dateHistoRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount) + } + if dateHistoRes.Buckets[0].Key != 1.29384e+12 { + t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key) + } + if dateHistoRes.Buckets[0].KeyAsString == nil { + t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[0].KeyAsString) + } + if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString) + } + if dateHistoRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount) + } + if dateHistoRes.Buckets[1].Key != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key) + } + if dateHistoRes.Buckets[1].KeyAsString == nil { + t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[1].KeyAsString) + } + if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString) + } + + // topHits + topTags, found := agg.Terms("top-tags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topTags == nil { + t.Fatalf("expected != nil; got: nil") + } + if esversion >= "1.4.0" { + if topTags.DocCountErrorUpperBound != 0 { + t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) + } + if topTags.SumOfOtherDocCount != 1 { + t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) + } + } + if len(topTags.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) + } + if topTags.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount) + } + if topTags.Buckets[0].Key != "golang" { + t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key) + } + topHits, found := topTags.Buckets[0].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 2 { + t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits) + } + if topHits.Hits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(topHits.Hits.Hits) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits)) + } + hit := topHits.Hits.Hits[0] + if hit == nil { + t.Fatal("expected != nil; got: nil") + } + var tw tweet + if err := json.Unmarshal(*hit.Source, &tw); err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if tw.Message != "Welcome to Golang and Elasticsearch." 
{ + t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message) + } + if topTags.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount) + } + if topTags.Buckets[1].Key != "cycling" { + t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key) + } + topHits, found = topTags.Buckets[1].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + if topTags.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount) + } + if topTags.Buckets[2].Key != "elasticsearch" { + t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key) + } + topHits, found = topTags.Buckets[2].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + + // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name) + geoBoundsRes, found := agg.GeoBounds("viewport") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if geoBoundsRes == nil { + t.Fatalf("expected != nil; got: nil") + } + + // geohashed via geohash + geoHashRes, found := agg.GeoHash("geohashed") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if geoHashRes == nil { + t.Fatalf("expected != nil; got: nil") + } + + if esversion >= "1.4" { + // Filters agg "countByUser" (unnamed) + countByUserAggRes, found := agg.Filters("countByUser") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUserAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUserAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) + } + if len(countByUserAggRes.NamedBuckets) != 0 { + t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets)) + } + if countByUserAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) + } + if countByUserAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) + } + + // Filters agg "countByUser2" (named) + countByUser2AggRes, found := agg.Filters("countByUser2") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUser2AggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUser2AggRes.Buckets) != 0 { + t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets)) + } + if len(countByUser2AggRes.NamedBuckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets)) + } + b, found := countByUser2AggRes.NamedBuckets["olivere"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "olivere", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "olivere", b) + } + if b.DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, b.DocCount) + } + b, found = countByUser2AggRes.NamedBuckets["sandrae"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "sandrae", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "sandrae", b) + } + 
if b.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, b.DocCount) + } + } +} + +// TestAggsMarshal ensures that marshaling aggregations back into a string +// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51 +// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details. +func TestAggsMarshal(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + dhagg := NewDateHistogramAggregation().Field("created").Interval("year") + + // Run query + builder := client.Search().Index(testIndexName).Query(all) + builder = builder.Aggregation("dhagg", dhagg) + searchResult, err := builder.Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.TotalHits() != 1 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits()) + } + if _, found := searchResult.Aggregations["dhagg"]; !found { + t.Fatalf("expected aggregation %q", "dhagg") + } + buf, err := json.Marshal(searchResult) + if err != nil { + t.Fatal(err) + } + s := string(buf) + if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 { + t.Errorf("expected to serialize aggregation into string; got: %v", s) + } +} + +func TestAggsMetricsMin(t *testing.T) { + s := `{ + "min_price": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Min("min_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsMax(t *testing.T) { + s := `{ + "max_price": { + "value": 35 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Max("max_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(35) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value) + } +} + +func TestAggsMetricsSum(t *testing.T) { + s := `{ + "intraday_return": { + "value": 2.18 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Sum("intraday_return") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } 
+ if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(2.18) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value) + } +} + +func TestAggsMetricsAvg(t *testing.T) { + s := `{ + "avg_grade": { + "value": 75 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Avg("avg_grade") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(75) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value) + } +} + +func TestAggsMetricsValueCount(t *testing.T) { + s := `{ + "grades_count": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ValueCount("grades_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsCardinality(t *testing.T) { + s := `{ + "author_count": { + "value": 12 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Cardinality("author_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value) + } +} + +func TestAggsMetricsStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 60, + "max": 98, + "avg": 78.5, + "sum": 471 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Stats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(60) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(98) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(78.5) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg) + } + if agg.Sum == nil { + 
t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(471) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum) + } +} + +func TestAggsMetricsExtendedStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 72, + "max": 117.6, + "avg": 94.2, + "sum": 565.2, + "sum_of_squares": 54551.51999999999, + "variance": 218.2799999999976, + "std_deviation": 14.774302013969987 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ExtendedStats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(72) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(117.6) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(94.2) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg) + } + if agg.Sum == nil { + t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(565.2) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum) + } + if agg.SumOfSquares == nil { + t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares) + } + if *agg.SumOfSquares != float64(54551.51999999999) { + t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares) + } + if agg.Variance == nil { + t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance) + } + if *agg.Variance != float64(218.2799999999976) { + t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance) + } + if agg.StdDeviation == nil { + t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation) + } + if *agg.StdDeviation != float64(14.774302013969987) { + t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation) + } +} + +func TestAggsMetricsPercentiles(t *testing.T) { + s := `{ + "load_time_outlier": { + "values" : { + "1.0": 15, + "5.0": 20, + "25.0": 23, + "50.0": 25, + "75.0": 29, + "95.0": 60, + "99.0": 150 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Percentiles("load_time_outlier") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Values == nil { + t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values) + } + if len(agg.Values) != 7 { + t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values)) + } + if agg.Values["1.0"] != float64(15) { + t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"]) + } + if agg.Values["5.0"] != float64(20) { + 
t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"]) + } + if agg.Values["25.0"] != float64(23) { + t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"]) + } + if agg.Values["50.0"] != float64(25) { + t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"]) + } + if agg.Values["75.0"] != float64(29) { + t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"]) + } + if agg.Values["95.0"] != float64(60) { + t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"]) + } + if agg.Values["99.0"] != float64(150) { + t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"]) + } +} + +func TestAggsMetricsPercentileRanks(t *testing.T) { + s := `{ + "load_time_outlier": { + "values" : { + "15": 92, + "30": 100 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.PercentileRanks("load_time_outlier") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Values == nil { + t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values) + } + if len(agg.Values) != 2 { + t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values)) + } + if agg.Values["15"] != float64(92) { + t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"]) + } + if agg.Values["30"] != float64(100) { + t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"]) + } +} + +func TestAggsMetricsTopHits(t *testing.T) { + s := `{ + "top-tags": { + "buckets": [ + { + "key": "windows-7", + "doc_count": 25365, + "top_tags_hits": { + "hits": { + "total": 25365, + "max_score": 1, + "hits": [ + { + "_index": "stack", + "_type": "question", + "_id": "602679", + "_score": 1, + "_source": { + "title": "Windows port opening" + }, + "sort": [ + 1370143231177 + ] + } + ] + } + } + }, + { + "key": "linux", + "doc_count": 18342, + "top_tags_hits": { + "hits": { + "total": 18342, + "max_score": 1, + "hits": [ + { + "_index": "stack", + "_type": "question", + "_id": "602672", + "_score": 1, + "_source": { + "title": "Ubuntu RFID Screensaver lock-unlock" + }, + "sort": [ + 1370143379747 + ] + } + ] + } + } + }, + { + "key": "windows", + "doc_count": 18119, + "top_tags_hits": { + "hits": { + "total": 18119, + "max_score": 1, + "hits": [ + { + "_index": "stack", + "_type": "question", + "_id": "602678", + "_score": 1, + "_source": { + "title": "If I change my computers date / time, what could be affected?" 
+ }, + "sort": [ + 1370142868283 + ] + } + ] + } + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("top-tags") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "windows-7" { + t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key) + } + if agg.Buckets[1].Key != "linux" { + t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key) + } + if agg.Buckets[2].Key != "windows" { + t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key) + } + + // Sub-aggregation of top-hits + subAgg, found := agg.Buckets[0].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 25365 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[1].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18342 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[2].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18119 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } +} + +func TestAggsBucketGlobal(t *testing.T) { + s := `{ + "all_products" : { + "doc_count" : 100, + "avg_price" : { + "value" : 56.3 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + 
t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Global("all_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFilter(t *testing.T) { + s := `{ + "in_stock_products" : { + "doc_count" : 100, + "avg_price" : { "value" : 56.3 } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filter("in_stock_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFiltersWithBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : [ + { + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets)) + } + + if agg.Buckets[0].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount) + } + subAgg, found := agg.Buckets[0].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.Buckets[1].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount) + } + subAgg, found = agg.Buckets[1].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : { + "errors" : 
{ + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + "warnings" : { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.NamedBuckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets) + } + if len(agg.NamedBuckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets)) + } + + if agg.NamedBuckets["errors"].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount) + } + subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.NamedBuckets["warnings"].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount) + } + subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketMissing(t *testing.T) { + s := `{ + "products_without_a_price" : { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Missing("products_without_a_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketNested(t *testing.T) { + s := `{ + "resellers": { + "min_price": { + "value" : 350 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Nested("resellers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 0 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("min_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(350) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value) + } +} + +func TestAggsBucketReverseNested(t *testing.T) { + s := `{ + "comment_to_issue": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ReverseNested("comment_to_issue") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if 
agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketChildren(t *testing.T) { + s := `{ + "to-answers": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Children("to-answers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketTerms(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : "olivere", + "doc_count" : 2 + }, { + "key" : "sandrae", + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "olivere" { + t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != "sandrae" { + t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketTermsWithNumericKeys(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : 17, + "doc_count" : 2 + }, { + "key" : 21, + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != float64(17) { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 17 { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != float64(21) { + t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key) + } + if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 21 { + t.Errorf("expected key %v; got: %v", 21, 
agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketTermsWithBoolKeys(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : true, + "doc_count" : 2 + }, { + "key" : false, + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != true { + t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != false { + t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketSignificantTerms(t *testing.T) { + s := `{ + "significantCrimeTypes" : { + "doc_count": 47347, + "buckets" : [ + { + "key": "Bicycle theft", + "doc_count": 3640, + "score": 0.371235374214817, + "bg_count": 66799 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SignificantTerms("significantCrimeTypes") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 47347 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 47347, agg.DocCount) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 1 { + t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "Bicycle theft" { + t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 3640 { + t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount) + } + if agg.Buckets[0].Score != float64(0.371235374214817) { + t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score) + } + if agg.Buckets[0].BgCount != 66799 { + t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount) + } +} + +func TestAggsBucketSampler(t *testing.T) { + s := `{ + "sample" : { + "doc_count": 1000, + "keywords": { + "doc_count": 1000, + "buckets" : [ + { + "key": "bend", + "doc_count": 58, + "score": 37.982536582524276, + "bg_count": 103 + } + ] + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Sampler("sample") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 1000 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 1000, 
agg.DocCount) + } + sub, found := agg.Aggregations["keywords"] + if !found { + t.Fatalf("expected sub aggregation %q", "keywords") + } + if sub == nil { + t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub) + } +} + +func TestAggsBucketRange(t *testing.T) { + s := `{ + "price_ranges" : { + "buckets": [ + { + "to": 50, + "doc_count": 2 + }, + { + "from": 50, + "to": 100, + "doc_count": 4 + }, + { + "from": 100, + "doc_count": 4 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Range("price_ranges") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(50) { + t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(50) { + t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From) + } + if agg.Buckets[1].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To) + } + if *agg.Buckets[1].To != float64(100) { + t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 4 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount) + } + if agg.Buckets[2].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From) + } + if *agg.Buckets[2].From != float64(100) { + t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From) + } + if agg.Buckets[2].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To) + } + if agg.Buckets[2].DocCount != 4 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount) + } +} + +func TestAggsBucketDateRange(t *testing.T) { + s := `{ + "range": { + "buckets": [ + { + "to": 1.3437792E+12, + "to_as_string": "08-2012", + "doc_count": 7 + }, + { + "from": 1.3437792E+12, + "from_as_string": "08-2012", + "doc_count": 2 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.DateRange("range") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(1.3437792E+12) { + 
t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "08-2012" { + t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 7 { + t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(1.3437792E+12) { + t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "08-2012" { + t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketIPv4Range(t *testing.T) { + s := `{ + "ip_ranges": { + "buckets" : [ + { + "to": 167772165, + "to_as_string": "10.0.0.5", + "doc_count": 4 + }, + { + "from": 167772165, + "from_as_string": "10.0.0.5", + "doc_count": 6 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.IPv4Range("ip_ranges") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(167772165) { + t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "10.0.0.5" { + t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 4 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(167772165) { + t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "10.0.0.5" { + t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 6 { + t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketHistogram(t *testing.T) { + s := `{ + "prices" : { + "buckets": [ + { + "key": 0, + "doc_count": 2 + }, + { + "key": 50, + "doc_count": 4 + }, + { + "key": 150, + "doc_count": 3 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Histogram("prices") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { 
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 0 { + t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 50 { + t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 4 { + t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount) + } + if agg.Buckets[2].Key != 150 { + t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key) + } + if agg.Buckets[2].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString) + } + if agg.Buckets[2].DocCount != 3 { + t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount) + } +} + +func TestAggsBucketDateHistogram(t *testing.T) { + s := `{ + "articles_over_time": { + "buckets": [ + { + "key_as_string": "2013-02-02", + "key": 1328140800000, + "doc_count": 1 + }, + { + "key_as_string": "2013-03-02", + "key": 1330646400000, + "doc_count": 2 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.DateHistogram("articles_over_time") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 1328140800000 { + t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString) + } + if *agg.Buckets[0].KeyAsString != "2013-02-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 1330646400000 { + t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString) + } + if *agg.Buckets[1].KeyAsString != "2013-03-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsMetricsGeoBounds(t *testing.T) { + s := `{ + "viewport": { + "bounds": { + "top_left": { + "lat": 80.45, + "lon": -160.22 + }, + "bottom_right": { + "lat": 40.65, + "lon": 42.57 + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoBounds("viewport") + if !found { + t.Fatalf("expected aggregation to be found; got: 
%v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Bounds.TopLeft.Latitude != float64(80.45) { + t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude) + } + if agg.Bounds.TopLeft.Longitude != float64(-160.22) { + t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude) + } + if agg.Bounds.BottomRight.Latitude != float64(40.65) { + t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude) + } + if agg.Bounds.BottomRight.Longitude != float64(42.57) { + t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude) + } +} + +func TestAggsBucketGeoHash(t *testing.T) { + s := `{ + "myLarge-GrainGeoHashGrid": { + "buckets": [ + { + "key": "svz", + "doc_count": 10964 + }, + { + "key": "sv8", + "doc_count": 3198 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "svz" { + t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 10964 { + t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != "sv8" { + t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 3198 { + t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketGeoDistance(t *testing.T) { + s := `{ + "rings" : { + "buckets": [ + { + "unit": "km", + "to": 100.0, + "doc_count": 3 + }, + { + "unit": "km", + "from": 100.0, + "to": 300.0, + "doc_count": 1 + }, + { + "unit": "km", + "from": 300.0, + "doc_count": 7 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoDistance("rings") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(100.0) { + t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To) + } + if agg.Buckets[0].DocCount != 3 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) + } + + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(100.0) { + t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From) + } + if 
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+	}
+	if *agg.Buckets[1].To != float64(300.0) {
+		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
+	}
+	if agg.Buckets[1].DocCount != 1 {
+		t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
+	}
+
+	if agg.Buckets[2].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+	}
+	if *agg.Buckets[2].From != float64(300.0) {
+		t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
+	}
+	if agg.Buckets[2].To != nil {
+		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+	}
+	if agg.Buckets[2].DocCount != 7 {
+		t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
+	}
+}
+
+func TestAggsSubAggregates(t *testing.T) {
+	rs := `{
+	"users" : {
+	  "doc_count_error_upper_bound" : 1,
+	  "sum_other_doc_count" : 2,
+	  "buckets" : [ {
+	    "key" : "olivere",
+	    "doc_count" : 2,
+	    "ts" : {
+	      "buckets" : [ {
+	        "key_as_string" : "2012-01-01T00:00:00.000Z",
+	        "key" : 1325376000000,
+	        "doc_count" : 2
+	      } ]
+	    }
+	  }, {
+	    "key" : "sandrae",
+	    "doc_count" : 1,
+	    "ts" : {
+	      "buckets" : [ {
+	        "key_as_string" : "2011-01-01T00:00:00.000Z",
+	        "key" : 1293840000000,
+	        "doc_count" : 1
+	      } ]
+	    }
+	  } ]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(rs), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	// Access top-level aggregation
+	users, found := aggs.Terms("users")
+	if !found {
+		t.Fatalf("expected users aggregation to be found; got: %v", found)
+	}
+	if users == nil {
+		t.Fatalf("expected users aggregation; got: %v", users)
+	}
+	if users.Buckets == nil {
+		t.Fatalf("expected users buckets; got: %v", users.Buckets)
+	}
+	if len(users.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
+	}
+	if users.Buckets[0].Key != "olivere" {
+		t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
+	}
+	if users.Buckets[0].DocCount != 2 {
+		t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
+	}
+	if users.Buckets[1].Key != "sandrae" {
+		t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
+	}
+	if users.Buckets[1].DocCount != 1 {
+		t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
+	}
+
+	// Access sub-aggregation
+	ts, found := users.Buckets[0].DateHistogram("ts")
+	if !found {
+		t.Fatalf("expected ts aggregation to be found; got: %v", found)
+	}
+	if ts == nil {
+		t.Fatalf("expected ts aggregation; got: %v", ts)
+	}
+	if ts.Buckets == nil {
+		t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
+	}
+	if len(ts.Buckets) != 1 {
+		t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
+	}
+	if ts.Buckets[0].Key != 1325376000000 {
+		t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
+	}
+	if ts.Buckets[0].KeyAsString == nil {
+		t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
+	}
+	if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
+		t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
+	}
+}
+
+func TestAggsPipelineAvgBucket(t *testing.T) {
+	s := `{
+	"avg_monthly_sales" : {
+	  "value" : 328.33333333333333
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.AvgBucket("avg_monthly_sales")
+	if
!found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(328.33333333333333) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value) + } +} + +func TestAggsPipelineSumBucket(t *testing.T) { + s := `{ + "sum_monthly_sales" : { + "value" : 985 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(985) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value) + } +} + +func TestAggsPipelineMaxBucket(t *testing.T) { + s := `{ + "max_monthly_sales" : { + "keys": ["2015/01/01 00:00:00"], + "value" : 550 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(550) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) + } +} + +func TestAggsPipelineMinBucket(t *testing.T) { + s := `{ + "min_monthly_sales" : { + "keys": ["2015/02/01 00:00:00"], + "value" : 60 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(60) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value) + } +} + +func TestAggsPipelineMovAvg(t *testing.T) { + s := `{ + "the_movavg" : { + "value" : 12.0 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MovAvg("the_movavg") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12.0) { + 
t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value) + } +} + +func TestAggsPipelineDerivative(t *testing.T) { + s := `{ + "sales_deriv" : { + "value" : 315 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Derivative("sales_deriv") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(315) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value) + } +} + +func TestAggsPipelineCumulativeSum(t *testing.T) { + s := `{ + "cumulative_sales" : { + "value" : 550 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.CumulativeSum("cumulative_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(550) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) + } +} + +func TestAggsPipelineBucketScript(t *testing.T) { + s := `{ + "t-shirt-percentage" : { + "value" : 20 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.BucketScript("t-shirt-percentage") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(20) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) + } +} + +func TestAggsPipelineSerialDiff(t *testing.T) { + s := `{ + "the_diff" : { + "value" : -722.0 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SerialDiff("the_diff") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(-722.0) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go new file mode 100644 index 000000000..5e15a3b94 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go @@ -0,0 +1,212 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "fmt" + +// A bool query matches documents matching boolean +// combinations of other queries. 
+// For more details, see: +// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html +type BoolQuery struct { + Query + mustClauses []Query + mustNotClauses []Query + filterClauses []Query + shouldClauses []Query + boost *float64 + disableCoord *bool + minimumShouldMatch string + adjustPureNegative *bool + queryName string +} + +// Creates a new bool query. +func NewBoolQuery() *BoolQuery { + return &BoolQuery{ + mustClauses: make([]Query, 0), + mustNotClauses: make([]Query, 0), + filterClauses: make([]Query, 0), + shouldClauses: make([]Query, 0), + } +} + +func (q *BoolQuery) Must(queries ...Query) *BoolQuery { + q.mustClauses = append(q.mustClauses, queries...) + return q +} + +func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { + q.mustNotClauses = append(q.mustNotClauses, queries...) + return q +} + +func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { + q.filterClauses = append(q.filterClauses, filters...) + return q +} + +func (q *BoolQuery) Should(queries ...Query) *BoolQuery { + q.shouldClauses = append(q.shouldClauses, queries...) + return q +} + +func (q *BoolQuery) Boost(boost float64) *BoolQuery { + q.boost = &boost + return q +} + +func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { + q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) + return q +} + +func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { + q.adjustPureNegative = &adjustPureNegative + return q +} + +func (q *BoolQuery) QueryName(queryName string) *BoolQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the bool query. +func (q *BoolQuery) Source() (interface{}, error) { + // { + // "bool" : { + // "must" : { + // "term" : { "user" : "kimchy" } + // }, + // "must_not" : { + // "range" : { + // "age" : { "from" : 10, "to" : 20 } + // } + // }, + // "filter" : [ + // ... 
+ // ] + // "should" : [ + // { + // "term" : { "tag" : "wow" } + // }, + // { + // "term" : { "tag" : "elasticsearch" } + // } + // ], + // "minimum_number_should_match" : 1, + // "boost" : 1.0 + // } + // } + + query := make(map[string]interface{}) + + boolClause := make(map[string]interface{}) + query["bool"] = boolClause + + // must + if len(q.mustClauses) == 1 { + src, err := q.mustClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must"] = src + } else if len(q.mustClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.mustClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must"] = clauses + } + + // must_not + if len(q.mustNotClauses) == 1 { + src, err := q.mustNotClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must_not"] = src + } else if len(q.mustNotClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.mustNotClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must_not"] = clauses + } + + // filter + if len(q.filterClauses) == 1 { + src, err := q.filterClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["filter"] = src + } else if len(q.filterClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.filterClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["filter"] = clauses + } + + // should + if len(q.shouldClauses) == 1 { + src, err := q.shouldClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["should"] = src + } else if len(q.shouldClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.shouldClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["should"] = clauses + } + + if q.boost != nil { + boolClause["boost"] = *q.boost + } + if q.disableCoord != nil { + boolClause["disable_coord"] = *q.disableCoord + } + if q.minimumShouldMatch != "" { + boolClause["minimum_should_match"] = q.minimumShouldMatch + } + if q.adjustPureNegative != nil { + boolClause["adjust_pure_negative"] = *q.adjustPureNegative + } + if q.queryName != "" { + boolClause["_name"] = q.queryName + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go new file mode 100644 index 000000000..1eb2038fd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go @@ -0,0 +1,34 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoolQuery(t *testing.T) { + q := NewBoolQuery() + q = q.Must(NewTermQuery("tag", "wow")) + q = q.MustNot(NewRangeQuery("age").From(10).To(20)) + q = q.Filter(NewTermQuery("account", "1")) + q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag")) + q = q.Boost(10) + q = q.DisableCoord(true) + q = q.QueryName("Test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go new file mode 100644 index 000000000..9f9a5366b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// A boosting query can be used to effectively +// demote results that match a given query. +// For more details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html +type BoostingQuery struct { + Query + positiveClause Query + negativeClause Query + negativeBoost *float64 + boost *float64 +} + +// Creates a new boosting query. +func NewBoostingQuery() *BoostingQuery { + return &BoostingQuery{} +} + +func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { + q.positiveClause = positive + return q +} + +func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { + q.negativeClause = negative + return q +} + +func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { + q.negativeBoost = &negativeBoost + return q +} + +func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { + q.boost = &boost + return q +} + +// Creates the query source for the boosting query. +func (q *BoostingQuery) Source() (interface{}, error) { + // { + // "boosting" : { + // "positive" : { + // "term" : { + // "field1" : "value1" + // } + // }, + // "negative" : { + // "term" : { + // "field2" : "value2" + // } + // }, + // "negative_boost" : 0.2 + // } + // } + + query := make(map[string]interface{}) + + boostingClause := make(map[string]interface{}) + query["boosting"] = boostingClause + + // Negative and positive clause as well as negative boost + // are mandatory in the Java client. 
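+//
+// A minimal sketch, mirroring the "common" JSON example shown in Source:
+//
+//	q := NewCommonTermsQuery("body", "this is bonsai cool").
+//		CutoffFrequency(0.001).
+//		LowFreqOperator("and") // operator value is illustrative
+//	src, err := q.Source()
+//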
+ + // positive + if q.positiveClause != nil { + src, err := q.positiveClause.Source() + if err != nil { + return nil, err + } + boostingClause["positive"] = src + } + + // negative + if q.negativeClause != nil { + src, err := q.negativeClause.Source() + if err != nil { + return nil, err + } + boostingClause["negative"] = src + } + + if q.negativeBoost != nil { + boostingClause["negative_boost"] = *q.negativeBoost + } + + if q.boost != nil { + boostingClause["boost"] = *q.boost + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting_test.go new file mode 100644 index 000000000..6c7f263f4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoostingQuery(t *testing.T) { + q := NewBoostingQuery() + q = q.Positive(NewTermQuery("tag", "wow")) + q = q.Negative(NewRangeQuery("age").From(10).To(20)) + q = q.NegativeBoost(0.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go new file mode 100644 index 000000000..e99f44303 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CommonTermsQuery is a modern alternative to stopwords +// which improves the precision and recall of search results +// (by taking stopwords into account), without sacrificing performance. +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-common-terms-query.html +type CommonTermsQuery struct { + Query + name string + text interface{} + cutoffFreq *float64 + highFreq *float64 + highFreqOp string + highFreqMinimumShouldMatch string + lowFreq *float64 + lowFreqOp string + lowFreqMinimumShouldMatch string + analyzer string + boost *float64 + disableCoord *bool + queryName string +} + +// NewCommonTermsQuery creates and initializes a new common terms query. 
+func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { + return &CommonTermsQuery{name: name, text: text} +} + +func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { + q.cutoffFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { + q.highFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { + q.highFreqOp = op + return q +} + +func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.highFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { + q.lowFreq = &f + return q +} + +func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { + q.lowFreqOp = op + return q +} + +func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.lowFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { + q.analyzer = analyzer + return q +} + +func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { + q.boost = &boost + return q +} + +func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the common query. +func (q *CommonTermsQuery) Source() (interface{}, error) { + // { + // "common": { + // "body": { + // "query": "this is bonsai cool", + // "cutoff_frequency": 0.001 + // } + // } + // } + source := make(map[string]interface{}) + body := make(map[string]interface{}) + query := make(map[string]interface{}) + + source["common"] = body + body[q.name] = query + query["query"] = q.text + + if q.cutoffFreq != nil { + query["cutoff_frequency"] = *q.cutoffFreq + } + if q.highFreq != nil { + query["high_freq"] = *q.highFreq + } + if q.highFreqOp != "" { + query["high_freq_operator"] = q.highFreqOp + } + if q.lowFreq != nil { + query["low_freq"] = *q.lowFreq + } + if q.lowFreqOp != "" { + query["low_freq_operator"] = q.lowFreqOp + } + if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { + mm := make(map[string]interface{}) + if q.lowFreqMinimumShouldMatch != "" { + mm["low_freq"] = q.lowFreqMinimumShouldMatch + } + if q.highFreqMinimumShouldMatch != "" { + mm["high_freq"] = q.highFreqMinimumShouldMatch + } + query["minimum_should_match"] = mm + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.disableCoord != nil { + query["disable_coord"] = *q.disableCoord + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go new file mode 100644 index 000000000..cade9247f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go @@ -0,0 +1,86 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestCommonTermsQuery(t *testing.T) {
+	q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchQueriesCommonTermsQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Common terms query
+	q := NewCommonTermsQuery("message", "Golang")
+	searchResult, err := client.Search().Index(testIndexName).Query(q).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go
new file mode 100644
index 000000000..0fc500cac
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go
@@ -0,0 +1,59 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ConstantScoreQuery is a query that wraps a filter and simply returns
+// a constant score equal to the query boost for every document in the filter.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html
+type ConstantScoreQuery struct {
+	filter Query
+	boost  *float64
+}
+
+// NewConstantScoreQuery creates and initializes a new constant score query.
+func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
+	return &ConstantScoreQuery{
+		filter: filter,
+	}
+}
+
+// Boost sets the boost for this query. 
Documents matching this query +// will (in addition to the normal weightings) have their score multiplied +// by the boost provided. +func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { + q.boost = &boost + return q +} + +// Source returns the query source. +func (q *ConstantScoreQuery) Source() (interface{}, error) { + // "constant_score" : { + // "filter" : { + // .... + // }, + // "boost" : 1.5 + // } + + query := make(map[string]interface{}) + + params := make(map[string]interface{}) + query["constant_score"] = params + + // filter + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["filter"] = src + + // boost + if q.boost != nil { + params["boost"] = *q.boost + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score_test.go new file mode 100644 index 000000000..6508a91fb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestConstantScoreQuery(t *testing.T) { + q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go new file mode 100644 index 000000000..52eaa31fb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go @@ -0,0 +1,104 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DisMaxQuery is a query that generates the union of documents produced by +// its subqueries, and that scores each document with the maximum score +// for that document as produced by any subquery, plus a tie breaking +// increment for any additional matching subqueries. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html +type DisMaxQuery struct { + queries []Query + boost *float64 + tieBreaker *float64 + queryName string +} + +// NewDisMaxQuery creates and initializes a new dis max query. +func NewDisMaxQuery() *DisMaxQuery { + return &DisMaxQuery{ + queries: make([]Query, 0), + } +} + +// Query adds one or more queries to the dis max query. +func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { + q.queries = append(q.queries, queries...) + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { + q.boost = &boost + return q +} + +// TieBreaker is the factor by which the score of each non-maximum disjunct +// for a document is multiplied with and added into the final score. 
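+//
+// A short sketch; the TieBreaker and Boost values mirror TestDisMaxQuery in
+// this patch:
+//
+//	q := NewDisMaxQuery().
+//		Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).
+//		TieBreaker(0.7).
+//		Boost(1.2)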
+//
+// If non-zero, the value should be small, on the order of 0.1, which says
+// that 10 occurrences of word in a lower-scored field that is also in a
+// higher scored field is just as good as a unique word in the lower scored
+// field (i.e., one that is not in any higher scored field).
+func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
+	q.tieBreaker = &tieBreaker
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched filters per hit.
+func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *DisMaxQuery) Source() (interface{}, error) {
+	// {
+	//  "dis_max" : {
+	//      "tie_breaker" : 0.7,
+	//      "boost" : 1.2,
+	//      "queries" : [
+	//          {
+	//              "term" : { "age" : 34 }
+	//          },
+	//          {
+	//              "term" : { "age" : 35 }
+	//          }
+	//      ]
+	//  }
+	// }
+
+	query := make(map[string]interface{})
+	params := make(map[string]interface{})
+	query["dis_max"] = params
+
+	if q.tieBreaker != nil {
+		params["tie_breaker"] = *q.tieBreaker
+	}
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+
+	// queries
+	var clauses []interface{}
+	for _, subQuery := range q.queries {
+		src, err := subQuery.Source()
+		if err != nil {
+			return nil, err
+		}
+		clauses = append(clauses, src)
+	}
+	params["queries"] = clauses
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max_test.go
new file mode 100644
index 000000000..76ddfb079
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max_test.go
@@ -0,0 +1,28 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestDisMaxQuery(t *testing.T) {
+	q := NewDisMaxQuery()
+	q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
new file mode 100644
index 000000000..b88555fc5
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that only matches documents in which the field
+// has a value.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
+type ExistsQuery struct {
+	name      string
+	queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
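+//
+// A one-line sketch; Source produces {"exists":{"field":"user"}}:
+//
+//	q := NewExistsQuery("user")
+//	src, err := q.Source()
+//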
+func NewExistsQuery(name string) *ExistsQuery { + return &ExistsQuery{ + name: name, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched queries per hit. +func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable content for this query. +func (q *ExistsQuery) Source() (interface{}, error) { + // { + // "exists" : { + // "field" : "user" + // } + // } + + query := make(map[string]interface{}) + params := make(map[string]interface{}) + query["exists"] = params + + params["field"] = q.name + if q.queryName != "" { + params["_name"] = q.queryName + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists_test.go new file mode 100644 index 000000000..f2d047087 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestExistsQuery(t *testing.T) { + q := NewExistsQuery("user") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"exists":{"field":"user"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go new file mode 100644 index 000000000..be15b6211 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go @@ -0,0 +1,172 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FunctionScoreQuery allows you to modify the score of documents that +// are retrieved by a query. This can be useful if, for example, +// a score function is computationally expensive and it is sufficient +// to compute the score on a filtered set of documents. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +type FunctionScoreQuery struct { + query Query + filter Query + boost *float64 + maxBoost *float64 + scoreMode string + boostMode string + filters []Query + scoreFuncs []ScoreFunction + minScore *float64 + weight *float64 +} + +// NewFunctionScoreQuery creates and initializes a new function score query. +func NewFunctionScoreQuery() *FunctionScoreQuery { + return &FunctionScoreQuery{ + filters: make([]Query, 0), + scoreFuncs: make([]ScoreFunction, 0), + } +} + +// Query sets the query for the function score query. +func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { + q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. +func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter + return q +} + +// Add adds a score function that will execute on all the documents +// matching the filter. 
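+//
+// A short sketch, assuming the weight factor score function defined in this
+// patch (query and weight values are illustrative):
+//
+//	fsq := NewFunctionScoreQuery().
+//		Query(NewTermQuery("user", "olivere")).
+//		Add(NewTermQuery("tag", "wow"), NewWeightFactorFunction(2.0)).
+//		BoostMode("multiply")
+//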
+func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, filter) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// AddScoreFunc adds a score function that will execute the function on all documents. +func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, nil) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// ScoreMode defines how results of individual score functions will be aggregated. +// Can be first, avg, max, sum, min, or multiply. +func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { + q.scoreMode = scoreMode + return q +} + +// BoostMode defines how the combined result of score functions will +// influence the final score together with the sub query score. +func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { + q.boostMode = boostMode + return q +} + +// MaxBoost is the maximum boost that will be applied by function score. +func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { + q.maxBoost = &maxBoost + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { + q.boost = &boost + return q +} + +// MinScore sets the minimum score. +func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { + q.minScore = &minScore + return q +} + +// Source returns JSON for the function score query. +func (q *FunctionScoreQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["function_score"] = query + + if q.query != nil { + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src + } + + if len(q.filters) == 1 && q.filters[0] == nil { + // Weight needs to be serialized on this level. + if weight := q.scoreFuncs[0].GetWeight(); weight != nil { + query["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[0].Source() + if err != nil { + return nil, err + } + query[q.scoreFuncs[0].Name()] = src + } else { + funcs := make([]interface{}, len(q.filters)) + for i, filter := range q.filters { + hsh := make(map[string]interface{}) + if filter != nil { + src, err := filter.Source() + if err != nil { + return nil, err + } + hsh["filter"] = src + } + // Weight needs to be serialized on this level. 
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil { + hsh["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[i].Source() + if err != nil { + return nil, err + } + hsh[q.scoreFuncs[i].Name()] = src + funcs[i] = hsh + } + query["functions"] = funcs + } + + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.boostMode != "" { + query["boost_mode"] = q.boostMode + } + if q.maxBoost != nil { + query["max_boost"] = *q.maxBoost + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.minScore != nil { + query["min_score"] = *q.minScore + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go new file mode 100644 index 000000000..5c60018ff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go @@ -0,0 +1,567 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "strings" +) + +// ScoreFunction is used in combination with the Function Score Query. +type ScoreFunction interface { + Name() string + GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery + Source() (interface{}, error) +} + +// -- Exponential Decay -- + +// ExponentialDecayFunction builds an exponential decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type ExponentialDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewExponentialDecayFunction creates a new ExponentialDecayFunction. +func NewExponentialDecayFunction() *ExponentialDecayFunction { + return &ExponentialDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ExponentialDecayFunction) Name() string { + return "exp" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ExponentialDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *ExponentialDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + return source, nil +} + +// -- Gauss Decay -- + +// GaussDecayFunction builds a gauss decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type GaussDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewGaussDecayFunction returns a new GaussDecayFunction. +func NewGaussDecayFunction() *GaussDecayFunction { + return &GaussDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *GaussDecayFunction) Name() string { + return "gauss" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. 
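+//
+// A sketch of a weighted gauss decay on a date field (origin, scale, and
+// weight values are illustrative):
+//
+//	fn := NewGaussDecayFunction().
+//		FieldName("created").
+//		Origin("2016-12-01").
+//		Scale("10d").
+//		Decay(0.5).
+//		Weight(2.0)
+//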
+func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *GaussDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *GaussDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Linear Decay -- + +// LinearDecayFunction builds a linear decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type LinearDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. +func NewLinearDecayFunction() *LinearDecayFunction { + return &LinearDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *LinearDecayFunction) Name() string { + return "linear" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. 
+// Returns nil if weight is not specified. +func (fn *LinearDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { + fn.multiValueMode = mode + return fn +} + +// GetMultiValueMode returns how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) GetMultiValueMode() string { + return fn.multiValueMode +} + +// Source returns the serializable JSON data of this score function. +func (fn *LinearDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Script -- + +// ScriptFunction builds a script score function. It uses a script to +// compute or influence the score of documents that match with the inner +// query or filter. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score +// for details. +type ScriptFunction struct { + script *Script + weight *float64 +} + +// NewScriptFunction initializes and returns a new ScriptFunction. +func NewScriptFunction(script *Script) *ScriptFunction { + return &ScriptFunction{ + script: script, + } +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ScriptFunction) Name() string { + return "script_score" +} + +// Script specifies the script to be executed. +func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { + fn.script = script + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ScriptFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *ScriptFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.script != nil { + src, err := fn.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Field value factor -- + +// FieldValueFactorFunction is a function score function that allows you +// to use a field from a document to influence the score. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor. 
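+//
+// A short sketch of the builder in use; the field name "income" and the
+// factor value are illustrative:
+//
+//	fn := NewFieldValueFactorFunction().
+//		Field("income").
+//		Factor(1.2).
+//		Modifier("sqrt").
+//		Missing(1)
+//	q := NewFunctionScoreQuery().
+//		Query(NewMatchAllQuery()).
+//		AddScoreFunc(fn)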
+type FieldValueFactorFunction struct { + field string + factor *float64 + missing *float64 + weight *float64 + modifier string +} + +// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. +func NewFieldValueFactorFunction() *FieldValueFactorFunction { + return &FieldValueFactorFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *FieldValueFactorFunction) Name() string { + return "field_value_factor" +} + +// Field is the field to be extracted from the document. +func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { + fn.field = field + return fn +} + +// Factor is the (optional) factor to multiply the field with. If you do not +// specify a factor, the default is 1. +func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { + fn.factor = &factor + return fn +} + +// Modifier to apply to the field value. It can be one of: none, log, log1p, +// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. +func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { + fn.modifier = modifier + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *FieldValueFactorFunction) GetWeight() *float64 { + return fn.weight +} + +// Missing is used if a document does not have that field. +func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { + fn.missing = &missing + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *FieldValueFactorFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.field != "" { + source["field"] = fn.field + } + if fn.factor != nil { + source["factor"] = *fn.factor + } + if fn.missing != nil { + source["missing"] = *fn.missing + } + if fn.modifier != "" { + source["modifier"] = strings.ToLower(fn.modifier) + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Weight Factor -- + +// WeightFactorFunction builds a weight factor function that multiplies +// the weight to the score. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight +// for details. +type WeightFactorFunction struct { + weight float64 +} + +// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. +func NewWeightFactorFunction(weight float64) *WeightFactorFunction { + return &WeightFactorFunction{weight: weight} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *WeightFactorFunction) Name() string { + return "weight" +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. 
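+//
+// For example, a constant weight of 3 can be combined with other score
+// functions (the term query below is illustrative):
+//
+//	q := NewFunctionScoreQuery().
+//		Query(NewTermQuery("user", "olivere")).
+//		AddScoreFunc(NewWeightFactorFunction(3)).
+//		AddScoreFunc(NewRandomFunction())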
+func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { + fn.weight = weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *WeightFactorFunction) GetWeight() *float64 { + return &fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *WeightFactorFunction) Source() (interface{}, error) { + // Notice that the weight has to be serialized in FunctionScoreQuery. + return fn.weight, nil +} + +// -- Random -- + +// RandomFunction builds a random score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random +// for details. +type RandomFunction struct { + seed interface{} + weight *float64 +} + +// NewRandomFunction initializes and returns a new RandomFunction. +func NewRandomFunction() *RandomFunction { + return &RandomFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *RandomFunction) Name() string { + return "random_score" +} + +// Seed is documented in 1.6 as a numeric value. However, in the source code +// of the Java client, it also accepts strings. So we accept both here, too. +func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { + fn.seed = seed + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *RandomFunction) Weight(weight float64) *RandomFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *RandomFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *RandomFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.seed != nil { + source["seed"] = fn.seed + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go new file mode 100644 index 000000000..a8e7430ce --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go @@ -0,0 +1,166 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFunctionScoreQuery(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)). + AddScoreFunc(NewWeightFactorFunction(3)). + AddScoreFunc(NewRandomFunction()). + Boost(3). + MaxBoost(10). 
+ ScoreMode("avg") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithNilFilter(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("tag", "wow")). + AddScoreFunc(NewRandomFunction()). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactor(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithWeight(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)). + AddScoreFunc(NewWeightFactorFunction(0.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). 
+ ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go new file mode 100644 index 000000000..152cbb0e6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyQuery uses similarity based on Levenshtein edit distance for +// string fields, and a +/- margin on numeric and date fields. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html +type FuzzyQuery struct { + name string + value interface{} + boost *float64 + fuzziness interface{} + prefixLength *int + maxExpansions *int + transpositions *bool + rewrite string + queryName string +} + +// NewFuzzyQuery creates a new fuzzy query. +func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { + q := &FuzzyQuery{ + name: name, + value: value, + } + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. 
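+//
+// A typical construction chains the setters; the values mirror the test
+// further below and are illustrative:
+//
+//	q := NewFuzzyQuery("user", "ki").
+//		Fuzziness(2).
+//		PrefixLength(0).
+//		MaxExpansions(100).
+//		Boost(1.5)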
+func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { + q.boost = &boost + return q +} + +// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings +// like "auto", "0..1", "1..4" or "0.0..1.0". +func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { + q.prefixLength = &prefixLength + return q +} + +func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { + q.maxExpansions = &maxExpansions + return q +} + +func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { + q.transpositions = &transpositions + return q +} + +func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *FuzzyQuery) Source() (interface{}, error) { + // { + // "fuzzy" : { + // "user" : { + // "value" : "ki", + // "boost" : 1.0, + // "fuzziness" : 2, + // "prefix_length" : 0, + // "max_expansions" : 100 + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["fuzzy"] = query + + fq := make(map[string]interface{}) + query[q.name] = fq + + fq["value"] = q.value + + if q.boost != nil { + fq["boost"] = *q.boost + } + if q.transpositions != nil { + fq["transpositions"] = *q.transpositions + } + if q.fuzziness != nil { + fq["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + fq["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + fq["max_expansions"] = *q.maxExpansions + } + if q.rewrite != "" { + fq["rewrite"] = q.rewrite + } + if q.queryName != "" { + fq["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy_test.go new file mode 100644 index 000000000..89140ca23 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFuzzyQuery(t *testing.T) { + q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go new file mode 100644 index 000000000..4b4e95501 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go @@ -0,0 +1,121 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "errors" + +// GeoBoundingBoxQuery allows to filter hits based on a point location using +// a bounding box. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-bounding-box-query.html +type GeoBoundingBoxQuery struct { + name string + top *float64 + left *float64 + bottom *float64 + right *float64 + typ string + queryName string +} + +// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. +func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { + return &GeoBoundingBoxQuery{ + name: name, + } +} + +func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { + q.top = &top + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomRight(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { + q.top = &top + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopRight(point.Lat, point.Lon) +} + +// Type sets the type of executing the geo bounding box. It can be either +// memory or indexed. It defaults to memory. +func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { + q.typ = typ + return q +} + +func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { + // { + // "geo_bbox" : { + // ... + // } + // } + + if q.top == nil { + return nil, errors.New("geo_bounding_box requires top latitude to be set") + } + if q.bottom == nil { + return nil, errors.New("geo_bounding_box requires bottom latitude to be set") + } + if q.right == nil { + return nil, errors.New("geo_bounding_box requires right longitude to be set") + } + if q.left == nil { + return nil, errors.New("geo_bounding_box requires left longitude to be set") + } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["geo_bbox"] = params + + box := make(map[string]interface{}) + box["top_left"] = []float64{*q.left, *q.top} + box["bottom_right"] = []float64{*q.right, *q.bottom} + params[q.name] = box + + if q.typ != "" { + params["type"] = q.typ + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box_test.go new file mode 100644 index 000000000..59cd437d5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundingBoxQueryIncomplete(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + // no bottom and no right here + q = q.Type("memory") + src, err := q.Source() + if err == nil { + t.Fatal("expected error") + } + if src != nil { + t.Fatal("expected empty source") + } +} + +func TestGeoBoundingBoxQuery(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + q = q.BottomRight(40.01, -71.12) + q = q.Type("memory") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1)) + q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go new file mode 100644 index 000000000..f84e73b23 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go @@ -0,0 +1,116 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoDistanceQuery filters documents that include only hits that exists +// within a specific distance from a geo point. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-query.html +type GeoDistanceQuery struct { + name string + distance string + lat float64 + lon float64 + geohash string + distanceType string + optimizeBbox string + queryName string +} + +// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. 
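+//
+// A minimal sketch; the field name and distance are illustrative:
+//
+//	q := NewGeoDistanceQuery("pin.location").
+//		Point(40, -70).
+//		Distance("200km")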
+func NewGeoDistanceQuery(name string) *GeoDistanceQuery { + return &GeoDistanceQuery{name: name} +} + +func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { + q.lat = point.Lat + q.lon = point.Lon + return q +} + +func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { + q.lat = lat + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { + q.lat = lat + return q +} + +func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { + q.geohash = geohash + return q +} + +func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { + q.distance = distance + return q +} + +func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { + q.distanceType = distanceType + return q +} + +func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery { + q.optimizeBbox = optimizeBbox + return q +} + +func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoDistanceQuery) Source() (interface{}, error) { + // { + // "geo_distance" : { + // "distance" : "200km", + // "pin.location" : { + // "lat" : 40, + // "lon" : -70 + // } + // } + // } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + + if q.geohash != "" { + params[q.name] = q.geohash + } else { + location := make(map[string]interface{}) + location["lat"] = q.lat + location["lon"] = q.lon + params[q.name] = location + } + + if q.distance != "" { + params["distance"] = q.distance + } + if q.distanceType != "" { + params["distance_type"] = q.distanceType + } + if q.optimizeBbox != "" { + params["optimize_bbox"] = q.optimizeBbox + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + source["geo_distance"] = params + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go new file mode 100644 index 000000000..7b91d94e8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go @@ -0,0 +1,70 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoDistanceQuery(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.Lat(40) + q = q.Lon(-70) + q = q.Distance("200km") + q = q.DistanceType("plane") + q = q.OptimizeBbox("memory") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceQueryWithGeoPoint(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.GeoPoint(GeoPointFromLatLon(40, -70)) + q = q.Distance("200km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceQueryWithGeoHash(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.GeoHash("drm3btev3e86") + q = q.Distance("12km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go new file mode 100644 index 000000000..dbd46a1ef --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go @@ -0,0 +1,72 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoPolygonQuery allows to include hits that only fall within a polygon of points. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-query.html +type GeoPolygonQuery struct { + name string + points []*GeoPoint + queryName string +} + +// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. +func NewGeoPolygonQuery(name string) *GeoPolygonQuery { + return &GeoPolygonQuery{ + name: name, + points: make([]*GeoPoint, 0), + } +} + +// AddPoint adds a point from latitude and longitude. +func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { + q.points = append(q.points, GeoPointFromLatLon(lat, lon)) + return q +} + +// AddGeoPoint adds a GeoPoint. +func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { + q.points = append(q.points, point) + return q +} + +func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
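+// For a polygon built as below (coordinates illustrative), each point is
+// serialized as {"lat": ..., "lon": ...}:
+//
+//	q := NewGeoPolygonQuery("person.location").
+//		AddPoint(40, -70).
+//		AddPoint(30, -80).
+//		AddPoint(20, -90)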
+func (q *GeoPolygonQuery) Source() (interface{}, error) { + // "geo_polygon" : { + // "person.location" : { + // "points" : [ + // {"lat" : 40, "lon" : -70}, + // {"lat" : 30, "lon" : -80}, + // {"lat" : 20, "lon" : -90} + // ] + // } + // } + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["geo_polygon"] = params + + polygon := make(map[string]interface{}) + params[q.name] = polygon + + var points []interface{} + for _, point := range q.points { + points = append(points, point.Source()) + } + polygon["points"] = points + + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon_test.go new file mode 100644 index 000000000..932c57d7b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon_test.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPolygonQuery(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddPoint(40, -70) + q = q.AddPoint(30, -80) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoPolygonQueryFromGeoPoints(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70}) + q = q.AddGeoPoint(GeoPointFromLatLon(30, -80)) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go new file mode 100644 index 000000000..ab0abb7f0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasChildQuery accepts a query and the child type to run against, and results +// in parent documents that have child docs matching the query. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html +type HasChildQuery struct { + query Query + childType string + boost *float64 + scoreMode string + minChildren *int + maxChildren *int + shortCircuitCutoff *int + queryName string + innerHit *InnerHit +} + +// NewHasChildQuery creates and initializes a new has_child query. +func NewHasChildQuery(childType string, query Query) *HasChildQuery { + return &HasChildQuery{ + query: query, + childType: childType, + } +} + +// Boost sets the boost for this query. +func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { + q.boost = &boost + return q +} + +// ScoreMode defines how the scores from the matching child documents +// are mapped into the parent document. Allowed values are: min, max, +// avg, or none. +func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery { + q.scoreMode = scoreMode + return q +} + +// MinChildren defines the minimum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { + q.minChildren = &minChildren + return q +} + +// MaxChildren defines the maximum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { + q.maxChildren = &maxChildren + return q +} + +// ShortCircuitCutoff configures what cut off point only to evaluate +// parent documents that contain the matching parent id terms instead +// of evaluating all parent docs. +func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { + q.shortCircuitCutoff = &shortCircuitCutoff + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. 
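+// For example (child type and term are illustrative), matching parents
+// with at least two matching children:
+//
+//	q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).
+//		ScoreMode("min").
+//		MinChildren(2)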
+func (q *HasChildQuery) Source() (interface{}, error) { + // { + // "has_child" : { + // "type" : "blog_tag", + // "score_mode" : "min", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_child"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["type"] = q.childType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.minChildren != nil { + query["min_children"] = *q.minChildren + } + if q.maxChildren != nil { + query["max_children"] = *q.maxChildren + } + if q.shortCircuitCutoff != nil { + query["short_circuit_cutoff"] = *q.shortCircuitCutoff + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child_test.go new file mode 100644 index 000000000..745c263f9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHasChildQuery(t *testing.T) { + q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).ScoreMode("min") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_child":{"query":{"term":{"tag":"something"}},"score_mode":"min","type":"blog_tag"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHasChildQueryWithInnerHit(t *testing.T) { + q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) + q = q.InnerHit(NewInnerHit().Name("comments")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go new file mode 100644 index 000000000..ee77d5cb4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasParentQuery accepts a query and a parent type. The query is executed +// in the parent document space which is specified by the parent type. +// This query returns child documents which associated parents have matched. +// For the rest has_parent query has the same options and works in the +// same manner as has_child query. 
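+//
+// A minimal sketch; the parent type and term are illustrative:
+//
+//	q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).
+//		Score(true)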
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html +type HasParentQuery struct { + query Query + parentType string + boost *float64 + score *bool + queryName string + innerHit *InnerHit +} + +// NewHasParentQuery creates and initializes a new has_parent query. +func NewHasParentQuery(parentType string, query Query) *HasParentQuery { + return &HasParentQuery{ + query: query, + parentType: parentType, + } +} + +// Boost sets the boost for this query. +func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { + q.boost = &boost + return q +} + +// Score defines if the parent score is mapped into the child documents. +func (q *HasParentQuery) Score(score bool) *HasParentQuery { + q.score = &score + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. +func (q *HasParentQuery) Source() (interface{}, error) { + // { + // "has_parent" : { + // "parent_type" : "blog", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_parent"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["parent_type"] = q.parentType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.score != nil { + query["score"] = *q.score + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent_test.go new file mode 100644 index 000000000..0fec395e3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHasParentQueryTest(t *testing.T) { + q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).Score(true) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go new file mode 100644 index 000000000..be70a65b7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go @@ -0,0 +1,76 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IdsQuery filters documents that only have the provided ids. +// Note, this query uses the _uid field. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html +type IdsQuery struct { + types []string + values []string + boost *float64 + queryName string +} + +// NewIdsQuery creates and initializes a new ids query. +func NewIdsQuery(types ...string) *IdsQuery { + return &IdsQuery{ + types: types, + values: make([]string, 0), + } +} + +// Ids adds ids to the filter. +func (q *IdsQuery) Ids(ids ...string) *IdsQuery { + q.values = append(q.values, ids...) + return q +} + +// Boost sets the boost for this query. +func (q *IdsQuery) Boost(boost float64) *IdsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter. +func (q *IdsQuery) QueryName(queryName string) *IdsQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *IdsQuery) Source() (interface{}, error) { + // { + // "ids" : { + // "type" : "my_type", + // "values" : ["1", "4", "100"] + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["ids"] = query + + // type(s) + if len(q.types) == 1 { + query["type"] = q.types[0] + } else if len(q.types) > 1 { + query["types"] = q.types + } + + // values + query["values"] = q.values + + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids_test.go new file mode 100644 index 000000000..b36605b4d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestIdsQuery(t *testing.T) { + q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go new file mode 100644 index 000000000..60c76a7c4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go @@ -0,0 +1,89 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IndicesQuery can be used when executed across multiple indices, allowing +// to have a query that executes only when executed on an index that matches +// a specific list of indices, and another query that executes when it is +// executed on an index that does not match the listed indices. 
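+//
+// A minimal sketch; the index names and terms are illustrative:
+//
+//	q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2").
+//		NoMatchQuery(NewTermQuery("tag", "kow"))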
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-indices-query.html +type IndicesQuery struct { + query Query + indices []string + noMatchQueryType string + noMatchQuery Query + queryName string +} + +// NewIndicesQuery creates and initializes a new indices query. +func NewIndicesQuery(query Query, indices ...string) *IndicesQuery { + return &IndicesQuery{ + query: query, + indices: indices, + } +} + +// NoMatchQuery sets the query to use when it executes on an index that +// does not match the indices provided. +func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery { + q.noMatchQuery = query + return q +} + +// NoMatchQueryType sets the no match query which can be either all or none. +func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery { + q.noMatchQueryType = typ + return q +} + +// QueryName sets the query name for the filter. +func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *IndicesQuery) Source() (interface{}, error) { + // { + // "indices" : { + // "indices" : ["index1", "index2"], + // "query" : { + // "term" : { "tag" : "wow" } + // }, + // "no_match_query" : { + // "term" : { "tag" : "kow" } + // } + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["indices"] = params + + params["indices"] = q.indices + + src, err := q.query.Source() + if err != nil { + return nil, err + } + params["query"] = src + + if q.noMatchQuery != nil { + src, err := q.noMatchQuery.Source() + if err != nil { + return nil, err + } + params["no_match_query"] = src + } else if q.noMatchQueryType != "" { + params["no_match_query"] = q.noMatchQueryType + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go new file mode 100644 index 000000000..0c04499d1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestIndicesQuery(t *testing.T) {
+	q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+	q = q.NoMatchQuery(NewTermQuery("tag", "kow"))
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestIndicesQueryWithNoMatchQueryType(t *testing.T) {
+	q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+	q = q.NoMatchQueryType("all")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"indices":{"indices":["index1","index2"],"no_match_query":"all","query":{"term":{"tag":"wow"}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go
new file mode 100644
index 000000000..e4fe6a3b8
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go
@@ -0,0 +1,214 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accepts text/numerics/dates,
+// analyzes them, and constructs a query.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"),
+// or use one of the shortcuts, e.g. NewMatchPhraseQuery(...).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
+type MatchQuery struct {
+	name                string
+	text                interface{}
+	typ                 string // boolean, phrase, phrase_prefix
+	operator            string // or / and
+	analyzer            string
+	boost               *float64
+	slop                *int
+	fuzziness           string
+	prefixLength        *int
+	maxExpansions       *int
+	minimumShouldMatch  string
+	rewrite             string
+	fuzzyRewrite        string
+	lenient             *bool
+	fuzzyTranspositions *bool
+	zeroTermsQuery      string
+	cutoffFrequency     *float64
+	queryName           string
+}
+
+// NewMatchQuery creates and initializes a new MatchQuery.
+func NewMatchQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text}
+}
+
+// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase.
+func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text, typ: "phrase"}
+}
+
+// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix.
+func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text, typ: "phrase_prefix"}
+}
+
+// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean".
+func (q *MatchQuery) Type(typ string) *MatchQuery {
+	q.typ = typ
+	return q
+}
+
+// Operator sets the operator to use when using a boolean query.
+// Can be "AND" or "OR" (default).
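+//
+// For example (field and query text are illustrative), requiring all terms
+// to match:
+//
+//	q := NewMatchQuery("message", "this is a test").
+//		Operator("and")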
+func (q *MatchQuery) Operator(operator string) *MatchQuery {
+	q.operator = operator
+	return q
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to use explicit
+// mapping config for the field, or, if not set, the default search analyzer.
+func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchQuery) Boost(boost float64) *MatchQuery {
+	q.boost = &boost
+	return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MatchQuery) Slop(slop int) *MatchQuery {
+	q.slop = &slop
+	return q
+}
+
+// Fuzziness sets the fuzziness when evaluated to a fuzzy query type.
+// Defaults to "AUTO".
+func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery {
+	q.prefixLength = &prefixLength
+	return q
+}
+
+// MaxExpansions is used with fuzzy or prefix type queries. It specifies
+// the number of term expansions to use. It defaults to unbounded, so it is
+// recommended to set it to a reasonable value for faster execution.
+func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery {
+	q.maxExpansions = &maxExpansions
+	return q
+}
+
+// CutoffFrequency can be a value in [0..1] (or an absolute number >=1).
+// It represents the maximum threshold of a term's document frequency to be
+// considered a low-frequency term.
+func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery {
+	q.cutoffFrequency = &cutoff
+	return q
+}
+
+func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery {
+	q.fuzzyRewrite = fuzzyRewrite
+	return q
+}
+
+func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery {
+	q.fuzzyTranspositions = &fuzzyTranspositions
+	return q
+}
+
+// Lenient specifies whether format-based failures will be ignored.
+func (q *MatchQuery) Lenient(lenient bool) *MatchQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery {
+	q.zeroTermsQuery = zeroTermsQuery
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchQuery) QueryName(queryName string) *MatchQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the match query.
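+//
+// For a query built as (values illustrative):
+//
+//	NewMatchQuery("message", "this is a test").
+//		Analyzer("whitespace").
+//		Operator("or").
+//		Boost(2.5)
+//
+// Source produces the equivalent of:
+//
+//	{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}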
+func (q *MatchQuery) Source() (interface{}, error) { + // {"match":{"name":{"query":"value","type":"boolean/phrase"}}} + source := make(map[string]interface{}) + + match := make(map[string]interface{}) + source["match"] = match + + query := make(map[string]interface{}) + match[q.name] = query + + query["query"] = q.text + + if q.typ != "" { + query["type"] = q.typ + } + if q.operator != "" { + query["operator"] = q.operator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.slop != nil { + query["slop"] = *q.slop + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + query["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + query["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.fuzzyTranspositions != nil { + query["fuzzy_transpositions"] = *q.fuzzyTranspositions + } + if q.zeroTermsQuery != "" { + query["zero_terms_query"] = q.zeroTermsQuery + } + if q.cutoffFrequency != nil { + query["cutoff_frequency"] = q.cutoffFrequency + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go new file mode 100644 index 000000000..2b3d8a71a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go @@ -0,0 +1,41 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MatchAllQuery is the most simple query, which matches all documents, +// giving them all a _score of 1.0. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-match-all-query.html +type MatchAllQuery struct { + boost *float64 +} + +// NewMatchAllQuery creates and initializes a new match all query. +func NewMatchAllQuery() *MatchAllQuery { + return &MatchAllQuery{} +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery { + q.boost = &boost + return q +} + +// Source returns JSON for the function score query. +func (q MatchAllQuery) Source() (interface{}, error) { + // { + // "match_all" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["match_all"] = params + if q.boost != nil { + params["boost"] = *q.boost + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all_test.go similarity index 63% rename from vendor/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go rename to vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all_test.go index 096b0b3cd..11cf5c5f7 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. @@ -9,8 +9,8 @@ import ( "testing" ) -func TestMissingQuery(t *testing.T) { - q := NewMissingQuery("user") +func TestMatchAllQuery(t *testing.T) { + q := NewMatchAllQuery() src, err := q.Source() if err != nil { t.Fatal(err) @@ -20,14 +20,14 @@ func TestMissingQuery(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"missing":{"field":"user"}}` + expected := `{"match_all":{}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } } -func TestMissingQueryWithParams(t *testing.T) { - q := NewMissingQuery("user").NullValue(true).Existence(true).QueryName("_my_query") +func TestMatchAllQueryWithBoost(t *testing.T) { + q := NewMatchAllQuery().Boost(3.14) src, err := q.Source() if err != nil { t.Fatal(err) @@ -37,7 +37,7 @@ func TestMissingQueryWithParams(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"missing":{"_name":"_my_query","existence":true,"field":"user","null_value":true}}` + expected := `{"match_all":{"boost":3.14}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_test.go new file mode 100644 index 000000000..af3fe688a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
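
Since the vendored files above only define the API, a minimal usage sketch follows (it is not part of the vendored patch; the field name "message" and the values are invented for illustration). It only serializes the queries, so no cluster is required:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// A match query nests its options under the field name ...
	match := elastic.NewMatchQuery("message", "this is a test").Operator("and")
	// ... while match_all only carries an optional boost.
	all := elastic.NewMatchAllQuery().Boost(1.2)

	for _, q := range []elastic.Query{match, all} {
		src, err := q.Source()
		if err != nil {
			panic(err)
		}
		b, _ := json.Marshal(src)
		fmt.Println(string(b)) // e.g. {"match_all":{"boost":1.2}}
	}
}
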
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMatchQuery(t *testing.T) { + q := NewMatchQuery("message", "this is a test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match":{"message":{"query":"this is a test"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMatchPhraseQuery(t *testing.T) { + q := NewMatchPhraseQuery("message", "this is a test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match":{"message":{"query":"this is a test","type":"phrase"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMatchPhrasePrefixQuery(t *testing.T) { + q := NewMatchPhrasePrefixQuery("message", "this is a test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match":{"message":{"query":"this is a test","type":"phrase_prefix"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMatchQueryWithOptions(t *testing.T) { + q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go new file mode 100644 index 000000000..97f0730a1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go @@ -0,0 +1,412 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// MoreLikeThis query (MLT Query) finds documents that are "like" a given +// set of documents. In order to do so, MLT selects a set of representative +// terms of these input documents, forms a query using these terms, executes +// the query and returns the results. The user controls the input documents, +// how the terms should be selected and how the query is formed. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html +type MoreLikeThisQuery struct { + fields []string + docs []*MoreLikeThisQueryItem + unlikeDocs []*MoreLikeThisQueryItem + include *bool + minimumShouldMatch string + minTermFreq *int + maxQueryTerms *int + stopWords []string + minDocFreq *int + maxDocFreq *int + minWordLen *int + maxWordLen *int + boostTerms *float64 + boost *float64 + analyzer string + failOnUnsupportedField *bool + queryName string +} + +// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery. 
+func NewMoreLikeThisQuery() *MoreLikeThisQuery {
+	return &MoreLikeThisQuery{
+		fields:     make([]string, 0),
+		stopWords:  make([]string, 0),
+		docs:       make([]*MoreLikeThisQueryItem, 0),
+		unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
+	}
+}
+
+// Field adds one or more field names to the query.
+func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
+	q.fields = append(q.fields, fields...)
+	return q
+}
+
+// StopWord sets the stopwords. Any word in this set is considered
+// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
+// you might want to tell the MoreLikeThis code to ignore them, as for
+// the purposes of document similarity it seems reasonable to assume that
+// "a stop word is never interesting".
+func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
+	q.stopWords = append(q.stopWords, stopWords...)
+	return q
+}
+
+// LikeText sets the text to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
+	for _, s := range likeTexts {
+		item := NewMoreLikeThisQueryItem().LikeText(s)
+		q.docs = append(q.docs, item)
+	}
+	return q
+}
+
+// LikeItems sets the documents to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+	q.docs = append(q.docs, docs...)
+	return q
+}
+
+// IgnoreLikeText sets the text from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
+	for _, s := range ignoreLikeText {
+		item := NewMoreLikeThisQueryItem().LikeText(s)
+		q.unlikeDocs = append(q.unlikeDocs, item)
+	}
+	return q
+}
+
+// IgnoreLikeItems sets the documents from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+	q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
+	return q
+}
+
+// Ids sets the document ids to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
+	for _, id := range ids {
+		item := NewMoreLikeThisQueryItem().Id(id)
+		q.docs = append(q.docs, item)
+	}
+	return q
+}
+
+// Include specifies whether the input documents should also be included
+// in the results returned. Defaults to false.
+func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
+	q.include = &include
+	return q
+}
+
+// MinimumShouldMatch sets the number of terms that must match the generated
+// query expressed in the common syntax for minimum should match.
+// The default value is "30%".
+//
+// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
+func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// MinTermFreq is the frequency below which terms will be ignored in the
+// source doc. The default frequency is 2.
+func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
+	q.minTermFreq = &minTermFreq
+	return q
+}
+
+// MaxQueryTerms sets the maximum number of query terms that will be included
+// in any generated query. It defaults to 25.
+func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
+	q.maxQueryTerms = &maxQueryTerms
+	return q
+}
+
+// MinDocFreq sets the minimum number of documents in which a word must
+// occur; words appearing in fewer docs are ignored. The default is 5.
+func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
+	q.minDocFreq = &minDocFreq
+	return q
+}
+
+// MaxDocFreq sets the maximum number of documents in which a word may
+// still appear. Words that appear in more than this many docs will be
+// ignored. It defaults to unbounded.
+func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
+	q.maxDocFreq = &maxDocFreq
+	return q
+}
+
+// MinWordLen sets the minimum word length below which words will be
+// ignored. It defaults to 0.
+func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery {
+	q.minWordLen = &minWordLen
+	return q
+}
+
+// MaxWordLen sets the maximum word length above which words will be ignored.
+// Defaults to unbounded (0).
+func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery {
+	q.maxWordLen = &maxWordLen
+	return q
+}
+
+// BoostTerms sets the boost factor to use when boosting terms.
+// It defaults to 1.
+func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
+	q.boostTerms = &boostTerms
+	return q
+}
+
+// Analyzer specifies the analyzer that will be used to analyze the text.
+// Defaults to the analyzer associated with the field.
+func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
+	q.boost = &boost
+	return q
+}
+
+// FailOnUnsupportedField indicates whether to fail or return no result
+// when this query is run against a field which is not supported such as
+// a binary/numeric field.
+func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
+	q.failOnUnsupportedField = &fail
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source creates the source for the MLT query.
+// It may return an error if the caller forgot to specify any documents to
+// be "liked" in the MoreLikeThisQuery.
+func (q *MoreLikeThisQuery) Source() (interface{}, error) {
+	// {
+	//   "mlt" : { ...
} + // } + if len(q.docs) == 0 { + return nil, errors.New(`more_like_this requires some documents to be "liked"`) + } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["mlt"] = params + + if len(q.fields) > 0 { + params["fields"] = q.fields + } + + var likes []interface{} + for _, doc := range q.docs { + src, err := doc.Source() + if err != nil { + return nil, err + } + likes = append(likes, src) + } + params["like"] = likes + + if len(q.unlikeDocs) > 0 { + var dontLikes []interface{} + for _, doc := range q.unlikeDocs { + src, err := doc.Source() + if err != nil { + return nil, err + } + dontLikes = append(dontLikes, src) + } + params["unlike"] = dontLikes + } + + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch + } + if q.minTermFreq != nil { + params["min_term_freq"] = *q.minTermFreq + } + if q.maxQueryTerms != nil { + params["max_query_terms"] = *q.maxQueryTerms + } + if len(q.stopWords) > 0 { + params["stop_words"] = q.stopWords + } + if q.minDocFreq != nil { + params["min_doc_freq"] = *q.minDocFreq + } + if q.maxDocFreq != nil { + params["max_doc_freq"] = *q.maxDocFreq + } + if q.minWordLen != nil { + params["min_word_len"] = *q.minWordLen + } + if q.maxWordLen != nil { + params["max_word_len"] = *q.maxWordLen + } + if q.boostTerms != nil { + params["boost_terms"] = *q.boostTerms + } + if q.boost != nil { + params["boost"] = *q.boost + } + if q.analyzer != "" { + params["analyzer"] = q.analyzer + } + if q.failOnUnsupportedField != nil { + params["fail_on_unsupported_field"] = *q.failOnUnsupportedField + } + if q.queryName != "" { + params["_name"] = q.queryName + } + if q.include != nil { + params["include"] = *q.include + } + + return source, nil +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". +type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. 
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. +func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. +func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { + if item.likeText != "" { + return item.likeText, nil + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go new file mode 100644 index 000000000..76691a33b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go @@ -0,0 +1,93 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
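
Before the tests that follow, a short sketch of assembling a MoreLikeThisQuery from the builder methods above. The index, type, and id values are invented for illustration, and Source() returns an error when nothing was "liked":

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	q := elastic.NewMoreLikeThisQuery().
		Field("message").
		LikeText("Golang topic").
		LikeItems(elastic.NewMoreLikeThisQueryItem().
			Index("tweets").Type("tweet").Id("1")).
		MinTermFreq(1).
		MaxQueryTerms(12)

	// Source errors if no documents were "liked".
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}
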
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) {
+	q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := string(data)
+	expected := `{"mlt":{"fields":["message"],"like":["Golang topic"]}}`
+	if got != expected {
+		t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) {
+	q := NewMoreLikeThisQuery()
+	q = q.LikeItems(
+		NewMoreLikeThisQueryItem().Id("1"),
+		NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"),
+	)
+	q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3"))
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := string(data)
+	expected := `{"mlt":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}`
+	if got != expected {
+		t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMoreLikeThisQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another Golang topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// MoreLikeThis query
+	mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
+	res, err := client.Search().
+		Index(testIndexName).
+		Query(mltq).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go
new file mode 100644
index 000000000..3337ce59c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go
@@ -0,0 +1,275 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strings"
+)
+
+// MultiMatchQuery builds on the MatchQuery to allow multi-field queries.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html
+type MultiMatchQuery struct {
+	text               interface{}
+	fields             []string
+	fieldBoosts        map[string]*float64
+	typ                string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
+	operator           string // AND or OR
+	analyzer           string
+	boost              *float64
+	slop               *int
+	fuzziness          string
+	prefixLength       *int
+	maxExpansions      *int
+	minimumShouldMatch string
+	rewrite            string
+	fuzzyRewrite       string
+	tieBreaker         *float64
+	lenient            *bool
+	cutoffFrequency    *float64
+	zeroTermsQuery     string
+	queryName          string
+}
+
+// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
+func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
+	q := &MultiMatchQuery{
+		text:        text,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+	q.fields = append(q.fields, fields...)
+	return q
+}
+
+// Field adds a field to run the multi match against.
+func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the multi match against with a specific boost.
+func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
+	var zero = float64(0.0)
+	var one = float64(1.0)
+
+	switch strings.ToLower(typ) {
+	default: // best_fields / boolean
+		q.typ = "best_fields"
+		q.tieBreaker = &zero
+	case "most_fields":
+		q.typ = "most_fields"
+		q.tieBreaker = &one
+	case "cross_fields":
+		q.typ = "cross_fields"
+		q.tieBreaker = &zero
+	case "phrase":
+		q.typ = "phrase"
+		q.tieBreaker = &zero
+	case "phrase_prefix":
+		q.typ = "phrase_prefix"
+		q.tieBreaker = &zero
+	}
+	return q
+}
+
+// Operator sets the operator to use when using a boolean query.
+// It can be either AND or OR (default).
+func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
+	q.operator = operator
+	return q
+}
+
+// Analyzer sets the analyzer to use explicitly. It defaults to the explicit
+// mapping config for the field, or, if not set, the default search analyzer.
+func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
+	q.boost = &boost
+	return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
+	q.slop = &slop
+	return q
+}
+
+// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
+// It defaults to "AUTO".
+func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+// PrefixLength for the fuzzy process.
+func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
+	q.prefixLength = &prefixLength
+	return q
+}
+
+// MaxExpansions is the number of term expansions to use when using fuzzy
+// or prefix type query. It defaults to unbounded so it's recommended
+// to set it to a reasonable value for faster execution.
+func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery {
+	q.maxExpansions = &maxExpansions
+	return q
+}
+
+// MinimumShouldMatch represents the minimum number of optional should clauses
+// to match.
+func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Rewrite sets the rewrite method used for multi-term queries.
+func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// FuzzyRewrite sets the rewrite method used for fuzzy queries.
+func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery {
+	q.fuzzyRewrite = fuzzyRewrite
+	return q
+}
+
+// TieBreaker for "best-match" disjunction queries (OR queries).
+// The tie breaker capability allows documents that match more than one
+// query clause (in this case on more than one field) to be scored better
+// than documents that match only the best of the fields, without confusing
+// this with the better case of two distinct matches in the multiple fields.
+//
+// A tie-breaker value of 1.0 is interpreted as a signal to score queries as
+// "most-match" queries where all matching query clauses are considered for scoring.
+func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery {
+	q.tieBreaker = &tieBreaker
+	return q
+}
+
+// Lenient indicates whether format-based failures will be ignored.
+func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1)
+// representing the maximum threshold of a term's document frequency to be
+// considered a low frequency term.
+func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery {
+	q.cutoffFrequency = &cutoff
+	return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery {
+	q.zeroTermsQuery = zeroTermsQuery
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the query.
+func (q *MultiMatchQuery) Source() (interface{}, error) { + // + // { + // "multi_match" : { + // "query" : "this is a test", + // "fields" : [ "subject", "message" ] + // } + // } + + source := make(map[string]interface{}) + + multiMatch := make(map[string]interface{}) + source["multi_match"] = multiMatch + + multiMatch["query"] = q.text + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + multiMatch["fields"] = fields + } + + if q.typ != "" { + multiMatch["type"] = q.typ + } + + if q.operator != "" { + multiMatch["operator"] = q.operator + } + if q.analyzer != "" { + multiMatch["analyzer"] = q.analyzer + } + if q.boost != nil { + multiMatch["boost"] = *q.boost + } + if q.slop != nil { + multiMatch["slop"] = *q.slop + } + if q.fuzziness != "" { + multiMatch["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + multiMatch["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + multiMatch["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + multiMatch["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + multiMatch["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.tieBreaker != nil { + multiMatch["tie_breaker"] = *q.tieBreaker + } + if q.lenient != nil { + multiMatch["lenient"] = *q.lenient + } + if q.cutoffFrequency != nil { + multiMatch["cutoff_frequency"] = *q.cutoffFrequency + } + if q.zeroTermsQuery != "" { + multiMatch["zero_terms_query"] = q.zeroTermsQuery + } + if q.queryName != "" { + multiMatch["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match_test.go new file mode 100644 index 000000000..d897f7e72 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match_test.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
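
Before the tests, a sketch of the MultiMatchQuery builder above. The "subject", "message", and "title" field names are invented; note that Type("most_fields") also sets the tie_breaker to 1 (see the Type method above), and boosted fields serialize as "name^boost":

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	q := elastic.NewMultiMatchQuery("this is a test", "subject", "message").
		Type("most_fields").
		FieldWithBoost("title", 3.0). // rendered as "title^3.000000" via %f
		Operator("and")

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}
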
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMultiMatchQuery(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryMostFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryCrossFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrase(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrasePrefix(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message"). + Type("best_fields"). 
+		TieBreaker(0.3)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go
new file mode 100644
index 000000000..3b5655da4
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go
@@ -0,0 +1,85 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedQuery allows querying nested objects / docs.
+// The query is executed against the nested objects / docs as if they were
+// indexed as separate docs (they are, internally), returning the
+// root parent doc (or parent nested mapping).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html
+type NestedQuery struct {
+	query     Query
+	path      string
+	scoreMode string
+	boost     *float64
+	queryName string
+	innerHit  *InnerHit
+}
+
+// NewNestedQuery creates and initializes a new NestedQuery.
+func NewNestedQuery(path string, query Query) *NestedQuery {
+	return &NestedQuery{path: path, query: query}
+}
+
+// ScoreMode specifies the score mode.
+func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery {
+	q.scoreMode = scoreMode
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *NestedQuery) Boost(boost float64) *NestedQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit.
+func (q *NestedQuery) QueryName(queryName string) *NestedQuery {
+	q.queryName = queryName
+	return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this nested query,
+// reusing the defined path and query.
+func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery {
+	q.innerHit = innerHit
+	return q
+}
+
+// Source returns JSON for the query.
+func (q *NestedQuery) Source() (interface{}, error) {
+	query := make(map[string]interface{})
+	nq := make(map[string]interface{})
+	query["nested"] = nq
+
+	src, err := q.query.Source()
+	if err != nil {
+		return nil, err
+	}
+	nq["query"] = src
+
+	nq["path"] = q.path
+
+	if q.scoreMode != "" {
+		nq["score_mode"] = q.scoreMode
+	}
+	if q.boost != nil {
+		nq["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		nq["_name"] = q.queryName
+	}
+	if q.innerHit != nil {
+		src, err := q.innerHit.Source()
+		if err != nil {
+			return nil, err
+		}
+		nq["inner_hits"] = src
+	}
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested_test.go
new file mode 100644
index 000000000..af9740553
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested_test.go
@@ -0,0 +1,52 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
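
Before the nested-query tests, a sketch combining NewNestedQuery with a term query and inner hits. The "comments" path and field names are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// The inner query runs against the nested docs under the given path.
	inner := elastic.NewTermQuery("comments.user", "olivere")
	q := elastic.NewNestedQuery("comments", inner).
		ScoreMode("avg").
		InnerHit(elastic.NewInnerHit().Name("matched_comments"))

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}
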
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNestedQuery(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq).QueryName("qname") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNestedQueryWithInnerHit(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq) + q = q.QueryName("qname") + q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere"))) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go new file mode 100644 index 000000000..16f7611ed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go @@ -0,0 +1,115 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// PercolatorQuery can be used to match queries stored in an index. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html +type PercolatorQuery struct { + field string + documentType string + document interface{} + indexedDocumentIndex string + indexedDocumentType string + indexedDocumentId string + indexedDocumentRouting string + indexedDocumentPreference string + indexedDocumentVersion *int64 +} + +// NewPercolatorQuery creates and initializes a new Percolator query. 
+func NewPercolatorQuery() *PercolatorQuery { + return &PercolatorQuery{} +} + +func (q *PercolatorQuery) Field(field string) *PercolatorQuery { + q.field = field + return q +} + +func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery { + q.documentType = typ + return q +} + +func (q *PercolatorQuery) Document(doc interface{}) *PercolatorQuery { + q.document = doc + return q +} + +func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery { + q.indexedDocumentIndex = index + return q +} + +func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery { + q.indexedDocumentType = typ + return q +} + +func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery { + q.indexedDocumentId = id + return q +} + +func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery { + q.indexedDocumentRouting = routing + return q +} + +func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery { + q.indexedDocumentPreference = preference + return q +} + +func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery { + q.indexedDocumentVersion = &version + return q +} + +// Source returns JSON for the percolate query. +func (q *PercolatorQuery) Source() (interface{}, error) { + if len(q.field) == 0 { + return nil, errors.New("elastic: Field is required in PercolatorQuery") + } + if len(q.documentType) == 0 { + return nil, errors.New("elastic: DocumentType is required in PercolatorQuery") + } + if q.document == nil { + return nil, errors.New("elastic: Document is required in PercolatorQuery") + } + + // { + // "percolate" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["percolate"] = params + params["field"] = q.field + params["document_type"] = q.documentType + params["document"] = q.document + if len(q.indexedDocumentIndex) > 0 { + params["index"] = q.indexedDocumentIndex + } + if len(q.indexedDocumentType) > 0 { + params["type"] = q.indexedDocumentType + } + if len(q.indexedDocumentId) > 0 { + params["id"] = q.indexedDocumentId + } + if len(q.indexedDocumentRouting) > 0 { + params["routing"] = q.indexedDocumentRouting + } + if len(q.indexedDocumentPreference) > 0 { + params["preference"] = q.indexedDocumentPreference + } + if q.indexedDocumentVersion != nil { + params["version"] = *q.indexedDocumentVersion + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go new file mode 100644 index 000000000..8a22d4614 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercolatorQuery(t *testing.T) { + q := NewPercolatorQuery(). + Field("query"). + DocumentType("doctype"). 
+		Document(map[string]interface{}{
+			"message": "Some message",
+		})
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestPercolatorQueryWithDetails(t *testing.T) {
+	q := NewPercolatorQuery().
+		Field("query").
+		DocumentType("doctype").
+		Document(map[string]interface{}{
+			"message": "Some message",
+		}).
+		IndexedDocumentIndex("index").
+		IndexedDocumentType("type").
+		IndexedDocumentId("1").
+		IndexedDocumentRouting("route").
+		IndexedDocumentPreference("one").
+		IndexedDocumentVersion(1)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query","id":"1","index":"index","preference":"one","routing":"route","type":"type","version":1}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestPercolatorQueryWithMissingFields(t *testing.T) {
+	q := NewPercolatorQuery() // no Field, DocumentType, or Document set
+	_, err := q.Source()
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go
new file mode 100644
index 000000000..6d2d53b6c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go
@@ -0,0 +1,67 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PrefixQuery matches documents that have fields containing terms
+// with a specified prefix (not analyzed).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html
+type PrefixQuery struct {
+	name      string
+	prefix    string
+	boost     *float64
+	rewrite   string
+	queryName string
+}
+
+// NewPrefixQuery creates and initializes a new PrefixQuery.
+func NewPrefixQuery(name string, prefix string) *PrefixQuery {
+	return &PrefixQuery{name: name, prefix: prefix}
+}
+
+// Boost sets the boost for this query.
+func (q *PrefixQuery) Boost(boost float64) *PrefixQuery {
+	q.boost = &boost
+	return q
+}
+
+// Rewrite sets the rewrite method to use.
+func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery {
+	q.queryName = queryName
+	return q
+}
+
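A quick usage sketch of the PrefixQuery builder so far. The field and prefix are hypothetical; once any option is set, Source() (below) nests the parameters under the field name as an object:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	q := elastic.NewPrefixQuery("user", "ki").Boost(2.0).Rewrite("constant_score")
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	// {"prefix":{"user":{"boost":2,"prefix":"ki","rewrite":"constant_score"}}}
	fmt.Println(string(b))
}

+// Source returns JSON for the query.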
+func (q *PrefixQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["prefix"] = query + + if q.boost == nil && q.rewrite == "" && q.queryName == "" { + query[q.name] = q.prefix + } else { + subQuery := make(map[string]interface{}) + subQuery["prefix"] = q.prefix + if q.boost != nil { + subQuery["boost"] = *q.boost + } + if q.rewrite != "" { + subQuery["rewrite"] = q.rewrite + } + if q.queryName != "" { + subQuery["_name"] = q.queryName + } + query[q.name] = subQuery + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_test.go new file mode 100644 index 000000000..dcd47e2a1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPrefixQuery(t *testing.T) { + q := NewPrefixQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":"ki"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPrefixQueryWithOptions(t *testing.T) { + q := NewPrefixQuery("user", "ki") + q = q.QueryName("my_query_name") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go new file mode 100644 index 000000000..6f14e29f2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go @@ -0,0 +1,359 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// QueryStringQuery uses the query parser in order to parse its content. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html +type QueryStringQuery struct { + queryString string + defaultField string + defaultOperator string + analyzer string + quoteAnalyzer string + quoteFieldSuffix string + autoGeneratePhraseQueries *bool + allowLeadingWildcard *bool + lowercaseExpandedTerms *bool + enablePositionIncrements *bool + analyzeWildcard *bool + locale string + boost *float64 + fuzziness string + fuzzyPrefixLength *int + fuzzyMaxExpansions *int + fuzzyRewrite string + phraseSlop *int + fields []string + fieldBoosts map[string]*float64 + useDisMax *bool + tieBreaker *float64 + rewrite string + minimumShouldMatch string + lenient *bool + queryName string + timeZone string + maxDeterminizedStates *int + escape *bool +} + +// NewQueryStringQuery creates and initializes a new QueryStringQuery. 
+func NewQueryStringQuery(queryString string) *QueryStringQuery {
+	return &QueryStringQuery{
+		queryString: queryString,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+}
+
+// DefaultField specifies the field to run against when no prefix field
+// is specified. Only relevant when not explicitly adding fields the query
+// string will run against.
+func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
+	q.defaultField = defaultField
+	return q
+}
+
+// Field adds a field to run the query string against.
+func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the query string against with a specific boost.
+func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// UseDisMax specifies whether to combine queries using dis max or boolean
+// query when more than one field is used with the query string. Defaults
+// to dismax (true).
+func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery {
+	q.useDisMax = &useDisMax
+	return q
+}
+
+// TieBreaker is used when more than one field is used with the query string,
+// and combined queries are using dismax.
+func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
+	q.tieBreaker = &tieBreaker
+	return q
+}
+
+// DefaultOperator sets the boolean operator of the query parser used to
+// parse the query string.
+//
+// In default mode (OR) terms without any modifiers
+// are considered optional, e.g. "capital of Hungary" is equal to
+// "capital OR of OR Hungary".
+//
+// In AND mode, terms are considered to be in conjunction. The above mentioned
+// query is then parsed as "capital AND of AND Hungary".
+func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
+	q.defaultOperator = operator
+	return q
+}
+
+// Analyzer is an optional analyzer used to analyze the query string.
+// Note, if a field has search analyzer defined for it, then it will be used
+// automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
+// for phrase searches. Note, if a field has search analyzer defined for it,
+// then it will be used automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
+	q.quoteAnalyzer = quoteAnalyzer
+	return q
+}
+
+// AutoGeneratePhraseQueries indicates whether or not phrase queries will
+// be automatically generated when the analyzer returns more than one term
+// from whitespace delimited text. Set to false if phrase queries should only
+// be generated when surrounded by double quotes.
+func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery {
+	q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
+	return q
+}
+
+// MaxDeterminizedState protects against too-difficult regular expression queries.
+func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
+	q.maxDeterminizedStates = &maxDeterminizedStates
+	return q
+}
+
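A usage sketch of the QueryStringQuery options defined so far. The query text and field names are invented; boosted fields serialize as "name^boost" via fmt.Sprintf("%s^%f", ...), as the Source method below shows:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	q := elastic.NewQueryStringQuery("message:(quick OR brown)").
		Field("message").
		FieldWithBoost("title", 2.0). // serialized as "title^2.000000"
		DefaultOperator("AND").
		UseDisMax(true)

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}

+// AllowLeadingWildcard specifies whether leading wildcards should be allowed
+// or not (defaults to true).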
+func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
+	q.allowLeadingWildcard = &allowLeadingWildcard
+	return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
+	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return q
+}
+
+// EnablePositionIncrements indicates whether to enable position increments
+// in result query. Defaults to true.
+//
+// When set, result phrase and multi-phrase queries will be aware of position
+// increments. Useful when e.g. a StopFilter increases the position increment
+// of the token that follows an omitted token.
+func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
+	q.enablePositionIncrements = &enablePositionIncrements
+	return q
+}
+
+// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
+func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
+// Default is 1.
+func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
+	q.fuzzyPrefixLength = &fuzzyPrefixLength
+	return q
+}
+
+// FuzzyMaxExpansions sets the maximum number of term expansions to use
+// for fuzzy queries.
+func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
+	q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+	return q
+}
+
+// FuzzyRewrite sets the rewrite method used for fuzzy queries.
+func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
+	q.fuzzyRewrite = fuzzyRewrite
+	return q
+}
+
+// PhraseSlop sets the default slop for phrases. If zero, then exact matches
+// are required. Default value is zero.
+func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
+	q.phraseSlop = &phraseSlop
+	return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
+	q.analyzeWildcard = &analyzeWildcard
+	return q
+}
+
+// Rewrite sets the rewrite method used for multi-term queries.
+func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// MinimumShouldMatch specifies the minimum number of optional should
+// clauses that must match.
+func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery {
+	q.boost = &boost
+	return q
+}
+
+// QuoteFieldSuffix is an optional field name suffix to automatically
+// try and add to the field searched when using quoted text.
+func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery {
+	q.quoteFieldSuffix = quoteFieldSuffix
+	return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { + q.queryName = queryName + return q +} + +func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { + q.locale = locale + return q +} + +// TimeZone can be used to automatically adjust to/from fields using a +// timezone. Only used with date fields, of course. +func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { + q.timeZone = timeZone + return q +} + +// Escape performs escaping of the query string. +func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery { + q.escape = &escape + return q +} + +// Source returns JSON for the query. +func (q *QueryStringQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["query_string"] = query + + query["query"] = q.queryString + + if q.defaultField != "" { + query["default_field"] = q.defaultField + } + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.tieBreaker != nil { + query["tie_breaker"] = *q.tieBreaker + } + if q.useDisMax != nil { + query["use_dis_max"] = *q.useDisMax + } + if q.defaultOperator != "" { + query["default_operator"] = q.defaultOperator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.quoteAnalyzer != "" { + query["quote_analyzer"] = q.quoteAnalyzer + } + if q.autoGeneratePhraseQueries != nil { + query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries + } + if q.maxDeterminizedStates != nil { + query["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.allowLeadingWildcard != nil { + query["allow_leading_wildcard"] = *q.allowLeadingWildcard + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.enablePositionIncrements != nil { + query["enable_position_increments"] = *q.enablePositionIncrements + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.fuzzyPrefixLength != nil { + query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength + } + if q.fuzzyMaxExpansions != nil { + query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.phraseSlop != nil { + query["phrase_slop"] = *q.phraseSlop + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.quoteFieldSuffix != "" { + query["quote_field_suffix"] = q.quoteFieldSuffix + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.timeZone != "" { + query["time_zone"] = q.timeZone + } + if q.escape != nil { + query["escape"] = *q.escape + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string_test.go new file mode 100644 index 000000000..5030c3382 --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestQueryStringQuery(t *testing.T) { + q := NewQueryStringQuery(`this AND that OR thus`) + q = q.DefaultField("content") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestQueryStringQueryTimeZone(t *testing.T) { + q := NewQueryStringQuery(`tweet_date:[2015-01-01 TO 2017-12-31]`) + q = q.TimeZone("Europe/Berlin") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query_string":{"query":"tweet_date:[2015-01-01 TO 2017-12-31]","time_zone":"Europe/Berlin"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go new file mode 100644 index 000000000..e519d5ac2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go @@ -0,0 +1,144 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RangeQuery matches documents with fields that have terms within a certain range. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html +type RangeQuery struct { + name string + from interface{} + to interface{} + timeZone string + includeLower bool + includeUpper bool + boost *float64 + queryName string + format string +} + +// NewRangeQuery creates and initializes a new RangeQuery. +func NewRangeQuery(name string) *RangeQuery { + return &RangeQuery{name: name, includeLower: true, includeUpper: true} +} + +// From indicates the from part of the RangeQuery. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) From(from interface{}) *RangeQuery { + q.from = from + return q +} + +// Gt indicates a greater-than value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gt(from interface{}) *RangeQuery { + q.from = from + q.includeLower = false + return q +} + +// Gte indicates a greater-than-or-equal value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gte(from interface{}) *RangeQuery { + q.from = from + q.includeLower = true + return q +} + +// To indicates the to part of the RangeQuery. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) To(to interface{}) *RangeQuery { + q.to = to + return q +} + +// Lt indicates a less-than value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lt(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = false + return q +} + +// Lte indicates a less-than-or-equal value for the to part. 
+// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lte(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = true + return q +} + +// IncludeLower indicates whether the lower bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { + q.includeLower = includeLower + return q +} + +// IncludeUpper indicates whether the upper bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { + q.includeUpper = includeUpper + return q +} + +// Boost sets the boost for this query. +func (q *RangeQuery) Boost(boost float64) *RangeQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *RangeQuery) QueryName(queryName string) *RangeQuery { + q.queryName = queryName + return q +} + +// TimeZone is used for date fields. In that case, we can adjust the +// from/to fields using a timezone. +func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is used for date fields. In that case, we can set the format +// to be used instead of the mapper format. +func (q *RangeQuery) Format(format string) *RangeQuery { + q.format = format + return q +} + +// Source returns JSON for the query. +func (q *RangeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + rangeQ := make(map[string]interface{}) + source["range"] = rangeQ + + params := make(map[string]interface{}) + rangeQ[q.name] = params + + params["from"] = q.from + params["to"] = q.to + if q.timeZone != "" { + params["time_zone"] = q.timeZone + } + if q.format != "" { + params["format"] = q.format + } + if q.boost != nil { + params["boost"] = *q.boost + } + params["include_lower"] = q.includeLower + params["include_upper"] = q.includeUpper + + if q.queryName != "" { + rangeQ["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range_test.go similarity index 52% rename from vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go rename to vendor/gopkg.in/olivere/elastic.v5/search_queries_range_test.go index 8f21ef9f0..86d018a86 100644 --- a/vendor/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Copyright 2012-present Oliver Eilhard. All rights reserved. // Use of this source code is governed by a MIT-license. // See http://olivere.mit-license.org/license.txt for details. 
@@ -9,8 +9,9 @@ import (
 	"testing"
 )
 
-func TestTemplateQueryInlineTest(t *testing.T) {
-	q := NewTemplateQuery("\"match_{{template}}\": {}}\"").Vars(map[string]interface{}{"template": "all"})
+func TestRangeQuery(t *testing.T) {
+	q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01").Boost(3)
+	q = q.QueryName("my_query")
 	src, err := q.Source()
 	if err != nil {
 		t.Fatal(err)
@@ -20,16 +21,17 @@ func TestTemplateQueryInlineTest(t *testing.T) {
 		t.Fatalf("marshaling to JSON failed: %v", err)
 	}
 	got := string(data)
-	expected := `{"template":{"params":{"template":"all"},"query":"\"match_{{template}}\": {}}\""}}`
+	expected := `{"range":{"_name":"my_query","postDate":{"boost":3,"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}`
 	if got != expected {
 		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
 	}
 }
 
-func TestTemplateQueryIndexedTest(t *testing.T) {
-	q := NewTemplateQuery("indexedTemplate").
-		TemplateType("id").
-		Vars(map[string]interface{}{"template": "all"})
+func TestRangeQueryWithTimeZone(t *testing.T) {
+	q := NewRangeQuery("born").
+		Gte("2012-01-01").
+		Lte("now").
+		TimeZone("+1:00")
 	src, err := q.Source()
 	if err != nil {
 		t.Fatal(err)
@@ -39,16 +41,17 @@ func TestTemplateQueryIndexedTest(t *testing.T) {
 		t.Fatalf("marshaling to JSON failed: %v", err)
 	}
 	got := string(data)
-	expected := `{"template":{"id":"indexedTemplate","params":{"template":"all"}}}`
+	expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}`
 	if got != expected {
 		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
 	}
 }
 
-func TestTemplateQueryFileTest(t *testing.T) {
-	q := NewTemplateQuery("storedTemplate").
-		TemplateType("file").
-		Vars(map[string]interface{}{"template": "all"})
+func TestRangeQueryWithFormat(t *testing.T) {
+	q := NewRangeQuery("born").
+		Gte("2012/01/01").
+		Lte("now").
+		Format("yyyy/MM/dd")
 	src, err := q.Source()
 	if err != nil {
 		t.Fatal(err)
@@ -58,7 +61,7 @@ func TestTemplateQueryFileTest(t *testing.T) {
 		t.Fatalf("marshaling to JSON failed: %v", err)
 	}
 	got := string(data)
-	expected := `{"template":{"file":"storedTemplate","params":{"template":"all"}}}`
+	expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}`
 	if got != expected {
 		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
 	}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go
new file mode 100644
index 000000000..3f9685c41
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go
@@ -0,0 +1,26 @@
+// Copyright 2012-present Oliver Eilhard, John Stanford. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "encoding/json"
+
+// RawStringQuery can be used to treat a string representation of an ES query
+// as a Query. Example usage:
+//	q := RawStringQuery("{\"match_all\":{}}")
+//	db.Search().Query(q).From(1).Size(100).Do()
+type RawStringQuery string
+
+// NewRawStringQuery initializes a new RawStringQuery.
+// It is the same as RawStringQuery(q).
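+//
+// Equivalent sketch using the constructor:
+//	q := NewRawStringQuery(`{"match_all":{}}`)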
+func NewRawStringQuery(q string) RawStringQuery { + return RawStringQuery(q) +} + +// Source returns the JSON encoded body +func (q RawStringQuery) Source() (interface{}, error) { + var f interface{} + err := json.Unmarshal([]byte(q), &f) + return f, err +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string_test.go new file mode 100644 index 000000000..5bb3dac41 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRawStringQuery(t *testing.T) { + q := RawStringQuery(`{"match_all":{}}`) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match_all":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNewRawStringQuery(t *testing.T) { + q := NewRawStringQuery(`{"match_all":{}}`) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"match_all":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go new file mode 100644 index 000000000..b09033519 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go @@ -0,0 +1,82 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RegexpQuery allows you to use regular expression term queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html +type RegexpQuery struct { + name string + regexp string + flags string + boost *float64 + rewrite string + queryName string + maxDeterminizedStates *int +} + +// NewRegexpQuery creates and initializes a new RegexpQuery. +func NewRegexpQuery(name string, regexp string) *RegexpQuery { + return &RegexpQuery{name: name, regexp: regexp} +} + +// Flags sets the regexp flags. +func (q *RegexpQuery) Flags(flags string) *RegexpQuery { + q.flags = flags + return q +} + +// MaxDeterminizedStates protects against complex regular expressions. +func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// Boost sets the boost for this query. +func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { + q.boost = &boost + return q +} + +func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON-serializable query data. 
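+//
+// For example, NewRegexpQuery("name.first", "s.*y") serializes to
+// {"regexp":{"name.first":{"value":"s.*y"}}}, as exercised by the
+// accompanying tests.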
+func (q *RegexpQuery) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	query := make(map[string]interface{})
+	source["regexp"] = query
+
+	x := make(map[string]interface{})
+	x["value"] = q.regexp
+	if q.flags != "" {
+		x["flags"] = q.flags
+	}
+	if q.maxDeterminizedStates != nil {
+		x["max_determinized_states"] = *q.maxDeterminizedStates
+	}
+	if q.boost != nil {
+		x["boost"] = *q.boost
+	}
+	if q.rewrite != "" {
+		x["rewrite"] = q.rewrite
+	}
+	if q.queryName != "" {
+		x["name"] = q.queryName
+	}
+	query[q.name] = x
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp_test.go
new file mode 100644
index 000000000..d30c0a36d
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestRegexpQuery(t *testing.T) {
+	q := NewRegexpQuery("name.first", "s.*y")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"regexp":{"name.first":{"value":"s.*y"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestRegexpQueryWithOptions(t *testing.T) {
+	q := NewRegexpQuery("name.first", "s.*y").
+		Boost(1.2).
+		Flags("INTERSECTION|COMPLEMENT|EMPTY").
+		QueryName("my_query_name")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go
new file mode 100644
index 000000000..9086bcb1b
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// ScriptQuery allows scripts to be used as filters.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html
+type ScriptQuery struct {
+	script    *Script
+	queryName string
+}
+
+// NewScriptQuery creates and initializes a new ScriptQuery.
+func NewScriptQuery(script *Script) *ScriptQuery {
+	return &ScriptQuery{
+		script: script,
+	}
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the query.
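+//
+// Sketch of the emitted shape (the script serialization depends on *Script):
+// {"script":{"script":...,"_name":"<name, if set>"}}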
+func (q *ScriptQuery) Source() (interface{}, error) {
+	if q.script == nil {
+		return nil, errors.New("ScriptQuery expected a script")
+	}
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["script"] = params
+
+	src, err := q.script.Source()
+	if err != nil {
+		return nil, err
+	}
+	params["script"] = src
+
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go
new file mode 100644
index 000000000..8bf9f8a11
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestScriptQuery(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"script":{"script":"doc['num1'].value \u003e 1"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScriptQueryWithQueryName(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+	q = q.QueryName("MyQueryName")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"script":{"_name":"MyQueryName","script":"doc['num1'].value \u003e 1"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go
new file mode 100644
index 000000000..203c35020
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go
@@ -0,0 +1,185 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strings"
+)
+
+// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
+// to parse its context. Unlike the regular query_string query,
+// the simple_query_string query will never throw an exception,
+// and discards invalid parts of the query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html
+type SimpleQueryStringQuery struct {
+	queryText              string
+	analyzer               string
+	operator               string
+	fields                 []string
+	fieldBoosts            map[string]*float64
+	minimumShouldMatch     string
+	flags                  string
+	boost                  *float64
+	lowercaseExpandedTerms *bool
+	lenient                *bool
+	analyzeWildcard        *bool
+	locale                 string
+	queryName              string
+}
+
+// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery.
+func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
+	return &SimpleQueryStringQuery{
+		queryText:   text,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+}
+
+// Field adds a field to run the query against.
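+//
+// Sketch (field names illustrative only):
+//	q := NewSimpleQueryStringQuery("golang").Field("message").Field("tags")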
+func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the query against with a specific boost.
+func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Analyzer specifies the analyzer to use for the query.
+func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// DefaultOperator specifies the default operator for the query.
+func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
+	q.operator = defaultOperator
+	return q
+}
+
+// Flags sets the flags for the query.
+func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
+	q.flags = flags
+	return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
+	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return q
+}
+
+// Locale sets the locale to use when parsing the query.
+func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
+	q.locale = locale
+	return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
+	q.analyzeWildcard = &analyzeWildcard
+	return q
+}
+
+// MinimumShouldMatch sets the minimum number (or percentage) of optional
+// clauses that must match for a document to be returned.
+func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Source returns JSON for the query.
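+//
+// A minimal sketch (query text illustrative only):
+//	src, _ := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato)`).Source()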
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) { + // { + // "simple_query_string" : { + // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", + // "analyzer" : "snowball", + // "fields" : ["body^5","_all"], + // "default_operator" : "and" + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["simple_query_string"] = query + + query["query"] = q.queryText + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.flags != "" { + query["flags"] = q.flags + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.operator != "" { + query["default_operator"] = strings.ToLower(q.operator) + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.boost != nil { + query["boost"] = *q.boost + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go new file mode 100644 index 000000000..6d4fe52a2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go @@ -0,0 +1,88 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestSimpleQueryStringQuery(t *testing.T) {
+	q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSimpleQueryStringQueryExec(t *testing.T) {
+	// client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Both terms are required, so only the first tweet should match
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(NewSimpleQueryStringQuery("+Golang +Elasticsearch")).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go
new file mode 100644
index 000000000..c809959a8
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go
@@ -0,0 +1,58 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermQuery finds documents that contain the exact term specified
+// in the inverted index.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html
+type TermQuery struct {
+	name      string
+	value     interface{}
+	boost     *float64
+	queryName string
+}
+
+// NewTermQuery creates and initializes a new TermQuery.
+func NewTermQuery(name string, value interface{}) *TermQuery {
+	return &TermQuery{name: name, value: value}
+}
+
+// Boost sets the boost for this query.
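+//
+// Sketch: with a boost or query name set, Source switches from the short
+// form {"term":{"user":"ki"}} to {"term":{"user":{"value":"ki",...}}}.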
+func (q *TermQuery) Boost(boost float64) *TermQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermQuery) QueryName(queryName string) *TermQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *TermQuery) Source() (interface{}, error) { + // {"term":{"name":"value"}} + source := make(map[string]interface{}) + tq := make(map[string]interface{}) + source["term"] = tq + + if q.boost == nil && q.queryName == "" { + tq[q.name] = q.value + } else { + subQ := make(map[string]interface{}) + subQ["value"] = q.value + if q.boost != nil { + subQ["boost"] = *q.boost + } + if q.queryName != "" { + subQ["_name"] = q.queryName + } + tq[q.name] = subQ + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_term_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term_test.go new file mode 100644 index 000000000..f800fa954 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermQuery(t *testing.T) { + q := NewTermQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"term":{"user":"ki"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermQueryWithOptions(t *testing.T) { + q := NewTermQuery("user", "ki") + q = q.Boost(2.79) + q = q.QueryName("my_tq") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go new file mode 100644 index 000000000..c95ea9307 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsQuery filters documents that have fields that match any +// of the provided terms (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html +type TermsQuery struct { + name string + values []interface{} + queryName string + boost *float64 +} + +// NewTermsQuery creates and initializes a new TermsQuery. +func NewTermsQuery(name string, values ...interface{}) *TermsQuery { + q := &TermsQuery{ + name: name, + values: make([]interface{}, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +// Boost sets the boost for this query. 
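+//
+// Sketch (keys are emitted in sorted order by encoding/json):
+//	NewTermsQuery("user", "ki", "ko").Boost(2.79)
+//	// -> {"terms":{"boost":2.79,"user":["ki","ko"]}}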
+func (q *TermsQuery) Boost(boost float64) *TermsQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source creates the query source for the terms query.
+func (q *TermsQuery) Source() (interface{}, error) {
+	// {"terms":{"name":["value1","value2"]}}
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["terms"] = params
+	params[q.name] = q.values
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_test.go
new file mode 100644
index 000000000..8818de213
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestTermsQuery(t *testing.T) {
+	q := NewTermsQuery("user", "ki")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"terms":{"user":["ki"]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestTermsQueryWithOptions(t *testing.T) {
+	q := NewTermsQuery("user", "ki", "ko")
+	q = q.Boost(2.79)
+	q = q.QueryName("my_tq")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go
new file mode 100644
index 000000000..7356c8e34
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go
@@ -0,0 +1,26 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TypeQuery filters documents matching the provided document / mapping type.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
+type TypeQuery struct {
+	typ string
+}
+
+// NewTypeQuery creates and initializes a new TypeQuery.
+func NewTypeQuery(typ string) *TypeQuery {
+	return &TypeQuery{typ: typ}
+}
+
+// Source returns JSON for the query.
+func (q *TypeQuery) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["type"] = params
+	params["value"] = q.typ
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_type_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type_test.go
new file mode 100644
index 000000000..176b82abb
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTypeQuery(t *testing.T) { + q := NewTypeQuery("my_type") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"type":{"value":"my_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go new file mode 100644 index 000000000..44e594675 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go @@ -0,0 +1,81 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// WildcardQuery matches documents that have fields matching a wildcard +// expression (not analyzed). Supported wildcards are *, which matches +// any character sequence (including the empty one), and ?, which matches +// any single character. Note this query can be slow, as it needs to iterate +// over many terms. In order to prevent extremely slow wildcard queries, +// a wildcard term should not start with one of the wildcards * or ?. +// The wildcard query maps to Lucene WildcardQuery. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html +type WildcardQuery struct { + name string + wildcard string + boost *float64 + rewrite string + queryName string +} + +// NewWildcardQuery creates and initializes a new WildcardQuery. +func NewWildcardQuery(name, wildcard string) *WildcardQuery { + return &WildcardQuery{ + name: name, + wildcard: wildcard, + } +} + +// Boost sets the boost for this query. +func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { + q.boost = &boost + return q +} + +func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the name of this query. +func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable body of this query. +func (q *WildcardQuery) Source() (interface{}, error) { + // { + // "wildcard" : { + // "user" : { + // "wildcard" : "ki*y", + // "boost" : 1.0 + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["wildcard"] = query + + wq := make(map[string]interface{}) + query[q.name] = wq + + wq["wildcard"] = q.wildcard + + if q.boost != nil { + wq["boost"] = *q.boost + } + if q.rewrite != "" { + wq["rewrite"] = q.rewrite + } + if q.queryName != "" { + wq["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go new file mode 100644 index 000000000..658c513cc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic_test
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+
+	"gopkg.in/olivere/elastic.v5"
+)
+
+func ExampleWildcardQuery() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Define wildcard query
+	q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2)
+	searchResult, err := client.Search().
+		Index("twitter").  // search in index "twitter"
+		Query(q).          // use wildcard query defined above
+		Do(context.TODO()) // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = searchResult
+}
+
+func TestWildcardQuery(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestWildcardQueryWithBoost(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_request.go b/vendor/gopkg.in/olivere/elastic.v5/search_request.go
new file mode 100644
index 000000000..580a26313
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_request.go
@@ -0,0 +1,178 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+	searchType   string // default in ES is "query_then_fetch"
+	indices      []string
+	types        []string
+	routing      *string
+	preference   *string
+	requestCache *bool
+	scroll       string
+	source       interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+	return &SearchRequest{
+		indices: make([]string, 0),
+		types:   make([]string, 0),
+	}
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Use one of the SearchTypeXxx shortcut methods below to set it safely.
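+//
+// Sketch: NewSearchRequest().SearchTypeDfsQueryThenFetch() is shorthand
+// for SearchType("dfs_query_then_fetch").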
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest { + r.searchType = searchType + return r +} + +func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest { + return r.SearchType("dfs_query_then_fetch") +} + +func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest { + return r.SearchType("dfs_query_and_fetch") +} + +func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest { + return r.SearchType("query_then_fetch") +} + +func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest { + return r.SearchType("query_and_fetch") +} + +func (r *SearchRequest) SearchTypeScan() *SearchRequest { + return r.SearchType("scan") +} + +func (r *SearchRequest) SearchTypeCount() *SearchRequest { + return r.SearchType("count") +} + +func (r *SearchRequest) Index(indices ...string) *SearchRequest { + r.indices = append(r.indices, indices...) + return r +} + +func (r *SearchRequest) HasIndices() bool { + return len(r.indices) > 0 +} + +func (r *SearchRequest) Type(types ...string) *SearchRequest { + r.types = append(r.types, types...) + return r +} + +func (r *SearchRequest) Routing(routing string) *SearchRequest { + r.routing = &routing + return r +} + +func (r *SearchRequest) Routings(routings ...string) *SearchRequest { + if routings != nil { + routings := strings.Join(routings, ",") + r.routing = &routings + } else { + r.routing = nil + } + return r +} + +func (r *SearchRequest) Preference(preference string) *SearchRequest { + r.preference = &preference + return r +} + +func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest { + r.requestCache = &requestCache + return r +} + +func (r *SearchRequest) Scroll(scroll string) *SearchRequest { + r.scroll = scroll + return r +} + +func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest { + return r.Source(searchSource) +} + +func (r *SearchRequest) Source(source interface{}) *SearchRequest { + switch v := source.(type) { + case *SearchSource: + src, err := v.Source() + if err != nil { + // Do not do anything in case of an error + return r + } + r.source = src + default: + r.source = source + } + return r +} + +// header is used e.g. by MultiSearch to get information about the search header +// of one SearchRequest. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html +func (r *SearchRequest) header() interface{} { + h := make(map[string]interface{}) + if r.searchType != "" { + h["search_type"] = r.searchType + } + + switch len(r.indices) { + case 0: + case 1: + h["index"] = r.indices[0] + default: + h["indices"] = r.indices + } + + switch len(r.types) { + case 0: + case 1: + h["type"] = r.types[0] + default: + h["types"] = r.types + } + + if r.routing != nil && *r.routing != "" { + h["routing"] = *r.routing + } + + if r.preference != nil && *r.preference != "" { + h["preference"] = *r.preference + } + + if r.requestCache != nil { + h["request_cache"] = fmt.Sprintf("%v", *r.requestCache) + } + + if r.scroll != "" { + h["scroll"] = r.scroll + } + + return h +} + +// body is used by MultiSearch to get information about the search body +// of one SearchRequest. 
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+	return r.source
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_request_test.go
new file mode 100644
index 000000000..c64a44cb6
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_request_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestSearchRequestIndex(t *testing.T) {
+	builder := NewSearchRequest().Index("test")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"index":"test"}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestIndices(t *testing.T) {
+	builder := NewSearchRequest().Index("test", "test2")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"indices":["test","test2"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestHasIndices(t *testing.T) {
+	builder := NewSearchRequest()
+	if builder.HasIndices() {
+		t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
+	}
+	builder = builder.Index("test", "test2")
+	if !builder.HasIndices() {
+		t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_source.go b/vendor/gopkg.in/olivere/elastic.v5/search_source.go
new file mode 100644
index 000000000..a162af8be
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_source.go
@@ -0,0 +1,488 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+	query                    Query
+	postQuery                Query
+	from                     int
+	size                     int
+	explain                  *bool
+	version                  *bool
+	sorters                  []Sorter
+	trackScores              bool
+	minScore                 *float64
+	timeout                  string
+	terminateAfter           *int
+	storedFieldNames         []string
+	docvalueFields           []string
+	scriptFields             []*ScriptField
+	fetchSourceContext       *FetchSourceContext
+	aggregations             map[string]Aggregation
+	highlight                *Highlight
+	globalSuggestText        string
+	suggesters               []Suggester
+	rescores                 []*Rescore
+	defaultRescoreWindowSize *int
+	indexBoosts              map[string]float64
+	stats                    []string
+	innerHits                map[string]*InnerHit
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+	return &SearchSource{
+		from:         -1,
+		size:         -1,
+		trackScores:  false,
+		aggregations: make(map[string]Aggregation),
+		indexBoosts:  make(map[string]float64),
+		innerHits:    make(map[string]*InnerHit),
+	}
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+	s.query = query
+	return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
+	s.postQuery = postFilter
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
+func (s *SearchSource) From(from int) *SearchSource {
+	s.from = from
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+	s.size = size
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+	s.minScore = &minScore
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+	s.explain = &explain
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+	s.version = &version
+	return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+	s.timeout = timeout
+	return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+	s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+	return s
+}
+
+// TerminateAfter allows the request to stop after the given number
+// of search hits are collected.
+func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
+	s.terminateAfter = &terminateAfter
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+	s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending})
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+	s.sorters = append(s.sorters, info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+	s.sorters = append(s.sorters, sorter...)
+	return s
+}
+
+func (s *SearchSource) hasSort() bool {
+	return len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+	s.trackScores = trackScores
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+	s.aggregations[name] = aggregation
+	return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+	s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+	s.highlight = highlight
+	return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+	if s.highlight == nil {
+		s.highlight = NewHighlight()
+	}
+	return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
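+//
+// Sketch: with GlobalSuggestText("golang"), Source emits a top-level
+// "text" entry inside "suggest" so individual suggesters may omit it.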
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource { + s.globalSuggestText = text + return s +} + +// Suggester adds a suggester to the search. +func (s *SearchSource) Suggester(suggester Suggester) *SearchSource { + s.suggesters = append(s.suggesters, suggester) + return s +} + +// Rescorer adds a rescorer to the search. +func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource { + s.rescores = append(s.rescores, rescore) + return s +} + +// ClearRescorers removes all rescorers from the search. +func (s *SearchSource) ClearRescorers() *SearchSource { + s.rescores = make([]*Rescore, 0) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { + if s.fetchSourceContext == nil { + s.fetchSourceContext = NewFetchSourceContext(fetchSource) + } else { + s.fetchSourceContext.SetFetchSource(fetchSource) + } + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource { + s.fetchSourceContext = fetchSourceContext + return s +} + +// NoStoredFields indicates that no fields should be loaded, resulting in only +// id and type to be returned per field. +func (s *SearchSource) NoStoredFields() *SearchSource { + s.storedFieldNames = nil + return s +} + +// StoredField adds a single field to load and return (note, must be stored) as +// part of the search request. If none are specified, the source of the +// document will be returned. +func (s *SearchSource) StoredField(storedFieldName string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldName) + return s +} + +// StoredFields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...) + return s +} + +// DocvalueField adds a single field to load from the field data cache +// and return as part of the search request. +func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, fieldDataField) + return s +} + +// DocvalueFields adds one or more fields to load from the field data cache +// and return as part of the search request. +func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, docvalueFields...) + return s +} + +// ScriptField adds a single script field with the provided script. +func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource { + s.scriptFields = append(s.scriptFields, scriptField) + return s +} + +// ScriptFields adds one or more script fields with the provided scripts. +func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource { + s.scriptFields = append(s.scriptFields, scriptFields...) + return s +} + +// IndexBoost sets the boost that a specific index will receive when the +// query is executed against it. +func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource { + s.indexBoosts[index] = boost + return s +} + +// Stats group this request will be aggregated under. +func (s *SearchSource) Stats(statsGroup ...string) *SearchSource { + s.stats = append(s.stats, statsGroup...) 
+ return s +} + +// InnerHit adds an inner hit to return with the result. +func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { + s.innerHits[name] = innerHit + return s +} + +// Source returns the serializable JSON for the source builder. +func (s *SearchSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if s.from != -1 { + source["from"] = s.from + } + if s.size != -1 { + source["size"] = s.size + } + if s.timeout != "" { + source["timeout"] = s.timeout + } + if s.terminateAfter != nil { + source["terminate_after"] = *s.terminateAfter + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + if s.postQuery != nil { + src, err := s.postQuery.Source() + if err != nil { + return nil, err + } + source["post_filter"] = src + } + if s.minScore != nil { + source["min_score"] = *s.minScore + } + if s.version != nil { + source["version"] = *s.version + } + if s.explain != nil { + source["explain"] = *s.explain + } + if s.fetchSourceContext != nil { + src, err := s.fetchSourceContext.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + + if s.storedFieldNames != nil { + switch len(s.storedFieldNames) { + case 1: + source["stored_fields"] = s.storedFieldNames[0] + default: + source["stored_fields"] = s.storedFieldNames + } + } + + if len(s.docvalueFields) > 0 { + source["docvalue_fields"] = s.docvalueFields + } + + if len(s.scriptFields) > 0 { + sfmap := make(map[string]interface{}) + for _, scriptField := range s.scriptFields { + src, err := scriptField.Source() + if err != nil { + return nil, err + } + sfmap[scriptField.FieldName] = src + } + source["script_fields"] = sfmap + } + + if len(s.sorters) > 0 { + var sortarr []interface{} + for _, sorter := range s.sorters { + src, err := sorter.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } + + if s.trackScores { + source["track_scores"] = s.trackScores + } + + if len(s.indexBoosts) > 0 { + source["indices_boost"] = s.indexBoosts + } + + if len(s.aggregations) > 0 { + aggsMap := make(map[string]interface{}) + for name, aggregate := range s.aggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + source["aggregations"] = aggsMap + } + + if s.highlight != nil { + src, err := s.highlight.Source() + if err != nil { + return nil, err + } + source["highlight"] = src + } + + if len(s.suggesters) > 0 { + suggesters := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + suggesters[s.Name()] = src + } + if s.globalSuggestText != "" { + suggesters["text"] = s.globalSuggestText + } + source["suggest"] = suggesters + } + + if len(s.rescores) > 0 { + // Strip empty rescores from request + var rescores []*Rescore + for _, r := range s.rescores { + if !r.IsEmpty() { + rescores = append(rescores, r) + } + } + + if len(rescores) == 1 { + rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := rescores[0].Source() + if err != nil { + return nil, err + } + source["rescore"] = src + } else { + var slice []interface{} + for _, r := range rescores { + r.defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := r.Source() + if err != nil { + return nil, err + } + slice = append(slice, src) + } + source["rescore"] = slice + } + } + + if len(s.stats) > 0 { + source["stats"] = 
s.stats
+	}
+
+	if len(s.innerHits) > 0 {
+		// Top-level inner hits
+		// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+		// "inner_hits": {
+		//   "<inner_hits_name>": {
+		//     "<path|type>": {
+		//       "<path-to-nested-object-field|child-or-parent-type>": {
+		//         <inner_hits_body>
+		//         [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
+		//       }
+		//     }
+		//   },
+		//   [,"<inner_hits_name_2>" : { ... } ]*
+		// }
+		m := make(map[string]interface{})
+		for name, hit := range s.innerHits {
+			if hit.path != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				path := make(map[string]interface{})
+				path[hit.path] = src
+				m[name] = map[string]interface{}{
+					"path": path,
+				}
+			} else if hit.typ != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				typ := make(map[string]interface{})
+				typ[hit.typ] = src
+				m[name] = map[string]interface{}{
+					"type": typ,
+				}
+			} else {
+				// TODO the Java client throws here, because either path or typ must be specified
+			}
+		}
+		source["inner_hits"] = m
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go
new file mode 100644
index 000000000..5c54e5453
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go
@@ -0,0 +1,259 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSearchSourceMatchAllQuery(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceNoStoredFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).NoStoredFields()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceStoredFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).StoredFields("message", "tags")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"query":{"match_all":{}},"stored_fields":["message","tags"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFetchSourceDisabled(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_source":false,"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFetchSourceByWildcards(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	
fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description") + builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceDocvalueFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).DocvalueFields("test1", "test2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"docvalue_fields":["test1","test2"],"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceScriptFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2")) + sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927)) + builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"script":{"inline":"doc['my_field_name'].value * factor","params":{"factor":3.1415927}}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourcePostFilter(t *testing.T) { + matchAllQ := NewMatchAllQuery() + pf := NewTermQuery("tag", "important") + builder := NewSearchSource().Query(matchAllQ).PostFilter(pf) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceHighlight(t *testing.T) { + matchAllQ := NewMatchAllQuery() + hl := NewHighlight().Field("content") + builder := NewSearchSource().Query(matchAllQ).Highlight(hl) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceRescoring(t *testing.T) { + matchAllQ := NewMatchAllQuery() + rescorerQuery := NewMatchQuery("field1", "the quick brown fox").Type("phrase").Slop(2) + rescorer := NewQueryRescorer(rescorerQuery) + rescorer = rescorer.QueryWeight(0.7) + rescorer = rescorer.RescoreQueryWeight(1.2) + rescore := NewRescore().WindowSize(50).Rescorer(rescorer) + builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil 
{ + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match":{"field1":{"query":"the quick brown fox","slop":2,"type":"phrase"}}},"rescore_query_weight":1.2},"window_size":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceIndexBoost(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceMixDifferentSorters(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ). + Sort("a", false). + SortWithInfo(SortInfo{Field: "b", Ascending: true}). + SortBy(NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number")) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"sort":[{"a":{"order":"desc"}},{"b":{"order":"asc"}},{"_script":{"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceInnerHits(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ). + InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))). + InnerHit("views", NewInnerHit().Path("view")) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go new file mode 100644 index 000000000..94c3a6779 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go @@ -0,0 +1,241 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestTermSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + tsName := "my-suggestions" + ts := NewTermSuggester(tsName) + ts = ts.Text("Goolang") + ts = ts.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(NewMatchAllQuery()). + Suggester(ts). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[tsName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "goolang" { + t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } + if len(mySuggestion.Options) != 1 { + t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) + } + myOption := mySuggestion.Options[0] + if myOption.Text != "golang" { + t.Errorf("expected Text = 'golang'; got %s", myOption.Text) + } +} + +func TestPhraseSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + phraseSuggesterName := "my-suggestions" + ps := NewPhraseSuggester(phraseSuggesterName) + ps = ps.Text("Goolang") + ps = ps.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(NewMatchAllQuery()). + Suggester(ps). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[phraseSuggesterName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "Goolang" { + t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } +} + +func TestCompletionSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0))) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Suggest: NewSuggestField("Golang", "Elasticsearch"), + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Suggest: NewSuggestField("Another unrelated topic."), + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Suggest: NewSuggestField("Cycling is fun."), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + suggesterName := "my-suggestions" + cs := NewCompletionSuggester(suggesterName) + cs = cs.Text("Golang") + cs = cs.Field("suggest_field") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(NewMatchAllQuery()). + Suggester(cs). 
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Suggest == nil {
+		t.Errorf("expected SearchResult.Suggest != nil; got nil")
+	}
+	mySuggestions, found := searchResult.Suggest[suggesterName]
+	if !found {
+		t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+	}
+	if mySuggestions == nil {
+		t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+	}
+
+	if len(mySuggestions) != 1 {
+		t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+	}
+	mySuggestion := mySuggestions[0]
+	if mySuggestion.Text != "Golang" {
+		t.Errorf("expected Text = 'Golang'; got %s", mySuggestion.Text)
+	}
+	if mySuggestion.Offset != 0 {
+		t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+	}
+	if mySuggestion.Length != 6 {
+		t.Errorf("expected Length = %d; got %d", 6, mySuggestion.Length)
+	}
+	if len(mySuggestion.Options) != 1 {
+		t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+	}
+	myOption := mySuggestion.Options[0]
+	if myOption.Text != "Golang" {
+		t.Errorf("expected Text = 'Golang'; got %s", myOption.Text)
+	}
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_test.go
new file mode 100644
index 000000000..e78ad1bcf
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_test.go
@@ -0,0 +1,1025 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+func TestSearchMatchAll(t *testing.T) {
+	//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	// Match all should return all documents
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(NewMatchAllQuery()).
+		Size(100).
+		Pretty(true).
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if got, want := searchResult.Hits.TotalHits, int64(12); got != want {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+	}
+	if got, want := len(searchResult.Hits.Hits), 12; got != want {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestSearchMatchAllWithRequestCacheDisabled(t *testing.T) {
+	//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	// Match all should return all documents, with request cache disabled
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(NewMatchAllQuery()).
+		Size(100).
+		Pretty(true).
+		RequestCache(false).
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if got, want := searchResult.Hits.TotalHits, int64(12); got != want { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) + } + if got, want := len(searchResult.Hits.Hits), 12; got != want { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) + } +} + +func BenchmarkSearchMatchAll(b *testing.B) { + client := setupTestClientAndCreateIndexAndAddDocs(b) + + for n := 0; n < b.N; n++ { + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) + if err != nil { + b.Fatal(err) + } + if searchResult.Hits == nil { + b.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits == 0 { + b.Errorf("expected SearchResult.Hits.TotalHits > %d; got %d", 0, searchResult.Hits.TotalHits) + } + } +} + +func TestSearchResultTotalHits(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + count, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + got := searchResult.TotalHits() + if got != count { + t.Fatalf("expected %d hits; got: %d", count, got) + } + + // No hits + searchResult = &SearchResult{} + got = searchResult.TotalHits() + if got != 0 { + t.Errorf("expected %d hits; got: %d", 0, got) + } +} + +func TestSearchResultEach(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Iterate over non-ptr type + var aTweet tweet + count := 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _, ok := item.(tweet) + if !ok { + t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) + } + } + if count == 0 { + t.Errorf("expected to find some hits; got: %d", count) + } + + // Iterate over ptr-type + count = 0 + var aTweetPtr *tweet + for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) { + count++ + tw, ok := item.(*tweet) + if !ok { + t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) + } + if tw == nil { + t.Fatal("expected hit to not be nil") + } + } + if count == 0 { + t.Errorf("expected to find some hits; got: %d", count) + } + + // Does not iterate when no hits are found + searchResult = &SearchResult{Hits: nil} + count = 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _ = item + } + if count != 0 { + t.Errorf("expected to not find any hits; got: %d", count) + } + searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}} + count = 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _ = item + } + if count != 0 { + t.Errorf("expected to not find any hits; got: %d", count) + } +} + +func TestSearchSorting(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated 
topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Sort("created", false). + Timeout("1s"). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSortingBySorters(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + SortBy(NewFieldSort("created").Desc(), NewScoreSort()). + Timeout("1s"). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSpecificFields(t *testing.T) { + // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + StoredFields("message"). 
+		Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 3 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 3 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		if hit.Source != nil {
+			t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source)
+		}
+		if hit.Fields == nil {
+			t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil")
+		}
+		field, found := hit.Fields["message"]
+		if !found {
+			t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message")
+		}
+		fields, ok := field.([]interface{})
+		if !ok {
+			t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(field))
+		}
+		if len(fields) != 1 {
+			t.Errorf("expected a field with 1 entry; got: %d", len(fields))
+		}
+		message, ok := fields[0].(string)
+		if !ok {
+			t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0]))
+		}
+		if message == "" {
+			t.Errorf("expected a message; got: %q", message)
+		}
+	}
+}
+
+func TestSearchExplain(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+	// client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+	tweet1 := tweet{
+		User: "olivere", Retweets: 108,
+		Message: "Welcome to Golang and Elasticsearch.",
+		Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+	}
+	tweet2 := tweet{
+		User: "olivere", Retweets: 0,
+		Message: "Another unrelated topic.",
+		Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+	}
+	tweet3 := tweet{
+		User: "sandrae", Retweets: 12,
+		Message: "Cycling is fun.",
+		Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+	}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	all := NewMatchAllQuery()
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(all).
+		Explain(true).
+		Timeout("1s").
+		// Pretty(true).
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + if hit.Explanation == nil { + t.Fatal("expected search explanation") + } + if hit.Explanation.Value <= 0.0 { + t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value) + } + if hit.Explanation.Description == "" { + t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description) + } + } +} + +func TestSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Set up the request JSON manually to pass to the search service via Source() + source := map[string]interface{}{ + "query": map[string]interface{}{ + "match_all": map[string]interface{}{}, + }, + } + + searchResult, err := client.Search(). + Index(testIndexName). + Source(source). 
// sets the JSON request + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + +func TestSearchRawString(t *testing.T) { + // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + query := RawStringQuery(`{"match_all":{}}`) + searchResult, err := client.Search(). + Index(testIndexName). + Query(query). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + +func TestSearchSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Set up the search source manually and pass it to the search service via SearchSource() + ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2) + + // One can use ss.Source() to get to the raw interface{} that will be used + // as the search request JSON by the SearchService. + + searchResult, err := client.Search(). + Index(testIndexName). + SearchSource(ss). 
// sets the SearchSource + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } +} + +func TestSearchInnerHitsOnHasChild(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasChildQuery("comment", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("comments"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 2 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "t2" { + t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c2a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "t3" { + t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 2 { + t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c3a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id) + } + if innerHits.Hits.Hits[1].Id != "c3b" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id) + } +} + +func TestSearchInnerHitsOnHasParent(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = 
client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasParentQuery("tweet", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("tweets"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "c2a" { + t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t2" { + t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "c3a" { + t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[2] + if hit.Id != "c3b" { + t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d 
inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } +} + +func TestSearchBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Types []string + Expected string + }{ + { + []string{}, + []string{}, + "/_search", + }, + { + []string{"index1"}, + []string{}, + "/index1/_search", + }, + { + []string{"index1", "index2"}, + []string{}, + "/index1%2Cindex2/_search", + }, + { + []string{}, + []string{"type1"}, + "/_all/type1/_search", + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1/_search", + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2/_search", + }, + { + []string{}, + []string{"type1", "type2"}, + "/_all/type1%2Ctype2/_search", + }, + } + + for i, test := range tests { + path, _, err := client.Search().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/setup_test.go b/vendor/gopkg.in/olivere/elastic.v5/setup_test.go new file mode 100644 index 000000000..74513c029 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/setup_test.go @@ -0,0 +1,263 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "log" + "math/rand" + "os" + "time" + + "golang.org/x/net/context" +) + +const ( + testIndexName = "elastic-test" + testIndexName2 = "elastic-test2" + testMapping = ` +{ + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "_default_": { + "_all": { + "enabled": true + } + }, + "tweet":{ + "properties":{ + "user":{ + "type":"keyword" + }, + "message":{ + "type":"text", + "store": true, + "fielddata": true + }, + "tags":{ + "type":"keyword" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion" + } + } + }, + "comment":{ + "_parent": { + "type": "tweet" + } + }, + "order":{ + "properties":{ + "article":{ + "type":"text" + }, + "manufacturer":{ + "type":"keyword" + }, + "price":{ + "type":"float" + }, + "time":{ + "type":"date", + "format": "YYYY-MM-dd" + } + } + }, + "doctype":{ + "properties":{ + "message":{ + "type":"text", + "store": true, + "fielddata": true + } + } + }, + "queries":{ + "properties": { + "query": { + "type": "percolator" + } + } + } + } +} +` +) + +type tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *SuggestField `json:"suggest_field,omitempty"` +} + +func (t tweet) String() string { + return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets) +} + +type comment struct { + User string `json:"user"` + Comment string `json:"comment"` + Created time.Time `json:"created,omitempty"` +} + +func (c comment) String() string { + return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment) +} + +type order struct { + Article string `json:"article"` + Manufacturer string 
`json:"manufacturer"` + Price float64 `json:"price"` + Time string `json:"time,omitempty"` +} + +func (o order) String() string { + return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time) +} + +// doctype is required for Percolate tests. +type doctype struct { + Message string `json:"message"` +} + +// queries is required for Percolate tests. +type queries struct { + Query string `json:"query"` +} + +func isTravis() bool { + return os.Getenv("TRAVIS") != "" +} + +func travisGoVersion() string { + return os.Getenv("TRAVIS_GO_VERSION") +} + +type logger interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fail() + FailNow() + Log(args ...interface{}) + Logf(format string, args ...interface{}) +} + +func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) { + var err error + + client, err = NewClient(options...) + if err != nil { + t.Fatal(err) + } + + client.DeleteIndex(testIndexName).Do(context.TODO()) + client.DeleteIndex(testIndexName2).Do(context.TODO()) + + return client +} + +func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClient(t, options...) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + + // Create second index + createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if createIndex2 == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex2) + } + + return client +} + +func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client { + return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) +} + +func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClientAndCreateIndex(t, options...) 
+ + // Add tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + comment1 := comment{User: "nico", Comment: "You bet."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Add orders + var orders []order + orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"}) + orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"}) + orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"}) + orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"}) + orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"}) + orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"}) + orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"}) + orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"}) + for i, o := range orders { + id := fmt.Sprintf("%d", i) + _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + return client +} + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/sort.go b/vendor/gopkg.in/olivere/elastic.v5/sort.go new file mode 100644 index 000000000..1817c191a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/sort.go @@ -0,0 +1,501 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// -- Sorter -- + +// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html. +type Sorter interface { + Source() (interface{}, error) +} + +// -- SortInfo -- + +// SortInfo contains information about sorting a field. 
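+//
+// For example, a descending sort on the "created" field (as used in the
+// tests above) can be expressed as:
+//
+//   info := SortInfo{Field: "created", Ascending: false}
+//   src, _ := info.Source() // {"created":{"order":"desc"}}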
+type SortInfo struct {
+	Sorter
+	Field          string
+	Ascending      bool
+	Missing        interface{}
+	IgnoreUnmapped *bool
+	SortMode       string
+	NestedFilter   Query
+	NestedPath     string
+}
+
+func (info SortInfo) Source() (interface{}, error) {
+	prop := make(map[string]interface{})
+	if info.Ascending {
+		prop["order"] = "asc"
+	} else {
+		prop["order"] = "desc"
+	}
+	if info.Missing != nil {
+		prop["missing"] = info.Missing
+	}
+	if info.IgnoreUnmapped != nil {
+		prop["ignore_unmapped"] = *info.IgnoreUnmapped
+	}
+	if info.SortMode != "" {
+		prop["mode"] = info.SortMode
+	}
+	if info.NestedFilter != nil {
+		src, err := info.NestedFilter.Source()
+		if err != nil {
+			return nil, err
+		}
+		prop["nested_filter"] = src
+	}
+	if info.NestedPath != "" {
+		prop["nested_path"] = info.NestedPath
+	}
+	source := make(map[string]interface{})
+	source[info.Field] = prop
+	return source, nil
+}
+
+// -- SortByDoc --
+
+// SortByDoc sorts by the "_doc" field, as described in
+// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-scroll.html.
+//
+// Example:
+//   ss := elastic.NewSearchSource()
+//   ss = ss.SortBy(elastic.SortByDoc{})
+type SortByDoc struct {
+	Sorter
+}
+
+// Source returns the JSON-serializable data.
+func (s SortByDoc) Source() (interface{}, error) {
+	return "_doc", nil
+}
+
+// -- ScoreSort --
+
+// ScoreSort sorts by relevancy score.
+type ScoreSort struct {
+	Sorter
+	ascending bool
+}
+
+// NewScoreSort creates a new ScoreSort.
+func NewScoreSort() *ScoreSort {
+	return &ScoreSort{ascending: false} // Descending by default!
+}
+
+// Order defines whether sorting ascending or descending (the default).
+func (s *ScoreSort) Order(ascending bool) *ScoreSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s *ScoreSort) Asc() *ScoreSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s *ScoreSort) Desc() *ScoreSort {
+	s.ascending = false
+	return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *ScoreSort) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	x := make(map[string]interface{})
+	source["_score"] = x
+	if s.ascending {
+		x["reverse"] = true
+	}
+	return source, nil
+}
+
+// -- FieldSort --
+
+// FieldSort sorts by a given field.
+type FieldSort struct {
+	Sorter
+	fieldName      string
+	ascending      bool
+	missing        interface{}
+	ignoreUnmapped *bool
+	unmappedType   *string
+	sortMode       *string
+	nestedFilter   Query
+	nestedPath     *string
+}
+
+// NewFieldSort creates a new FieldSort.
+func NewFieldSort(fieldName string) *FieldSort {
+	return &FieldSort{
+		fieldName: fieldName,
+		ascending: true,
+	}
+}
+
+// FieldName specifies the name of the field to be used for sorting.
+func (s *FieldSort) FieldName(fieldName string) *FieldSort {
+	s.fieldName = fieldName
+	return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s *FieldSort) Order(ascending bool) *FieldSort {
+	s.ascending = ascending
+	return s
+}
+
+// Asc sets ascending sort order.
+func (s *FieldSort) Asc() *FieldSort {
+	s.ascending = true
+	return s
+}
+
+// Desc sets descending sort order.
+func (s *FieldSort) Desc() *FieldSort {
+	s.ascending = false
+	return s
+}
+
+// Missing sets the value to be used when a field is missing in a document.
+// You can also use "_last" or "_first" to sort missing last or first
+// respectively.
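+//
+// For example, to sort descending on the "created" field used in the tests
+// above, with missing documents sorted last:
+//
+//   sorter := NewFieldSort("created").Desc().Missing("_last")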
+func (s *FieldSort) Missing(missing interface{}) *FieldSort { + s.missing = missing + return s +} + +// IgnoreUnmapped specifies what happens if the field does not exist in +// the index. Set it to true to ignore, or set it to false to not ignore (default). +func (s *FieldSort) IgnoreUnmapped(ignoreUnmapped bool) *FieldSort { + s.ignoreUnmapped = &ignoreUnmapped + return s +} + +// UnmappedType sets the type to use when the current field is not mapped +// in an index. +func (s *FieldSort) UnmappedType(typ string) *FieldSort { + s.unmappedType = &typ + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s *FieldSort) SortMode(sortMode string) *FieldSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *FieldSort) NestedPath(nestedPath string) *FieldSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s *FieldSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source[s.fieldName] = x + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.missing != nil { + x["missing"] = s.missing + } + if s.ignoreUnmapped != nil { + x["ignore_unmapped"] = *s.ignoreUnmapped + } + if s.unmappedType != nil { + x["unmapped_type"] = *s.unmappedType + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- GeoDistanceSort -- + +// GeoDistanceSort allows for sorting by geographic distance. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +type GeoDistanceSort struct { + Sorter + fieldName string + points []*GeoPoint + geohashes []string + geoDistance *string + unit string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewGeoDistanceSort creates a new sorter for geo distances. +func NewGeoDistanceSort(fieldName string) *GeoDistanceSort { + return &GeoDistanceSort{ + fieldName: fieldName, + points: make([]*GeoPoint, 0), + geohashes: make([]string, 0), + ascending: true, + } +} + +// FieldName specifies the name of the (geo) field to use for sorting. +func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *GeoDistanceSort) Asc() *GeoDistanceSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *GeoDistanceSort) Desc() *GeoDistanceSort { + s.ascending = false + return s +} + +// Point specifies a point to create the range distance aggregations from. 
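A usage sketch for the FieldSort builder defined above, mirroring the shapes asserted in the tests later in this patch (field and nested-path names are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// "price" and "variant" are assumed names.
	sorter := elastic.NewFieldSort("price").
		Desc().
		Missing("_last").
		SortMode("avg").
		NestedPath("variant")
	src, err := sorter.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"price":{"missing":"_last","mode":"avg","nested_path":"variant","order":"desc"}}
}
```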
+func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort { + s.points = append(s.points, GeoPointFromLatLon(lat, lon)) + return s +} + +// Points specifies the geo point(s) to create the range distance aggregations from. +func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort { + s.points = append(s.points, points...) + return s +} + +// GeoHashes specifies the geo point to create the range distance aggregations from. +func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort { + s.geohashes = append(s.geohashes, geohashes...) + return s +} + +// GeoDistance represents how to compute the distance. +// It can be sloppy_arc (default), arc, or plane. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort { + s.geoDistance = &geoDistance + return s +} + +// Unit specifies the distance unit to use. It defaults to km. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units +// for details. +func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort { + s.unit = unit + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s *GeoDistanceSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_geo_distance"] = x + + // Points + var ptarr []interface{} + for _, pt := range s.points { + ptarr = append(ptarr, pt.Source()) + } + for _, geohash := range s.geohashes { + ptarr = append(ptarr, geohash) + } + x[s.fieldName] = ptarr + + if s.unit != "" { + x["unit"] = s.unit + } + if s.geoDistance != nil { + x["distance_type"] = *s.geoDistance + } + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- ScriptSort -- + +// ScriptSort sorts by a custom script. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting +// for details about scripting. +type ScriptSort struct { + Sorter + script *Script + typ string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewScriptSort creates and initializes a new ScriptSort. +// You must provide a script and a type, e.g. "string" or "number". 
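A sketch of driving the GeoDistanceSort builder just defined (field name and coordinates are assumptions; "plane" is one of the distance types the GeoDistance comment lists):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// "pin.location" is an assumed geo field.
	sorter := elastic.NewGeoDistanceSort("pin.location").
		Point(48.1333, 11.5667).
		Unit("km").
		GeoDistance("plane")
	src, err := sorter.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"_geo_distance":{"distance_type":"plane","pin.location":[{"lat":48.1333,"lon":11.5667}],"unit":"km"}}
}
```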
+func NewScriptSort(script *Script, typ string) *ScriptSort { + return &ScriptSort{ + script: script, + typ: typ, + ascending: true, + } +} + +// Type sets the script type, which can be either "string" or "number". +func (s *ScriptSort) Type(typ string) *ScriptSort { + s.typ = typ + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s *ScriptSort) Order(ascending bool) *ScriptSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *ScriptSort) Asc() *ScriptSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *ScriptSort) Desc() *ScriptSort { + s.ascending = false + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min or max. +func (s *ScriptSort) SortMode(sortMode string) *ScriptSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s *ScriptSort) Source() (interface{}, error) { + if s.script == nil { + return nil, errors.New("ScriptSort expected a script") + } + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_script"] = x + + src, err := s.script.Source() + if err != nil { + return nil, err + } + x["script"] = src + + x["type"] = s.typ + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/sort_test.go b/vendor/gopkg.in/olivere/elastic.v5/sort_test.go new file mode 100644 index 000000000..68ab355d1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/sort_test.go @@ -0,0 +1,238 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
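Before the tests, a sketch of ScriptSort in use (the script body, parameter, and field are assumptions; the serialized shape matches what the tests below assert):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Script and factor are illustrative only.
	script := elastic.NewScript("doc['price'].value * factor").Param("factor", 1.1)
	sorter := elastic.NewScriptSort(script, "number").Desc()
	src, err := sorter.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"_script":{"reverse":true,"script":{"inline":"doc['price'].value * factor","params":{"factor":1.1}},"type":"number"}}
}
```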
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSortInfo(t *testing.T) { + builder := SortInfo{Field: "grade", Ascending: false} + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"desc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSortInfoComplex(t *testing.T) { + builder := SortInfo{ + Field: "price", + Ascending: false, + Missing: "_last", + SortMode: "avg", + NestedFilter: NewTermQuery("product.color", "blue"), + NestedPath: "variant", + } + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSort(t *testing.T) { + builder := NewScoreSort() + if builder.ascending != false { + t.Error("expected score sorter to be ascending by default") + } + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSortOrderAscending(t *testing.T) { + builder := NewScoreSort().Asc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{"reverse":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSortOrderDescending(t *testing.T) { + builder := NewScoreSort().Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSort(t *testing.T) { + builder := NewFieldSort("grade") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"asc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSortOrderDesc(t *testing.T) { + builder := NewFieldSort("grade").Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"desc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSortComplex(t *testing.T) { + builder := NewFieldSort("price").Desc(). + SortMode("avg"). + Missing("_last"). + UnmappedType("product"). + NestedFilter(NewTermQuery("product.color", "blue")). 
+ NestedPath("variant") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceSort(t *testing.T) { + builder := NewGeoDistanceSort("pin.location"). + Point(-70, 40). + Order(true). + Unit("km"). + SortMode("min"). + GeoDistance("sloppy_arc") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceSortOrderDesc(t *testing.T) { + builder := NewGeoDistanceSort("pin.location"). + Point(-70, 40). + Unit("km"). + SortMode("min"). + GeoDistance("sloppy_arc"). + Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"reverse":true,"unit":"km"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} +func TestScriptSort(t *testing.T) { + builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_script":{"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptSortOrderDesc(t *testing.T) { + builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_script":{"reverse":true,"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest.go b/vendor/gopkg.in/olivere/elastic.v5/suggest.go new file mode 100644 index 000000000..6dee75762 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest.go @@ -0,0 +1,159 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// SuggestService returns suggestions for text. +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters.html. 
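A sketch of how the SuggestService defined next is typically driven. The client setup, URL, and index name are assumptions and not part of this diff; NewTermSuggester is used the same way in the tests later in the patch:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Assumed: a reachable Elasticsearch node at this placeholder URL.
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		panic(err)
	}
	res, err := client.Suggest().
		Index("tweets"). // assumed index name
		Suggester(elastic.NewTermSuggester("spell").Text("Goolang").Field("message")).
		Do(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d suggester result(s)\n", len(res))
}
```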
+type SuggestService struct { + client *Client + pretty bool + routing string + preference string + index []string + suggesters []Suggester +} + +// NewSuggestService creates a new instance of SuggestService. +func NewSuggestService(client *Client) *SuggestService { + builder := &SuggestService{ + client: client, + } + return builder +} + +// Index adds one or more indices to use for the suggestion request. +func (s *SuggestService) Index(index ...string) *SuggestService { + s.index = append(s.index, index...) + return s +} + +// Pretty asks Elasticsearch to return indented JSON. +func (s *SuggestService) Pretty(pretty bool) *SuggestService { + s.pretty = pretty + return s +} + +// Routing specifies the routing value. +func (s *SuggestService) Routing(routing string) *SuggestService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *SuggestService) Preference(preference string) *SuggestService { + s.preference = preference + return s +} + +// Suggester adds a suggester to the request. +func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { + s.suggesters = append(s.suggesters, suggester) + return s +} + +// buildURL builds the URL for the operation. +func (s *SuggestService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_suggest", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_suggest" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Do executes the request. +func (s *SuggestService) Do(ctx context.Context) (SuggestResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Set body + body := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + body[s.Name()] = src + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // There is a _shard object that cannot be deserialized. + // So we use json.RawMessage instead. + var suggestions map[string]*json.RawMessage + if err := s.client.decoder.Decode(res.Body, &suggestions); err != nil { + return nil, err + } + + ret := make(SuggestResult) + for name, result := range suggestions { + if name != "_shards" { + var sug []Suggestion + if err := s.client.decoder.Decode(*result, &sug); err != nil { + return nil, err + } + ret[name] = sug + } + } + + return ret, nil +} + +// SuggestResult is the outcome of SuggestService.Do. +type SuggestResult map[string][]Suggestion + +// Suggestion is a single suggester outcome. 
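A sketch of consuming a SuggestResult once Do returns. It relies only on the Suggestion type declared next; the suggester name and sample data are assumptions:

```go
package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

// printSuggestions walks a SuggestResult, which maps a suggester name
// to its suggestions; each suggestion carries candidate options.
func printSuggestions(res elastic.SuggestResult, name string) {
	for _, sug := range res[name] {
		fmt.Printf("%q (offset %d, length %d)\n", sug.Text, sug.Offset, sug.Length)
		for _, opt := range sug.Options {
			fmt.Printf("  candidate: %s (score %v)\n", opt.Text, opt.Score)
		}
	}
}

func main() {
	// Hand-built result for illustration only.
	res := elastic.SuggestResult{
		"spell": []elastic.Suggestion{{Text: "goolang", Offset: 0, Length: 7}},
	}
	printSuggestions(res, "spell")
}
```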
+type Suggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []suggestionOption `json:"options"` +} + +type suggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` + CollateMatch bool `json:"collate_match"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go new file mode 100644 index 000000000..5cfa39371 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go @@ -0,0 +1,83 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// SuggestField can be used by the caller to specify a suggest field +// at index time. For a detailed example, see e.g. +// http://www.elasticsearch.org/blog/you-complete-me/. +type SuggestField struct { + inputs []string + weight int + contextQueries []SuggesterContextQuery +} + +func NewSuggestField(input ...string) *SuggestField { + return &SuggestField{ + inputs: input, + weight: -1, + } +} + +func (f *SuggestField) Input(input ...string) *SuggestField { + if f.inputs == nil { + f.inputs = make([]string, 0) + } + f.inputs = append(f.inputs, input...) + return f +} + +func (f *SuggestField) Weight(weight int) *SuggestField { + f.weight = weight + return f +} + +func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { + f.contextQueries = append(f.contextQueries, queries...) + return f +} + +// MarshalJSON encodes SuggestField into JSON. +func (f *SuggestField) MarshalJSON() ([]byte, error) { + source := make(map[string]interface{}) + + if f.inputs != nil { + switch len(f.inputs) { + case 1: + source["input"] = f.inputs[0] + default: + source["input"] = f.inputs + } + } + + if f.weight >= 0 { + source["weight"] = f.weight + } + + switch len(f.contextQueries) { + case 0: + case 1: + src, err := f.contextQueries[0].Source() + if err != nil { + return nil, err + } + source["context"] = src + default: + var ctxq []interface{} + for _, query := range f.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + source["context"] = ctxq + } + + return json.Marshal(source) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go new file mode 100644 index 000000000..c57c71755 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggestField(t *testing.T) { + field := NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Weight(1). 
+ ContextQuery( + NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"), + NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)), + ) + data, err := json.Marshal(field) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go new file mode 100644 index 000000000..f43922132 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go @@ -0,0 +1,163 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestSuggestBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_suggest", + }, + { + []string{"index1"}, + "/index1/_suggest", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_suggest", + }, + } + + for i, test := range tests { + path, _, err := client.Suggest().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestSuggestService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Suggest: NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Weight(0), + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Suggest: NewSuggestField(). + Input("Another unrelated topic.", "Golang topic."). + Weight(1), + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Suggest: NewSuggestField(). 
+ Input("Cycling is fun."), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Test _suggest endpoint + termSuggesterName := "my-term-suggester" + termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message") + phraseSuggesterName := "my-phrase-suggester" + phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message") + completionSuggesterName := "my-completion-suggester" + completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field") + + result, err := client.Suggest(). + Index(testIndexName). + Suggester(termSuggester). + Suggester(phraseSuggester). + Suggester(completionSuggester). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Errorf("expected result != nil; got nil") + } + if len(result) != 3 { + t.Errorf("expected 3 suggester results; got %d", len(result)) + } + + termSuggestions, found := result[termSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName) + } + if termSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName) + } + if len(termSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(termSuggestions)) + } + + phraseSuggestions, found := result[phraseSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName) + } + if phraseSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName) + } + if len(phraseSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions)) + } + + completionSuggestions, found := result[completionSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName) + } + if completionSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName) + } + if len(completionSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions)) + } + if len(completionSuggestions[0].Options) != 2 { + t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options)) + } + if have, want := completionSuggestions[0].Options[0].Text, "Golang topic."; have != want { + t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, want, have) + } + if have, want := completionSuggestions[0].Options[1].Text, "Golang and Elasticsearch"; have != want { + t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, want, have) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester.go b/vendor/gopkg.in/olivere/elastic.v5/suggester.go new file mode 100644 index 000000000..f7dc48f90 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester.go @@ -0,0 +1,15 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// Represents the generic suggester interface. +// A suggester's only purpose is to return the +// source of the query as a JSON-serializable +// object. Returning a map[string]interface{} +// will do. +type Suggester interface { + Name() string + Source(includeName bool) (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go new file mode 100644 index 000000000..b85953209 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go @@ -0,0 +1,138 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// CompletionSuggester is a fast suggester for e.g. type-ahead completion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for more details. +type CompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery +} + +// Creates a new completion suggester. +func NewCompletionSuggester(name string) *CompletionSuggester { + return &CompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *CompletionSuggester) Name() string { + return q.name +} + +func (q *CompletionSuggester) Text(text string) *CompletionSuggester { + q.text = text + return q +} + +func (q *CompletionSuggester) Field(field string) *CompletionSuggester { + q.field = field + return q +} + +func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *CompletionSuggester) Size(size int) *CompletionSuggester { + q.size = &size + return q +} + +func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// completionSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the completion element. +type completionSuggesterRequest struct { + Text string `json:"text"` + Completion interface{} `json:"completion"` +} + +// Creates the source for the completion suggester. 
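A usage sketch for the CompletionSuggester configured above, mirroring the test expectations later in this patch (name, text, and field are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	s := elastic.NewCompletionSuggester("song-suggest").
		Text("n").
		Field("suggest")
	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"song-suggest":{"text":"n","completion":{"field":"suggest"}}}
}
```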
+func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
+	cs := &completionSuggesterRequest{}
+
+	if q.text != "" {
+		cs.Text = q.text
+	}
+
+	suggester := make(map[string]interface{})
+	cs.Completion = suggester
+
+	if q.analyzer != "" {
+		suggester["analyzer"] = q.analyzer
+	}
+	if q.field != "" {
+		suggester["field"] = q.field
+	}
+	if q.size != nil {
+		suggester["size"] = *q.size
+	}
+	if q.shardSize != nil {
+		suggester["shard_size"] = *q.shardSize
+	}
+	switch len(q.contextQueries) {
+	case 0:
+	case 1:
+		src, err := q.contextQueries[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		suggester["context"] = src
+	default:
+		ctxq := make(map[string]interface{})
+		for _, query := range q.contextQueries {
+			src, err := query.Source()
+			if err != nil {
+				return nil, err
+			}
+			// Merge the dictionary into ctxq
+			m, ok := src.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("elastic: context query is not a map")
+			}
+			for k, v := range m {
+				ctxq[k] = v
+			}
+		}
+		suggester["context"] = ctxq
+	}
+
+	// TODO(oe) Add completion-suggester specific parameters here
+
+	if !includeName {
+		return cs, nil
+	}
+
+	source := make(map[string]interface{})
+	source[q.name] = cs
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go
new file mode 100644
index 000000000..871688149
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go
@@ -0,0 +1,179 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
+// completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+// for details, and
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy
+// for details about the fuzzy completion suggester.
+type FuzzyCompletionSuggester struct {
+	Suggester
+	name           string
+	text           string
+	field          string
+	analyzer       string
+	size           *int
+	shardSize      *int
+	contextQueries []SuggesterContextQuery
+
+	fuzziness           interface{}
+	fuzzyTranspositions *bool
+	fuzzyMinLength      *int
+	fuzzyPrefixLength   *int
+	unicodeAware        *bool
+}
+
+// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester.
+type Fuzziness struct {
+}
+
+// NewFuzzyCompletionSuggester creates a new fuzzy completion suggester.
+func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { + return &FuzzyCompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *FuzzyCompletionSuggester) Name() string { + return q.name +} + +func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { + q.text = text + return q +} + +func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { + q.field = field + return q +} + +func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { + q.size = &size + return q +} + +func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// Fuzziness defines the strategy used to describe what "fuzzy" actually +// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness +// for a detailed description. +func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { + q.fuzzyMinLength = &minLength + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { + q.fuzzyPrefixLength = &prefixLength + return q +} + +func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { + q.unicodeAware = &unicodeAware + return q +} + +// Creates the source for the completion suggester. 
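A sketch of the fuzzy variant in use, following the same Source-and-marshal pattern as the tests in this patch ("AUTO" is one of the fuzziness strategies the Fuzziness doc comment names; field and text are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	s := elastic.NewFuzzyCompletionSuggester("song-suggest").
		Text("nirv").
		Field("suggest").
		Fuzziness("AUTO")
	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"song-suggest":{"text":"nirv","completion":{"field":"suggest","fuzzy":{"fuzziness":"AUTO"}}}}
}
```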
+func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + var ctxq []interface{} + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy + if q.fuzziness != nil { + fuzzy["fuzziness"] = q.fuzziness + } + if q.fuzzyTranspositions != nil { + fuzzy["transpositions"] = *q.fuzzyTranspositions + } + if q.fuzzyMinLength != nil { + fuzzy["min_length"] = *q.fuzzyMinLength + } + if q.fuzzyPrefixLength != nil { + fuzzy["prefix_length"] = *q.fuzzyPrefixLength + } + if q.unicodeAware != nil { + fuzzy["unicode_aware"] = *q.unicodeAware + } + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go new file mode 100644 index 000000000..aae1db11b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go @@ -0,0 +1,50 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFuzzyCompletionSuggesterSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). + Fuzziness(2) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). + Fuzziness("1..4") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go new file mode 100644 index 000000000..6b7c3f420 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCompletionSuggesterSource(t *testing.T) { + s := NewCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompletionSuggesterSourceWithMultipleContexts(t *testing.T) { + s := NewCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). + ContextQueries( + NewSuggesterCategoryQuery("artist", "Sting"), + NewSuggesterCategoryQuery("label", "BMG"), + ) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"context":{"artist":"Sting","label":"BMG"},"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go new file mode 100644 index 000000000..caf477669 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go @@ -0,0 +1,11 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SuggesterContextQuery is used to define context information within +// a suggestion request. +type SuggesterContextQuery interface { + Source() (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go new file mode 100644 index 000000000..67a82edc3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterCategoryMapping -- + +// SuggesterCategoryMapping provides a mapping for a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping. +type SuggesterCategoryMapping struct { + name string + fieldName string + defaultValues []string +} + +// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. +func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { + return &SuggesterCategoryMapping{ + name: name, + defaultValues: make([]string, 0), + } +} + +func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { + q.defaultValues = append(q.defaultValues, values...) + return q +} + +func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
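A sketch of the two category-context types above working together: the mapping is declared at index time, the query filters suggestions at request time. Names and values are assumptions, mirroring the tests that follow:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	mapping := elastic.NewSuggesterCategoryMapping("genre").
		FieldName("genre_field").
		DefaultValues("rock")
	msrc, err := mapping.Source()
	if err != nil {
		panic(err)
	}
	mdata, _ := json.Marshal(msrc)
	fmt.Println(string(mdata))
	// {"genre":{"default":"rock","path":"genre_field","type":"category"}}

	query := elastic.NewSuggesterCategoryQuery("genre", "rock", "pop")
	qsrc, err := query.Source()
	if err != nil {
		panic(err)
	}
	qdata, _ := json.Marshal(qsrc)
	fmt.Println(string(qdata)) // {"genre":["rock","pop"]}
}
```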
+func (q *SuggesterCategoryMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "category" + + switch len(q.defaultValues) { + case 0: + x["default"] = q.defaultValues + case 1: + x["default"] = q.defaultValues[0] + default: + x["default"] = q.defaultValues + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterCategoryQuery -- + +// SuggesterCategoryQuery provides querying a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query. +type SuggesterCategoryQuery struct { + name string + values []string +} + +// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. +func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { + q := &SuggesterCategoryQuery{ + name: name, + values: make([]string, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { + q.values = append(q.values, values...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + switch len(q.values) { + case 0: + source[q.name] = q.values + case 1: + source[q.name] = q.values[0] + default: + source[q.name] = q.values + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category_test.go new file mode 100644 index 000000000..3a013d642 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterCategoryMapping(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":"red","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithFieldName(t *testing.T) { + q := NewSuggesterCategoryMapping("color"). + DefaultValues("red", "orange"). 
+ FieldName("color_field") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQuery(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":"red"}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red", "yellow") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":["red","yellow"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go new file mode 100644 index 000000000..a895855cc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterGeoMapping -- + +// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping. +type SuggesterGeoMapping struct { + name string + defaultLocations []*GeoPoint + precision []string + neighbors *bool + fieldName string +} + +// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. +func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { + return &SuggesterGeoMapping{ + name: name, + } +} + +func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { + q.defaultLocations = append(q.defaultLocations, locations...) + return q +} + +func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { + q.precision = append(q.precision, precision...) + return q +} + +func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { + q.neighbors = &neighbors + return q +} + +func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
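A sketch of the geo context mapping above, together with the SuggesterGeoQuery defined further down in this file (names, precision, and coordinates are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	mapping := elastic.NewSuggesterGeoMapping("location").
		Precision("5km").
		Neighbors(true).
		FieldName("pin").
		DefaultLocations(elastic.GeoPointFromLatLon(52.52, 13.405))
	msrc, err := mapping.Source()
	if err != nil {
		panic(err)
	}
	mdata, _ := json.Marshal(msrc)
	fmt.Println(string(mdata))
	// {"location":{"default":{"lat":52.52,"lon":13.405},"neighbors":true,"path":"pin","precision":["5km"],"type":"geo"}}

	q := elastic.NewSuggesterGeoQuery("location", elastic.GeoPointFromLatLon(52.52, 13.405)).Precision("5km")
	qsrc, err := q.Source()
	if err != nil {
		panic(err)
	}
	qdata, _ := json.Marshal(qsrc)
	fmt.Println(string(qdata))
	// {"location":{"precision":"5km","value":{"lat":52.52,"lon":13.405}}}
}
```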
+func (q *SuggesterGeoMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "geo" + + if len(q.precision) > 0 { + x["precision"] = q.precision + } + if q.neighbors != nil { + x["neighbors"] = *q.neighbors + } + + switch len(q.defaultLocations) { + case 0: + case 1: + x["default"] = q.defaultLocations[0].Source() + default: + var arr []interface{} + for _, p := range q.defaultLocations { + arr = append(arr, p.Source()) + } + x["default"] = arr + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterGeoQuery -- + +// SuggesterGeoQuery provides querying a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query +type SuggesterGeoQuery struct { + name string + location *GeoPoint + precision []string +} + +// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. +func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { + return &SuggesterGeoQuery{ + name: name, + location: location, + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { + q.precision = append(q.precision, precision...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if len(q.precision) == 0 { + if q.location != nil { + source[q.name] = q.location.Source() + } + } else { + x := make(map[string]interface{}) + source[q.name] = x + + if q.location != nil { + x["value"] = q.location.Source() + } + + switch len(q.precision) { + case 0: + case 1: + x["precision"] = q.precision[0] + default: + x["precision"] = q.precision + } + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo_test.go new file mode 100644 index 000000000..b1ab2f495 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo_test.go @@ -0,0 +1,48 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterGeoMapping(t *testing.T) { + q := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). 
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterGeoQuery(t *testing.T) { + q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go new file mode 100644 index 000000000..989e40482 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go @@ -0,0 +1,546 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PhraseSuggester provides an API to access word alternatives +// on a per token basis within a certain string distance. +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters-phrase.html. +type PhraseSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to a phrase suggester + maxErrors *float64 + separator *string + realWordErrorLikelihood *float64 + confidence *float64 + generators map[string][]CandidateGenerator + gramSize *int + smoothingModel SmoothingModel + forceUnigrams *bool + tokenLimit *int + preTag, postTag *string + collateQuery *string + collatePreference *string + collateParams map[string]interface{} + collatePrune *bool +} + +// NewPhraseSuggester creates a new PhraseSuggester. +func NewPhraseSuggester(name string) *PhraseSuggester { + return &PhraseSuggester{ + name: name, + collateParams: make(map[string]interface{}), + } +} + +func (q *PhraseSuggester) Name() string { + return q.name +} + +func (q *PhraseSuggester) Text(text string) *PhraseSuggester { + q.text = text + return q +} + +func (q *PhraseSuggester) Field(field string) *PhraseSuggester { + q.field = field + return q +} + +func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { + q.analyzer = analyzer + return q +} + +func (q *PhraseSuggester) Size(size int) *PhraseSuggester { + q.size = &size + return q +} + +func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { + q.shardSize = &shardSize + return q +} + +func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, queries...) 
+ return q +} + +func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester { + if gramSize >= 1 { + q.gramSize = &gramSize + } + return q +} + +func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester { + q.maxErrors = &maxErrors + return q +} + +func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester { + q.separator = &separator + return q +} + +func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester { + q.realWordErrorLikelihood = &realWordErrorLikelihood + return q +} + +func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester { + q.confidence = &confidence + return q +} + +func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester { + if q.generators == nil { + q.generators = make(map[string][]CandidateGenerator) + } + typ := generator.Type() + if _, found := q.generators[typ]; !found { + q.generators[typ] = make([]CandidateGenerator, 0) + } + q.generators[typ] = append(q.generators[typ], generator) + return q +} + +func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester { + for _, g := range generators { + q = q.CandidateGenerator(g) + } + return q +} + +func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester { + q.generators = nil + return q +} + +func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester { + q.forceUnigrams = &forceUnigrams + return q +} + +func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester { + q.smoothingModel = smoothingModel + return q +} + +func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester { + q.tokenLimit = &tokenLimit + return q +} + +func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester { + q.preTag = &preTag + q.postTag = &postTag + return q +} + +func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester { + q.collateQuery = &collateQuery + return q +} + +func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester { + q.collatePreference = &collatePreference + return q +} + +func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester { + q.collateParams = collateParams + return q +} + +func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester { + q.collatePrune = &collatePrune + return q +} + +// phraseSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the simple_phrase element. +type phraseSuggesterRequest struct { + Text string `json:"text"` + Phrase interface{} `json:"phrase"` +} + +// Source generates the source for the phrase suggester. 
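A configuration sketch for the PhraseSuggester builders above (field, text, and collate query are assumptions loosely following the Elasticsearch reference docs, not part of this patch):

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	s := elastic.NewPhraseSuggester("simple-phrase").
		Text("noble prize").
		Field("title.trigram").
		MaxErrors(0.5).
		Highlight("<em>", "</em>").
		CollateQuery(`{"match":{"title":"{{suggestion}}"}}`)
	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// Roughly:
	// {"simple-phrase":{"text":"noble prize","phrase":{"collate":{"query":"..."},
	//   "field":"title.trigram","highlight":{"post_tag":"</em>","pre_tag":"<em>"},
	//   "max_errors":0.5}}}
}
```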
+func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
+ ps := &phraseSuggesterRequest{}
+
+ if q.text != "" {
+  ps.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ps.Phrase = suggester
+
+ if q.analyzer != "" {
+  suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+  suggester["field"] = q.field
+ }
+ if q.size != nil {
+  suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+  suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+  src, err := q.contextQueries[0].Source()
+  if err != nil {
+   return nil, err
+  }
+  suggester["context"] = src
+ default:
+  var ctxq []interface{}
+  for _, query := range q.contextQueries {
+   src, err := query.Source()
+   if err != nil {
+    return nil, err
+   }
+   ctxq = append(ctxq, src)
+  }
+  suggester["context"] = ctxq
+ }
+
+ // Phrase-specific parameters
+ if q.realWordErrorLikelihood != nil {
+  suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+ }
+ if q.confidence != nil {
+  suggester["confidence"] = *q.confidence
+ }
+ if q.separator != nil {
+  suggester["separator"] = *q.separator
+ }
+ if q.maxErrors != nil {
+  suggester["max_errors"] = *q.maxErrors
+ }
+ if q.gramSize != nil {
+  suggester["gram_size"] = *q.gramSize
+ }
+ if q.forceUnigrams != nil {
+  suggester["force_unigrams"] = *q.forceUnigrams
+ }
+ if q.tokenLimit != nil {
+  suggester["token_limit"] = *q.tokenLimit
+ }
+ if len(q.generators) > 0 {
+  for typ, generators := range q.generators {
+   var arr []interface{}
+   for _, g := range generators {
+    src, err := g.Source()
+    if err != nil {
+     return nil, err
+    }
+    arr = append(arr, src)
+   }
+   suggester[typ] = arr
+  }
+ }
+ if q.smoothingModel != nil {
+  src, err := q.smoothingModel.Source()
+  if err != nil {
+   return nil, err
+  }
+  x := make(map[string]interface{})
+  x[q.smoothingModel.Type()] = src
+  suggester["smoothing"] = x
+ }
+ if q.preTag != nil {
+  hl := make(map[string]string)
+  hl["pre_tag"] = *q.preTag
+  if q.postTag != nil {
+   hl["post_tag"] = *q.postTag
+  }
+  suggester["highlight"] = hl
+ }
+ if q.collateQuery != nil {
+  collate := make(map[string]interface{})
+  suggester["collate"] = collate
+  collate["query"] = *q.collateQuery
+  if q.collatePreference != nil {
+   collate["preference"] = *q.collatePreference
+  }
+  if len(q.collateParams) > 0 {
+   collate["params"] = q.collateParams
+  }
+  if q.collatePrune != nil {
+   collate["prune"] = *q.collatePrune
+  }
+ }
+
+ if !includeName {
+  return ps, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ps
+ return source, nil
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+ Type() string
+ Source() (interface{}, error)
+}
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
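+//
+// A hypothetical example of attaching a smoothing model to a phrase
+// suggester (0.4 is an arbitrary discount value):
+//
+//   sm := NewStupidBackoffSmoothingModel(0.4)
+//   s := NewPhraseSuggester("simple_phrase").SmoothingModel(sm)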
+type StupidBackoffSmoothingModel struct {
+ discount float64
+}
+
+func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
+ return &StupidBackoffSmoothingModel{
+  discount: discount,
+ }
+}
+
+func (sm *StupidBackoffSmoothingModel) Type() string {
+ return "stupid_backoff"
+}
+
+func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["discount"] = sm.discount
+ return source, nil
+}
+
+// --
+
+// LaplaceSmoothingModel implements a Laplace smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LaplaceSmoothingModel struct {
+ alpha float64
+}
+
+func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
+ return &LaplaceSmoothingModel{
+  alpha: alpha,
+ }
+}
+
+func (sm *LaplaceSmoothingModel) Type() string {
+ return "laplace"
+}
+
+func (sm *LaplaceSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["alpha"] = sm.alpha
+ return source, nil
+}
+
+// --
+
+// LinearInterpolationSmoothingModel implements a linear interpolation
+// smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LinearInterpolationSmoothingModel struct {
+ trigramLambda float64
+ bigramLambda float64
+ unigramLambda float64
+}
+
+func NewLinearInterpolationSmoothingModel(trigramLambda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
+ return &LinearInterpolationSmoothingModel{
+  trigramLambda: trigramLambda,
+  bigramLambda: bigramLambda,
+  unigramLambda: unigramLambda,
+ }
+}
+
+func (sm *LinearInterpolationSmoothingModel) Type() string {
+ return "linear_interpolation"
+}
+
+func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["trigram_lambda"] = sm.trigramLambda
+ source["bigram_lambda"] = sm.bigramLambda
+ source["unigram_lambda"] = sm.unigramLambda
+ return source, nil
+}
+
+// -- CandidateGenerator --
+
+type CandidateGenerator interface {
+ Type() string
+ Source() (interface{}, error)
+}
+
+// DirectCandidateGenerator implements a direct candidate generator.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about candidate generators.
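+//
+// A sketch of wiring a direct generator into a phrase suggester;
+// "body" is a placeholder field name:
+//
+//   g := NewDirectCandidateGenerator("body").SuggestMode("always").MaxEdits(2)
+//   s := NewPhraseSuggester("simple_phrase").CandidateGenerator(g)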
+type DirectCandidateGenerator struct { + field string + preFilter *string + postFilter *string + suggestMode *string + accuracy *float64 + size *int + sort *string + stringDistance *string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { + return &DirectCandidateGenerator{ + field: field, + } +} + +func (g *DirectCandidateGenerator) Type() string { + return "direct_generator" +} + +func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { + g.field = field + return g +} + +func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { + g.preFilter = &preFilter + return g +} + +func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { + g.postFilter = &postFilter + return g +} + +func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { + g.suggestMode = &suggestMode + return g +} + +func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { + g.accuracy = &accuracy + return g +} + +func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { + g.size = &size + return g +} + +func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { + g.sort = &sort + return g +} + +func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { + g.stringDistance = &stringDistance + return g +} + +func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { + g.maxEdits = &maxEdits + return g +} + +func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { + g.maxInspections = &maxInspections + return g +} + +func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { + g.maxTermFreq = &maxTermFreq + return g +} + +func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { + g.prefixLength = &prefixLength + return g +} + +func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { + g.minWordLength = &minWordLength + return g +} + +func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { + g.minDocFreq = &minDocFreq + return g +} + +func (g *DirectCandidateGenerator) Source() (interface{}, error) { + source := make(map[string]interface{}) + if g.field != "" { + source["field"] = g.field + } + if g.suggestMode != nil { + source["suggest_mode"] = *g.suggestMode + } + if g.accuracy != nil { + source["accuracy"] = *g.accuracy + } + if g.size != nil { + source["size"] = *g.size + } + if g.sort != nil { + source["sort"] = *g.sort + } + if g.stringDistance != nil { + source["string_distance"] = *g.stringDistance + } + if g.maxEdits != nil { + source["max_edits"] = *g.maxEdits + } + if g.maxInspections != nil { + source["max_inspections"] = *g.maxInspections + } + if g.maxTermFreq != nil { + source["max_term_freq"] = *g.maxTermFreq + } + if g.prefixLength != nil { + source["prefix_length"] = *g.prefixLength + } + if g.minWordLength != nil { + source["min_word_length"] = *g.minWordLength + } + if g.minDocFreq != nil { + source["min_doc_freq"] = *g.minDocFreq + } + if g.preFilter != nil { + source["pre_filter"] = *g.preFilter + } + if g.postFilter != nil { + source["post_filter"] = *g.postFilter + } + return source, nil +} diff 
--git a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go new file mode 100644 index 000000000..fbcc676fe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go @@ -0,0 +1,169 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPhraseSuggesterSource(t *testing.T) { + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", "") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) { + geomapQ := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). + DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", ""). + ContextQuery(geomapQ) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterComplexSource(t *testing.T) { + g1 := NewDirectCandidateGenerator("body"). + SuggestMode("always"). + MinWordLength(1) + + g2 := NewDirectCandidateGenerator("reverse"). + SuggestMode("always"). + MinWordLength(1). + PreFilter("reverse"). + PostFilter("reverse") + + s := NewPhraseSuggester("simple_phrase"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(4). + RealWordErrorLikelihood(0.95). + Confidence(2.0). + GramSize(2). + CandidateGenerators(g1, g2). + CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`). + CollateParams(map[string]interface{}{"field_name": "title"}). + CollatePreference("_primary"). 
+ CollatePrune(true) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseStupidBackoffSmoothingModel(t *testing.T) { + s := NewStupidBackoffSmoothingModel(0.42) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"discount":0.42}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "stupid_backoff" { + t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type()) + } +} + +func TestPhraseLaplaceSmoothingModel(t *testing.T) { + s := NewLaplaceSmoothingModel(0.63) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"alpha":0.63}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "laplace" { + t.Errorf("expected %q, got: %q", "laplace", s.Type()) + } +} + +func TestLinearInterpolationSmoothingModel(t *testing.T) { + s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "linear_interpolation" { + t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type()) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go new file mode 100644 index 000000000..fb5987306 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go @@ -0,0 +1,233 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermSuggester suggests terms based on edit distance. +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters-term.html. 
+type TermSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to term suggester + suggestMode string + accuracy *float64 + sort string + stringDistance string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +// NewTermSuggester creates a new TermSuggester. +func NewTermSuggester(name string) *TermSuggester { + return &TermSuggester{ + name: name, + } +} + +func (q *TermSuggester) Name() string { + return q.name +} + +func (q *TermSuggester) Text(text string) *TermSuggester { + q.text = text + return q +} + +func (q *TermSuggester) Field(field string) *TermSuggester { + q.field = field + return q +} + +func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { + q.analyzer = analyzer + return q +} + +func (q *TermSuggester) Size(size int) *TermSuggester { + q.size = &size + return q +} + +func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { + q.shardSize = &shardSize + return q +} + +func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { + q.suggestMode = suggestMode + return q +} + +func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { + q.accuracy = &accuracy + return q +} + +func (q *TermSuggester) Sort(sort string) *TermSuggester { + q.sort = sort + return q +} + +func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { + q.stringDistance = stringDistance + return q +} + +func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { + q.maxEdits = &maxEdits + return q +} + +func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { + q.maxInspections = &maxInspections + return q +} + +func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { + q.maxTermFreq = &maxTermFreq + return q +} + +func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { + q.prefixLength = &prefixLength + return q +} + +func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { + q.minWordLength = &minWordLength + return q +} + +func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { + q.minDocFreq = &minDocFreq + return q +} + +// termSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the term element. +type termSuggesterRequest struct { + Text string `json:"text"` + Term interface{} `json:"term"` +} + +// Source generates the source for the term suggester. 
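+//
+// An illustrative sketch; the field and the (intentionally misspelled)
+// input text are placeholders:
+//
+//   s := NewTermSuggester("my-suggest-1").
+//       Text("the amsterdma meetpu").
+//       Field("body").
+//       SuggestMode("missing")
+//   src, err := s.Source(true)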
+func (q *TermSuggester) Source(includeName bool) (interface{}, error) { + // "suggest" : { + // "my-suggest-1" : { + // "text" : "the amsterdma meetpu", + // "term" : { + // "field" : "body" + // } + // }, + // "my-suggest-2" : { + // "text" : "the rottredam meetpu", + // "term" : { + // "field" : "title", + // } + // } + // } + ts := &termSuggesterRequest{} + if q.text != "" { + ts.Text = q.text + } + + suggester := make(map[string]interface{}) + ts.Term = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, len(q.contextQueries)) + for i, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq[i] = src + } + suggester["context"] = ctxq + } + + // Specific to term suggester + if q.suggestMode != "" { + suggester["suggest_mode"] = q.suggestMode + } + if q.accuracy != nil { + suggester["accuracy"] = *q.accuracy + } + if q.sort != "" { + suggester["sort"] = q.sort + } + if q.stringDistance != "" { + suggester["string_distance"] = q.stringDistance + } + if q.maxEdits != nil { + suggester["max_edits"] = *q.maxEdits + } + if q.maxInspections != nil { + suggester["max_inspections"] = *q.maxInspections + } + if q.maxTermFreq != nil { + suggester["max_term_freq"] = *q.maxTermFreq + } + if q.prefixLength != nil { + suggester["prefix_len"] = *q.prefixLength + } + if q.minWordLength != nil { + suggester["min_word_len"] = *q.minWordLength + } + if q.minDocFreq != nil { + suggester["min_doc_freq"] = *q.minDocFreq + } + + if !includeName { + return ts, nil + } + + source := make(map[string]interface{}) + source[q.name] = ts + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_term_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_term_test.go new file mode 100644 index 000000000..bb10f03e2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_term_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermSuggesterSource(t *testing.T) { + s := NewTermSuggester("name"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"n","term":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go new file mode 100644 index 000000000..56a26f894 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// TasksCancelService can cancel long-running tasks. +// It is supported as of Elasticsearch 2.3.0. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks-cancel.html +// for details. +type TasksCancelService struct { + client *Client + pretty bool + taskId *int64 + actions []string + nodeId []string + parentNode string + parentTask *int64 +} + +// NewTasksCancelService creates a new TasksCancelService. +func NewTasksCancelService(client *Client) *TasksCancelService { + return &TasksCancelService{ + client: client, + actions: make([]string, 0), + nodeId: make([]string, 0), + } +} + +// TaskId specifies the task to cancel. Set to -1 to cancel all tasks. +func (s *TasksCancelService) TaskId(taskId int64) *TasksCancelService { + s.taskId = &taskId + return s +} + +// Actions is a list of actions that should be cancelled. Leave empty to cancel all. +func (s *TasksCancelService) Actions(actions []string) *TasksCancelService { + s.actions = actions + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksCancelService) NodeId(nodeId []string) *TasksCancelService { + s.nodeId = nodeId + return s +} + +// ParentNode specifies to cancel tasks with specified parent node. +func (s *TasksCancelService) ParentNode(parentNode string) *TasksCancelService { + s.parentNode = parentNode + return s +} + +// ParentTask specifies to cancel tasks with specified parent task id. +// Set to -1 to cancel all. +func (s *TasksCancelService) ParentTask(parentTask int64) *TasksCancelService { + s.parentTask = &parentTask + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *TasksCancelService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.taskId != nil { + path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{ + "task_id": fmt.Sprintf("%d", *s.taskId), + }) + } else { + path = "/_tasks/_cancel" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksCancelService) Validate() error { + return nil +} + +// Do executes the operation. 
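+//
+// Hypothetical usage, assuming an initialized *Client in client and a
+// context.Context in ctx:
+//
+//   res, err := client.TasksCancel().TaskId(42).Do(ctx)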
+func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+  return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+  return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ if err != nil {
+  return nil, err
+ }
+
+ // Return operation response
+ ret := new(TasksListResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+  return nil, err
+ }
+ return ret, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel_test.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel_test.go
new file mode 100644
index 000000000..c9d863394
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestTasksCancelBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Cancel all
+ got, _, err := client.TasksCancel().buildURL()
+ if err != nil {
+  t.Fatal(err)
+ }
+ want := "/_tasks/_cancel"
+ if got != want {
+  t.Errorf("want %q; got %q", want, got)
+ }
+
+ // Cancel specific task
+ got, _, err = client.TasksCancel().TaskId(42).buildURL()
+ if err != nil {
+  t.Fatal(err)
+ }
+ want = "/_tasks/42/_cancel"
+ if got != want {
+  t.Errorf("want %q; got %q", want, got)
+ }
+}
+
+/*
+func TestTasksCancel(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+  t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+  t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
+ }
+ res, err := client.TasksCancel("1").Do(context.TODO())
+ if err != nil {
+  t.Fatal(err)
+ }
+ if res == nil {
+  t.Fatal("response is nil")
+ }
+}
+*/
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go
new file mode 100644
index 000000000..685c031ea
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go
@@ -0,0 +1,215 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// TasksListService retrieves the list of currently executing tasks
+// on one or more nodes in the cluster. It is part of the Task Management API
+// documented at http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks-list.html.
+//
+// It is supported as of Elasticsearch 2.3.0.
+type TasksListService struct {
+ client *Client
+ pretty bool
+ taskId []int64
+ actions []string
+ detailed *bool
+ nodeId []string
+ parentNode string
+ parentTask *int64
+ waitForCompletion *bool
+}
+
+// NewTasksListService creates a new TasksListService.
+func NewTasksListService(client *Client) *TasksListService {
+ return &TasksListService{
+  client: client,
+  taskId: make([]int64, 0),
+  actions: make([]string, 0),
+  nodeId: make([]string, 0),
+ }
+}
+
+// TaskId limits the list to the task(s) with the specified id(s).
+func (s *TasksListService) TaskId(taskId ...int64) *TasksListService {
+ s.taskId = append(s.taskId, taskId...)
+ return s +} + +// Actions is a list of actions that should be returned. Leave empty to return all. +func (s *TasksListService) Actions(actions ...string) *TasksListService { + s.actions = append(s.actions, actions...) + return s +} + +// Detailed indicates whether to return detailed task information (default: false). +func (s *TasksListService) Detailed(detailed bool) *TasksListService { + s.detailed = &detailed + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksListService) NodeId(nodeId ...string) *TasksListService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// ParentNode returns tasks with specified parent node. +func (s *TasksListService) ParentNode(parentNode string) *TasksListService { + s.parentNode = parentNode + return s +} + +// ParentTask returns tasks with specified parent task id. Set to -1 to return all. +func (s *TasksListService) ParentTask(parentTask int64) *TasksListService { + s.parentTask = &parentTask + return s +} + +// WaitForCompletion indicates whether to wait for the matching tasks +// to complete (default: false). +func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksListService) Pretty(pretty bool) *TasksListService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *TasksListService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.taskId) > 0 { + var tasks []string + for _, taskId := range s.taskId { + tasks = append(tasks, fmt.Sprintf("%d", taskId)) + } + path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{ + "task_id": strings.Join(tasks, ","), + }) + } else { + path = "/_tasks" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if s.detailed != nil { + params.Set("detailed", fmt.Sprintf("%v", *s.detailed)) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksListService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TasksListResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// TasksListResponse is the response of TasksListService.Do. 
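+//
+// Hypothetical usage, assuming an initialized *Client in client and a
+// context.Context in ctx:
+//
+//   res, err := client.TasksList().Detailed(true).Do(ctx)
+//   for nodeID, node := range res.Nodes {
+//       fmt.Printf("node %s runs %d task(s)\n", nodeID, len(node.Tasks))
+//   }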
+type TasksListResponse struct { + TaskFailures []*TaskOperationFailure `json:"task_failures"` + NodeFailures []*FailedNodeException `json:"node_failures"` + // Nodes returns the tasks per node. The key is the node id. + Nodes map[string]*DiscoveryNode `json:"nodes"` +} + +type TaskOperationFailure struct { + TaskId int64 `json:"task_id"` + NodeId string `json:"node_id"` + Status string `json:"status"` + Reason *ErrorDetails `json:"reason"` +} + +type FailedNodeException struct { + *ErrorDetails + NodeId string `json:"node_id"` +} + +type DiscoveryNode struct { + Name string `json:"name"` + TransportAddress string `json:"transport_address"` + Host string `json:"host"` + IP string `json:"ip"` + Attributes map[string]interface{} `json:"attributes"` + // Tasks returns the tasks by its id (as a string). + Tasks map[string]*TaskInfo `json:"tasks"` +} + +type TaskInfo struct { + Node string `json:"node"` + Id int64 `json:"id"` // the task id + Type string `json:"type"` + Action string `json:"action"` + Status interface{} `json:"status"` + Description interface{} `json:"description"` + StartTime string `json:"start_time"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + RunningTime string `json:"running_time"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356" +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go new file mode 100644 index 000000000..9ee80545e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go @@ -0,0 +1,66 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestTasksListBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + TaskId []int64 + Expected string + }{ + { + []int64{}, + "/_tasks", + }, + { + []int64{42}, + "/_tasks/42", + }, + { + []int64{42, 37}, + "/_tasks/42%2C37", + }, + } + + for i, test := range tests { + path, _, err := client.TasksList().TaskId(test.TaskId...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestTasksList(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "2.3.0" { + t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion) + } + + res, err := client.TasksList().Pretty(true).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("response is nil") + } + if len(res.Nodes) == 0 { + t.Fatalf("expected at least 1 node; got: %d", len(res.Nodes)) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/termvectors.go b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go new file mode 100644 index 000000000..244169556 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go @@ -0,0 +1,460 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "gopkg.in/olivere/elastic.v5/uritemplates"
+)
+
+// TermvectorsService returns information and statistics on terms in the
+// fields of a particular document. The document could be stored in the
+// index or artificially provided by the user.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html
+// for documentation.
+type TermvectorsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ dfs *bool
+ doc interface{}
+ fieldStatistics *bool
+ fields []string
+ filter *TermvectorsFilterSettings
+ perFieldAnalyzer map[string]string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+ version interface{}
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewTermvectorsService creates a new TermvectorsService.
+func NewTermvectorsService(client *Client) *TermvectorsService {
+ return &TermvectorsService{
+  client: client,
+ }
+}
+
+// Index in which the document resides.
+func (s *TermvectorsService) Index(index string) *TermvectorsService {
+ s.index = index
+ return s
+}
+
+// Type of the document.
+func (s *TermvectorsService) Type(typ string) *TermvectorsService {
+ s.typ = typ
+ return s
+}
+
+// Id of the document.
+func (s *TermvectorsService) Id(id string) *TermvectorsService {
+ s.id = id
+ return s
+}
+
+// Dfs specifies if distributed frequencies should be returned instead
+// of shard frequencies.
+func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService {
+ s.dfs = &dfs
+ return s
+}
+
+// Doc is the document to analyze.
+func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService {
+ s.doc = doc
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies
+// and sum of total term frequencies should be returned.
+func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields is a list of fields to return.
+func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService {
+ if s.fields == nil {
+  s.fields = make([]string, 0)
+ }
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Filter adds terms filter settings.
+func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService {
+ s.filter = filter
+ return s
+}
+
+// PerFieldAnalyzer allows specifying a different analyzer than the one
+// defined for the field.
+func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService {
+ s.perFieldAnalyzer = perFieldAnalyzer
+ return s
+}
+
+// Offsets specifies if term offsets should be returned.
+func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent sets the parent id of the document.
+func (s *TermvectorsService) Parent(parent string) *TermvectorsService {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned.
+func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned.
+func (s *TermvectorsService) Positions(positions bool) *TermvectorsService {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation
+// should be performed on (default: random).
+func (s *TermvectorsService) Preference(preference string) *TermvectorsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if the request is real-time as opposed to
+// near-real-time (default: true).
+func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *TermvectorsService) Routing(routing string) *TermvectorsService {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency
+// should be returned.
+func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
+ s.version = version
+ return s
+}
+
+// VersionType specifies a version type ("internal", "external", "external_gte", or "force").
+func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
+ s.versionType = versionType
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson defines the body parameters. See documentation.
+func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString defines the body parameters as a string. See documentation.
+func (s *TermvectorsService) BodyString(body string) *TermvectorsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *TermvectorsService) buildURL() (string, url.Values, error) {
+ var pathParam = map[string]string{
+  "index": s.index,
+  "type": s.typ,
+ }
+ var path string
+ var err error
+
+ // Build URL
+ if s.id != "" {
+  pathParam["id"] = s.id
+  path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam)
+ } else {
+  path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam)
+ }
+
+ if err != nil {
+  return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+  params.Set("pretty", "1")
+ }
+ if s.dfs != nil {
+  params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
+ }
+ if s.fieldStatistics != nil {
+  params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
+ }
+ if len(s.fields) > 0 {
+  params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.offsets != nil {
+  params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
+ }
+ if s.parent != "" {
+  params.Set("parent", s.parent)
+ }
+ if s.payloads != nil {
+  params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
+ }
+ if s.positions != nil {
+  params.Set("positions", fmt.Sprintf("%v", *s.positions))
+ }
+ if s.preference != "" {
+  params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+  params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.routing != "" {
+  params.Set("routing", s.routing)
+ }
+ if s.termStatistics != nil {
+  params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
+ }
+ if s.version != nil {
+  params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+  params.Set("version_type", s.versionType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TermvectorsService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+  invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+  invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+  return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+  return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+  return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+  body = s.bodyJson
+ } else if s.bodyString != "" {
+  body = s.bodyString
+ } else {
+  data := make(map[string]interface{})
+  if s.doc != nil {
+   data["doc"] = s.doc
+  }
+  if len(s.perFieldAnalyzer) > 0 {
+   data["per_field_analyzer"] = s.perFieldAnalyzer
+  }
+  if s.filter != nil {
+   src, err := s.filter.Source()
+   if err != nil {
+    return nil, err
+   }
+   data["filter"] = src
+  }
+  if len(data) > 0 {
+   body = data
+  }
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ if err != nil {
+  return nil, err
+ }
+
+ // Return operation response
+ ret := new(TermvectorsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+  return nil, err
+ }
+ return ret, nil
+}
+
+// -- Filter settings --
+
+// TermvectorsFilterSettings adds additional filters to a Termvectors request.
+// It allows filtering terms based on their tf-idf scores.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html#_terms_filtering
+// for more information.
+type TermvectorsFilterSettings struct {
+ maxNumTerms *int64
+ minTermFreq *int64
+ maxTermFreq *int64
+ minDocFreq *int64
+ maxDocFreq *int64
+ minWordLength *int64
+ maxWordLength *int64
+}
+
+// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct.
+func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
+ return &TermvectorsFilterSettings{}
+}
+
+// MaxNumTerms specifies the maximum number of terms that must be returned per field.
+func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
+ fs.maxNumTerms = &value
+ return fs
+}
+
+// MinTermFreq ignores words with less than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.minTermFreq = &value
+ return fs
+}
+
+// MaxTermFreq ignores words with more than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxTermFreq = &value
+ return fs
+}
+
+// MinDocFreq ignores terms which do not occur in at least this many docs.
+func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.minDocFreq = &value
+ return fs
+}
+
+// MaxDocFreq ignores terms which occur in more than this many docs.
+func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxDocFreq = &value
+ return fs
+}
+
+// MinWordLength specifies the minimum word length below which words will be ignored.
+func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
+ fs.minWordLength = &value
+ return fs
+}
+
+// MaxWordLength specifies the maximum word length above which words will be ignored.
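+//
+// As an aside, a hypothetical end-to-end use of these filter settings;
+// client, ctx and the index/type/id values are placeholders:
+//
+//   fs := NewTermvectorsFilterSettings().MinTermFreq(1).MaxNumTerms(50)
+//   res, err := client.TermVectors("twitter", "tweet").
+//       Id("1").
+//       Filter(fs).
+//       Do(ctx)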
+func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings { + fs.maxWordLength = &value + return fs +} + +// Source returns JSON for the query. +func (fs *TermvectorsFilterSettings) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fs.maxNumTerms != nil { + source["max_num_terms"] = *fs.maxNumTerms + } + if fs.minTermFreq != nil { + source["min_term_freq"] = *fs.minTermFreq + } + if fs.maxTermFreq != nil { + source["max_term_freq"] = *fs.maxTermFreq + } + if fs.minDocFreq != nil { + source["min_doc_freq"] = *fs.minDocFreq + } + if fs.maxDocFreq != nil { + source["max_doc_freq"] = *fs.maxDocFreq + } + if fs.minWordLength != nil { + source["min_word_length"] = *fs.minWordLength + } + if fs.maxWordLength != nil { + source["max_word_length"] = *fs.maxWordLength + } + return source, nil +} + +// -- Response types -- + +type TokenInfo struct { + StartOffset int64 `json:"start_offset"` + EndOffset int64 `json:"end_offset"` + Position int64 `json:"position"` + Payload string `json:"payload"` +} + +type TermsInfo struct { + DocFreq int64 `json:"doc_freq"` + Score float64 `json:"score"` + TermFreq int64 `json:"term_freq"` + Ttf int64 `json:"ttf"` + Tokens []TokenInfo `json:"tokens"` +} + +type FieldStatistics struct { + DocCount int64 `json:"doc_count"` + SumDocFreq int64 `json:"sum_doc_freq"` + SumTtf int64 `json:"sum_ttf"` +} + +type TermVectorsFieldInfo struct { + FieldStatistics FieldStatistics `json:"field_statistics"` + Terms map[string]TermsInfo `json:"terms"` +} + +// TermvectorsResponse is the response of TermvectorsService.Do. +type TermvectorsResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id,omitempty"` + Version int `json:"_version"` + Found bool `json:"found"` + Took int64 `json:"took"` + TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go b/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go new file mode 100644 index 000000000..fb0ede146 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go @@ -0,0 +1,157 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "context" + "testing" + "time" +) + +func TestTermVectorsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Index string + Type string + Id string + Expected string + }{ + { + "twitter", + "tweet", + "", + "/twitter/tweet/_termvectors", + }, + { + "twitter", + "tweet", + "1", + "/twitter/tweet/1/_termvectors", + }, + } + + for _, test := range tests { + builder := client.TermVectors(test.Index, test.Type) + if test.Id != "" { + builder = builder.Id(test.Id) + } + path, _, err := builder.buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestTermVectorsWithId(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh("true"). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // TermVectors by specifying ID + field := "Message" + result, err := client.TermVectors(testIndexName, "tweet"). + Id("1"). + Fields(field). + FieldStatistics(true). + TermStatistics(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } +} + +func TestTermVectorsWithDoc(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } +} + +func TestTermVectorsWithFilter(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). + Filter(NewTermvectorsFilterSettings().MinTermFreq(1)). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update.go b/vendor/gopkg.in/olivere/elastic.v5/update.go new file mode 100644 index 000000000..c7bb2833e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update.go @@ -0,0 +1,293 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// UpdateService updates a document in Elasticsearch. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-update.html +// for details. +type UpdateService struct { + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh string + waitForActiveShards string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool +} + +// NewUpdateService creates the service to update documents in Elasticsearch. 
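+//
+// A minimal sketch of a partial-document update; index, type, id and
+// the doc are placeholder values:
+//
+//   res, err := NewUpdateService(client).
+//       Index("twitter").Type("tweet").Id("1").
+//       Doc(map[string]interface{}{"retweets": 42}).
+//       Do(ctx)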
+func NewUpdateService(client *Client) *UpdateService {
+ builder := &UpdateService{
+  client: client,
+  fields: make([]string, 0),
+ }
+ return builder
+}
+
+// Index is the name of the Elasticsearch index (required).
+func (b *UpdateService) Index(name string) *UpdateService {
+ b.index = name
+ return b
+}
+
+// Type is the type of the document (required).
+func (b *UpdateService) Type(typ string) *UpdateService {
+ b.typ = typ
+ return b
+}
+
+// Id is the identifier of the document to update (required).
+func (b *UpdateService) Id(id string) *UpdateService {
+ b.id = id
+ return b
+}
+
+// Routing specifies a specific routing value.
+func (b *UpdateService) Routing(routing string) *UpdateService {
+ b.routing = routing
+ return b
+}
+
+// Parent sets the id of the parent document.
+func (b *UpdateService) Parent(parent string) *UpdateService {
+ b.parent = parent
+ return b
+}
+
+// Script is the script definition.
+func (b *UpdateService) Script(script *Script) *UpdateService {
+ b.script = script
+ return b
+}
+
+// RetryOnConflict specifies how many times the operation should be retried
+// when a conflict occurs (default: 0).
+func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
+ b.retryOnConflict = &retryOnConflict
+ return b
+}
+
+// Fields is a list of fields to return in the response.
+func (b *UpdateService) Fields(fields ...string) *UpdateService {
+ b.fields = make([]string, 0, len(fields))
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+// Version defines the explicit version number for concurrency control.
+func (b *UpdateService) Version(version int64) *UpdateService {
+ b.version = &version
+ return b
+}
+
+// VersionType is one of "internal" or "force".
+func (b *UpdateService) VersionType(versionType string) *UpdateService {
+ b.versionType = versionType
+ return b
+}
+
+// Refresh the index after performing the update.
+func (b *UpdateService) Refresh(refresh string) *UpdateService {
+ b.refresh = refresh
+ return b
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before
+// proceeding with the update operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than
+// or equal to the total number of copies for the shard (number of replicas + 1).
+func (b *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService {
+ b.waitForActiveShards = waitForActiveShards
+ return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+ b.doc = doc
+ return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+ b.upsert = doc
+ return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+ b.docAsUpsert = &docAsUpsert
+ return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+ b.detectNoop = &detectNoop
+ return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService { + b.scriptedUpsert = &scriptedUpsert + return b +} + +// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms". +func (b *UpdateService) Timeout(timeout string) *UpdateService { + b.timeout = timeout + return b +} + +// Pretty instructs to return human readable, prettified JSON. +func (b *UpdateService) Pretty(pretty bool) *UpdateService { + b.pretty = pretty + return b +} + +// url returns the URL part of the document request. +func (b *UpdateService) url() (string, url.Values, error) { + // Build url + path := "/{index}/{type}/{id}/_update" + path, err := uritemplates.Expand(path, map[string]string{ + "index": b.index, + "type": b.typ, + "id": b.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Parameters + params := make(url.Values) + if b.pretty { + params.Set("pretty", "true") + } + if b.routing != "" { + params.Set("routing", b.routing) + } + if b.parent != "" { + params.Set("parent", b.parent) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + if b.refresh != "" { + params.Set("refresh", b.refresh) + } + if b.waitForActiveShards != "" { + params.Set("wait_for_active_shards", b.waitForActiveShards) + } + if len(b.fields) > 0 { + params.Set("fields", strings.Join(b.fields, ",")) + } + if b.version != nil { + params.Set("version", fmt.Sprintf("%d", *b.version)) + } + if b.versionType != "" { + params.Set("version_type", b.versionType) + } + if b.retryOnConflict != nil { + params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) + } + + return path, params, nil +} + +// body returns the body part of the document request. +func (b *UpdateService) body() (interface{}, error) { + source := make(map[string]interface{}) + + if b.script != nil { + src, err := b.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if b.scriptedUpsert != nil { + source["scripted_upsert"] = *b.scriptedUpsert + } + + if b.upsert != nil { + source["upsert"] = b.upsert + } + + if b.doc != nil { + source["doc"] = b.doc + } + if b.docAsUpsert != nil { + source["doc_as_upsert"] = *b.docAsUpsert + } + if b.detectNoop != nil { + source["detect_noop"] = *b.detectNoop + } + + return source, nil +} + +// Do executes the update operation. +func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) { + path, params, err := b.url() + if err != nil { + return nil, err + } + + // Get body of the request + body, err := b.body() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(UpdateResponse) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateResponse is the result of updating a document in Elasticsearch. +type UpdateResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` + GetResult *GetResult `json:"get"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go new file mode 100644 index 000000000..a4e7bf25b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go @@ -0,0 +1,651 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. +type UpdateByQueryService struct { + client *Client + pretty bool + index []string + typ []string + script *Script + query Query + body interface{} + xSource []string + xSourceExclude []string + xSourceInclude []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + conflicts string + defaultOperator string + docvalueFields []string + df string + expandWildcards string + explain *bool + fielddataFields []string + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + pipeline string + preference string + q string + refresh string + requestCache *bool + requestsPerSecond *int + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + storedFields []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + versionType *bool + waitForActiveShards string + waitForCompletion *bool +} + +// NewUpdateByQueryService creates a new UpdateByQueryService. +func NewUpdateByQueryService(client *Client) *UpdateByQueryService { + return &UpdateByQueryService{ + client: client, + } +} + +// Index is a list of index names to search; use `_all` or empty string to +// perform the operation on all indices. +func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { + s.index = append(s.index, index...) + return s +} + +// Type is a list of document types to search; leave empty to perform +// the operation on all types. +func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { + s.pretty = pretty + return s +} + +// Script sets an update script. +func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { + s.script = script + return s +} + +// Body specifies the body of the request. It overrides data being specified via +// SearchService or Script. +func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService { + s.body = body + return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. 
(This includes `_all` string or when +// no indices have been specified). +func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService { + s.analyzer = analyzer + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict proceeds with the request in case of version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService { + s.conflicts = "proceed" + return s +} + +// DefaultOperator is the default operator for the query string query (AND or OR). +func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService { + s.defaultOperator = defaultOperator + return s +} + +// DF specifies the field to use as default where no field prefix is given in the query string. +func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService { + s.df = df + return s +} + +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService { + s.docvalueFields = docvalueFields + return s +} + +// ExpandWildcards indicates whether to expand wildcard expressions to +// concrete indices that are open, closed or both. +func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService { + s.expandWildcards = expandWildcards + return s +} + +// Explain specifies whether to return detailed information about score +// computation as part of a hit. +func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService { + s.explain = &explain + return s +} + +// FielddataFields is a list of fields to return as the field data +// representation of a field for each hit. +func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// From is the starting offset (default: 0). +func (s *UpdateByQueryService) From(from int) *UpdateByQueryService { + s.from = &from + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored.
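+// +// For orientation, a minimal update-by-query sketch chaining the setters in +// this file (index name, query and script are illustrative values): +// +//	res, err := client.UpdateByQuery("twitter"). +//		Query(NewTermQuery("user", "olivere")). +//		Script(NewScriptInline("ctx._source.likes++")). +//		ProceedOnVersionConflict(). +//		Do(ctx) +//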
+func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none). +func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService { + s.pipeline = pipeline + return s +} + +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService { + s.preference = preference + return s +} + +// Q specifies the query in the Lucene query string syntax. +func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService { + s.q = q + return s +} + +// Query sets a query definition using the Query DSL. +func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { + s.query = query + return s +} + +// Refresh indicates whether the affected indices should be refreshed. +func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService { + s.refresh = refresh + return s +} + +// RequestCache specifies if the request cache should be used for this request +// or not; defaults to the index level setting. +func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService { + s.requestCache = &requestCache + return s +} + +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means no throttle, as does "unlimited", which is the only non-numeric value accepted. +func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Routing is a list of specific routing values. +func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService { + s.routing = append(s.routing, routing...) + return s +} + +// Scroll specifies how long a consistent view of the index should be maintained +// for scrolled search. +func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService { + s.scroll = scroll + return s +} + +// ScrollSize is the size of the scroll request powering the update_by_query. +func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService { + s.scrollSize = &scrollSize + return s +} + +// SearchTimeout defines an explicit timeout for each search request. +// Defaults to no timeout. +func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService { + s.searchTimeout = searchTimeout + return s +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService { + s.searchType = searchType + return s +} + +// Size represents the number of hits to return (default: 10). +func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService { + s.size = &size + return s +} + +// Sort is a list of field:direction pairs, e.g. "user:asc". +func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService { + s.sort = append(s.sort, sort...) + return s +} + +// SortByField adds a sort order.
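+// For example (field name illustrative), SortByField("date", false) appends +// "date:desc" to the sort order and is equivalent to Sort("date:desc").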
+func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService { + if ascending { + s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) + } else { + s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) + } + return s +} + +// Stats specifies specific tag(s) of the request for logging and statistical purposes. +func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService { + s.stats = append(s.stats, stats...) + return s +} + +// StoredFields specifies the list of stored fields to return as part of a hit. +func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService { + s.storedFields = storedFields + return s +} + +// SuggestField specifies which field to use for suggestions. +func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in the response. +func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. +func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService { + s.timeout = timeout + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService { + s.trackScores = &trackScores + return s +} + +// Version specifies whether to return document version as part of a hit. +func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService { + s.version = &version + return s +} + +// VersionType indicates whether the document should increment the version number +// (internal) on hit or not (reindex). +func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService { + s.versionType = &versionType + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the update by query operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1).
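+// For example, an index with 2 replicas has 3 copies of each shard, so +// WaitForActiveShards("3") (or "all") waits for every copy to be active.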
+func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// buildURL builds the URL for the operation. +func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.size != nil { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if 
len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) + } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.trackScores != nil { + params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.versionType != nil { + params.Set("version_type", fmt.Sprintf("%v", *s.versionType)) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *UpdateByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// getBody returns the body part of the document request. +func (s *UpdateByQueryService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil + } + source := make(map[string]interface{}) + if s.script != nil { + src, err := s.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + return source, nil +} + +// Do executes the operation. +func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.getBody() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery) + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go new file mode 100644 index 000000000..8f84758ad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go @@ -0,0 +1,148 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestUpdateByQueryBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectErr bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "/index1/_update_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{}, + "/index1%2Cindex2/_update_by_query", + false, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1/_update_by_query", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2/_update_by_query", + false, + }, + } + + for i, test := range tests { + builder := client.UpdateByQuery().Index(test.Indices...).Type(test.Types...) + err := builder.Validate() + if err != nil { + if !test.ExpectErr { + t.Errorf("case #%d: %v", i+1, err) + continue + } + } else { + // err == nil + if test.ExpectErr { + t.Errorf("case #%d: expected error", i+1) + continue + } + path, _, _ := builder.buildURL() + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} + +func TestUpdateByQueryBodyWithQuery(t *testing.T) { + client := setupTestClient(t) + out, err := client.UpdateByQuery().Query(NewTermQuery("user", "olivere")).getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"query":{"term":{"user":"olivere"}}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestUpdateByQueryBodyWithQueryAndScript(t *testing.T) { + client := setupTestClient(t) + out, err := client.UpdateByQuery(). + Query(NewTermQuery("user", "olivere")). + Script(NewScriptInline("ctx._source.likes++")). + getBody() + if err != nil { + t.Fatal(err) + } + b, err := json.Marshal(out) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `{"query":{"term":{"user":"olivere"}},"script":{"inline":"ctx._source.likes++"}}` + if got != want { + t.Fatalf("\ngot %s\nwant %s", got, want) + } +} + +func TestUpdateByQuery(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "2.3.0" { + t.Skipf("Elasticsearch %v does not support update-by-query yet", esversion) + } + + sourceCount, err := client.Count(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + res, err := client.UpdateByQuery(testIndexName).ProceedOnVersionConflict().Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("response is nil") + } + if res.Updated != sourceCount { + t.Fatalf("expected %d; got: %d", sourceCount, res.Updated) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_test.go b/vendor/gopkg.in/olivere/elastic.v5/update_test.go new file mode 100644 index 000000000..79fe415dd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update_test.go @@ -0,0 +1,233 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "net/url" + "testing" +) + +func TestUpdateViaScript(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy")) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.tags += tag","lang":"groovy","params":{"tag":"blue"}}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptId(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptId("my_web_session_summariser").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). + Upsert(map[string]interface{}{}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptFile(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptFile("update_script").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). 
+ Upsert(map[string]interface{}{}) + + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"file":"update_script","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})). + Upsert(map[string]interface{}{"counter": 1}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.counter += count","params":{"count":4}},"upsert":{"counter":1}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDoc(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DetectNoop(true) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"detect_noop":true,"doc":{"name":"new_name"}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDocAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DocAsUpsert(true). + Timeout("1s"). 
+ Refresh("true") + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} diff --git a/vendor/github.com/nats-io/go-nats/LICENSE b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE similarity index 94% rename from vendor/github.com/nats-io/go-nats/LICENSE rename to vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE index 4cfd668f2..de9c88cb6 100644 --- a/vendor/github.com/nats-io/go-nats/LICENSE +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE @@ -1,6 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2012-2016 Apcera Inc. +Copyright (c) 2013 Joshua Tacoma Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. 
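+// For example (path and variable names are illustrative): +// +//	t, _ := Parse("/{index}/{type}/_update_by_query") +//	s, _ := t.Expand(map[string]interface{}{"index": "idx", "type": "doc"}) +//	// s == "/idx/doc/_update_by_query"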
+func Parse(rawtemplate string) (template *UriTemplate, err error) { + template = new(UriTemplate) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifiers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string.
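+// Values may be given as a map[string]interface{} or as a struct (or pointer +// to struct) whose fields are mapped via struct2map below. A hypothetical +// tagged struct, for illustration: +// +//	type pathParams struct { +//		Index string `uri:"index"` +//	} +//	s, _ := t.Expand(pathParams{Index: "idx"}) // equivalent to the map form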
+func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case 
reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils_test.go b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils_test.go new file mode 100644 index 000000000..633949b6f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils_test.go @@ -0,0 +1,105 @@ +package uritemplates + +import ( + "testing" +) + +type ExpandTest struct { + in string + expansions map[string]string + want string +} + +var expandTests = []ExpandTest{ + // #0: no expansions + { + "http://www.golang.org/", + map[string]string{}, + "http://www.golang.org/", + }, + // #1: one expansion, no escaping + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/red/delete", + }, + // #2: one expansion, with hex escapes + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red/blue", + }, + "http://www.golang.org/red%2Fblue/delete", + }, + // #3: one expansion, with space + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org/red%20or%20blue/delete", + }, + // #4: expansion not found + { + "http://www.golang.org/{object}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org//delete", + }, + // #5: multiple expansions + { + "http://www.golang.org/{one}/{two}/{three}/get", + map[string]string{ + "one": "ONE", + "two": "TWO", + "three": "THREE", + }, + "http://www.golang.org/ONE/TWO/THREE/get", + }, + // #6: utf-8 characters + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": "£100", + }, + "http://www.golang.org/%C2%A3100/get", + }, + // #7: punctuations + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": `/\@:,.*~`, + }, + "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get", + }, + // #8: mis-matched brackets + { + "http://www.golang.org/{bucket/get", + map[string]string{ + "bucket": "red", + }, + "", + }, + // #9: "+" prefix for suppressing escape + // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 + { + "http://www.golang.org/{+topic}", + map[string]string{ + "topic": "/topics/myproject/mytopic", + }, + // The double slashes here look weird, but it's intentional + "http://www.golang.org//topics/myproject/mytopic", + }, +} + +func TestExpand(t *testing.T) { + for i, test := range expandTests { + got, _ := Expand(test.in, test.expansions) + if got != test.want { + t.Errorf("got %q expected %q in test %d", got, test.want, 
i) + } + } +}