diff --git a/.clabot b/.clabot
new file mode 100644
index 000000000..79c7a7670
--- /dev/null
+++ b/.clabot
@@ -0,0 +1,19 @@
+{
+ "message": "Thank you for your interest in contributing to the Harvest project! We require contributors to sign our [Corporate contributor license agreement (CCLA)](https://github.com/NetApp/harvest/blob/main/CONTRIBUTING.md#creating-a-pull-request), and we don\"t have the user(s) {{usersWithoutCLA}} on file. In order for us to review and merge your code, please follow the instructions in step 6 of [creating a pull request](https://github.com/NetApp/harvest/blob/main/CONTRIBUTING.md#creating-a-pull-request). \nAfter signing the CCLA, you can ask us to recheck this PR by posting `@cla-bot check` as a comment to the PR.",
+ "label": "cla-signed",
+ "contributors": [
+ "$$$ Harvest Dev $$$",
+ "ybizeul",
+ "cgrinds",
+ "georgmey",
+ "schmots1",
+ "vgratian",
+ "rahulguptajss",
+ "mrydeen",
+ "ruanruijuan",
+ "hardikl",
+ "sridevimm",
+ "$$$ External $$$",
+ "chrishenzie"
+ ]
+}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index e4bdefc21..8510647d1 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -2,7 +2,7 @@
name: Bug report
about: Create a report to help us improve
title: ''
-labels: needs-triage
+labels: status/needs-triage
assignees: ''
---
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 000000000..eb93b2d55
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,71 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ main ]
+ schedule:
+ - cron: '36 11 * * 6'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+ # Learn more:
+ # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 380372a69..cd594af2a 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -1,10 +1,14 @@
-name: Build and Test
+name: Build, Test, License
on:
push:
- branches: [ main ]
+ branches:
+ - main
+ - 'release/**'
pull_request:
- branches: [ main ]
+ branches:
+ - main
+ - 'release/**'
jobs:
@@ -27,3 +31,23 @@ jobs:
- name: Check code formatting using gofmt
uses: Jerome1337/gofmt-action@v1.0.4
+ license_check:
+ name: License check
+ if: '!github.event.deleted'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install Go
+ uses: actions/setup-go@v2
+ with:
+ go-version: 1.15
+ - name: Install wwhrd
+ env:
+ GO111MODULE: 'off'
+ run: go get -u github.com/frapposelli/wwhrd
+ - name: go mod vendor
+ env:
+ GO111MODULE: 'on'
+ run: go mod vendor
+ - name: wwhrd check
+ run: wwhrd check
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
deleted file mode 100644
index fe4e25c6a..000000000
--- a/.github/workflows/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Detect poorly formatted Go code
-
-name: Check code formatting using gofmt
-
-# Controls when the action will run.
-on:
- # Triggers the workflow on push or pull request events but only for the main branch
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
- # Allows you to run this workflow manually from the Actions tab
- workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
- # This workflow contains a single job called "build"
- build:
- # The type of runner that the job will run on
- runs-on: ubuntu-latest
-
- # Steps represent a sequence of tasks that will be executed as part of the job
- steps:
- # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- - uses: actions/checkout@v2
-
- - name: Check code formatting using gofmt
- uses: Jerome1337/gofmt-action@v1.0.4
diff --git a/.gitignore b/.gitignore
index ae158c49a..9376c15a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,5 @@
# build directories
bin/
-dist/
\ No newline at end of file
+dist/
+cert/
\ No newline at end of file
diff --git a/.wwhrd.yml b/.wwhrd.yml
new file mode 100644
index 000000000..3b7e54ee2
--- /dev/null
+++ b/.wwhrd.yml
@@ -0,0 +1,9 @@
+# https://github.com/frapposelli/wwhrd
+# Check vendored licenses in your Go project
+---
+allowlist:
+ - Apache-2.0
+ - MIT
+ - BSD-2-Clause
+ - BSD-3-Clause
+ - MPL-2.0
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
index 53cd3f349..9a2c38d1b 100644
--- a/ARCHITECTURE.md
+++ b/ARCHITECTURE.md
@@ -2,7 +2,7 @@
This document describes the high-level architecture of Harvest. If you want to familiarize yourself with the code base, you're in the right place!
-Harvest has a strong emphasize modular design, the core code-base is isolated from the code-base of secondary components - the philosophy is: adding new collectors, exporters or plugins should be simple.
+Harvest has a strong emphasis on modular design: the core code-base is isolated from the code-base of secondary components. The philosophy is that adding new collectors, exporters, or plugins should be simple.
## Bird's Eye View
@@ -10,7 +10,7 @@ Harvest consists of several processes/packages. All, except Poller, are short-li
-* **harvest**: the main executable and entry-point for the user, it's task is mainly to trigger the other processes
+* **harvest**: the main executable and entry-point for the user, its task is mainly to trigger the other processes
* **manager**: starts, stops and shows the status of pollers
* **poller**: daemon process that polls a target system
* **config**: helps to validate and configure harvest
@@ -44,7 +44,7 @@ One of the tasks of the Poller is to parse CLI flags and configuration files and
For exporters, *Params*, is the exact parameters of the exporter as defined in `harvest.yml`. For collectors, *Params*, is a top-down merge of:
* poller parameters from `harvest.yml` (can include `addr`, `auth_style`, etc.)
-* collector default template (can include poll frequency, list of counters, etc)
+* collector default template (can include poll frequency, list of counters, etc.)
* collector custom template (same)
Since the Poller is agnostic about the system collectors will poll, it is the user's (and developer's) responsibility to make sure required parameters are available in their right place.
@@ -53,7 +53,7 @@ Since the Poller is agnostic about the system collectors will poll, it is the us
Collectors are responsible for collecting metrics from a data source and writing them into a Matrix instance.
-Collectors are "object-oriented", which means that metrics are grouped together by the logical unit that they describe (such as volume, node, process, file). If there are more than one objects defined for a collector, then for each object a new instance of the collector will be created (example of such "multi-object" collectors are [Zapi](cmd/collectors/zapi/) and [ZapiPerf](cmd/collectors/zapiperf/). This means that the user only needs to add a new template file if they want to collector a new object.
+Collectors are "object-oriented", which means that metrics are grouped together by the logical unit that they describe (such as volume, node, process, file). If more than one object is defined for a collector, a new instance of the collector is created for each object (examples of such "multi-object" collectors are [Zapi](cmd/collectors/zapi/) and [ZapiPerf](cmd/collectors/zapiperf/)). This means that the user only needs to add a new template file if they want to collect a new object.
Most of the auxiliary jobs that a collector needs to do (such as initializing, running on scheduled time, reporting status to Poller, updating metadata and handling errors) are implemented by the AbstractCollector. Writing a new collector, most of the times, only requires implementing the `PollData()` method.
@@ -82,7 +82,7 @@ This package is in a semi-frozen, stable state and will not change (much) in the
### Tree
-The Tree data structure ([*node.Node](pkg/tree/node/node.go)) is used for unstructured and untyped data. It provides read/write methods that are independent of the underlaying data format (`xml`, `yaml`, `json`). It is mainly used for API calls and for storing configuration files.
+The Tree data structure ([*node.Node](pkg/tree/node/node.go)) is used for unstructured and untyped data. It provides read/write methods that are independent of the underlying data format (`xml`, `yaml`, `json`). It is mainly used for API calls and for storing configuration files.
Often collectors will receive an XML from their target system, parse it into a Tree, then extract meaningful information and write it into the Matrix.
@@ -93,7 +93,7 @@ Unlike, the Matrix, this package is not in a stable state and will likely need a
This section describes the directories of the project and how source files are organized:
### `/`
-The root directories contains scripts for building Harvest:
+The root directory contains scripts for building Harvest:
* `MakeFile` - script for building and installing Harvest
* `package` - script for building distribution packages (uses the subdirectories `deb/` and `rpm/`)
* `harvest.yml` - main configuration file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7fe881604..feccb26f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,16 +1,49 @@
# Change Log
[Releases](https://github.com/NetApp/harvest/releases)
+
+## 21.05.2 / 2021-06-14
+
+This release adds support for user-defined URLs for the InfluxDB exporter and a new command to validate your `harvest.yml` file, and brings improved logging, panic handling, and collector documentation. We also enabled GitHub security code scanning for the Harvest repo to catch issues sooner. These scans happen on every push.
+
+There are also several quality-of-life bug fixes listed below.
+
+### Fixes
+- Handle special characters in cluster credentials [#79](https://github.com/NetApp/harvest/pull/79)
+- TLS server verification works with basic auth [#51](https://github.com/NetApp/harvest/issues/51)
+- Collect metrics from all disk shelves instead of one [#75](https://github.com/NetApp/harvest/issues/75)
+- Disk serial number and is-failed are missing from cdot query [#60](https://github.com/NetApp/harvest/issues/60)
+- Ensure collectors and pollers recover from panics [#105](https://github.com/NetApp/harvest/issues/105)
+- Cluster status is initially reported, but then stops being reported [#66](https://github.com/NetApp/harvest/issues/66)
+- Performance metrics don't display volume names [#40](https://github.com/NetApp/harvest/issues/40)
+- Allow insecure Grafana TLS connections `--insecure` and honor requested transport. See `harvest grafana --help` for details [#111](https://github.com/NetApp/harvest/issues/111)
+- Prometheus dashboards don't load when `exemplar` is true. Thanks to @sevenval-admins, @florianmulatz, and @unbreakabl3 for their help tracking this down and suggesting a fix. [#96](https://github.com/NetApp/harvest/issues/96)
+- `harvest stop` does not stop pollers that have been renamed [#20](https://github.com/NetApp/harvest/issues/20)
+- Harvest stops working after reboot on rpm/deb [#50](https://github.com/NetApp/harvest/issues/50)
+- `harvest start` shall start as harvest user in rpm/deb [#129](https://github.com/NetApp/harvest/issues/129)
+- `harvest start` detects stale pidfiles and makes start idempotent [#123](https://github.com/NetApp/harvest/issues/123)
+- Don't include unknown metrics when talking with older versions of ONTAP [#116](https://github.com/NetApp/harvest/issues/116)
+### Enhancements
+- InfluxDB exporter supports [user-defined URLs](https://github.com/NetApp/harvest/blob/main/cmd/exporters/influxdb/README.md#parameters)
+- Add workload counters to ZapiPerf [#9](https://github.com/NetApp/harvest/issues/9)
+- Add new command to validate `harvest.yml` file and optionally redact sensitive information [#16](https://github.com/NetApp/harvest/issues/16) e.g. `harvest doctor --config ./harvest.yml`
+- Improve documentation for [Unix](https://github.com/NetApp/harvest/tree/main/cmd/collectors/unix), [Zapi](https://github.com/NetApp/harvest/tree/main/cmd/collectors/zapi), and [ZapiPerf](https://github.com/NetApp/harvest/tree/main/cmd/collectors/zapiperf) collectors
+- Add Zerolog framework for structured logging [#61](https://github.com/NetApp/harvest/issues/61)
+- Vendor 3rd party code to increase reliability and make it easier to build in air-gapped environments [#26](https://github.com/NetApp/harvest/pull/26)
+- Make contributing easier with a digital CCLA instead of 1970's era PDF :)
+- Enable GitHub security code scanning
+- InfluxDB exporter provides the option to pass the URL end-point unchanged. Thanks to @steverweber for their suggestion and validation. [#63](https://github.com/NetApp/harvest/issues/63)
+
## 21.05.1 / 2021-05-20
-Announcing the release of Harvest2. With this release the core of Harvest has been completely rewritten in Go. Harvest2 is a replacement for the older versions of Harvest 1.6 and below.
+Announcing the release of Harvest2. With this release the core of Harvest has been completely rewritten in Go. Harvest2 is a replacement for the older versions of Harvest 1.6 and below.
-If you're using one of the Harvest 2.x release candidates, you can do a direct upgrade.
+If you're using one of the Harvest 2.x release candidates, you can do a direct upgrade.
Going forward Harvest2 will follow a `year.month.fix` release naming convention with the first release being 21.05.0. See [SUPPORT.md](SUPPORT.md) for details.
**IMPORTANT** v21.05 increased Harvest's out-of-the-box security posture - self-signed certificates are rejected by default. You have two options:
-
+
1. [Setup client certificates for each cluster](https://github.com/NetApp/harvest-private/blob/main/cmd/collectors/zapi/README.md)
2. Disable the TLS check in Harvest. To disable, you need to edit `harvest.yml` and add `use_insecure_tls=true` to each poller or add it to the `Defaults` section. Doing so tells Harvest to ignore invalid TLS certificates.
@@ -33,8 +66,17 @@ Changes since rc2
- RPM install should create required directories
- Collector now warns if it falls behind schedule
- package.sh fails without internet connection
-- Version flag is missing new line on some shells [#4](https://github.com/NetApp/harvest/issues/4)
+- Version flag is missing new line on some shells [#4](https://github.com/NetApp/harvest/issues/4)
- Poller should not ignore --config [#28](https://github.com/NetApp/harvest/issues/28)
+- Handle special characters in cluster credentials [#79](https://github.com/NetApp/harvest/pull/79)
+- TLS server verification works with basic auth [#51](https://github.com/NetApp/harvest/issues/51)
+- Collect metrics from all disk shelves instead of one [#75](https://github.com/NetApp/harvest/issues/75)
+- Disk serial number and is-failed are missing from cdot query [#60](https://github.com/NetApp/harvest/issues/60)
+- Ensure collectors and pollers recover from panics [#105](https://github.com/NetApp/harvest/issues/105)
+- Cluster status is initially reported, but then stops being reported [#66](https://github.com/NetApp/harvest/issues/66)
+- Performance metrics don't display volume names [#40](https://github.com/NetApp/harvest/issues/40)
+- Allow insecure Grafana TLS connections `--insecure` and honor requested transport. See `harvest grafana --help` for details [#111](https://github.com/NetApp/harvest/issues/111)
+- Prometheus dashboards don't load when `exemplar` is true. Thanks to @sevenval-admins, @florianmulatz, and @unbreakabl3 for their help tracking this down and suggesting a fix. [#96](https://github.com/NetApp/harvest/issues/96)
### Enhancements
- Add new exporter for InfluxDB
@@ -46,6 +88,14 @@ Changes since rc2
- Add per-poller Prometheus end-point support with `promPort`
- The release, commit and build date information are baked into the release executables
- You can pick a subset of pollers to manage by passing the name of the poller to harvest. e.g. `harvest start|stop|restart POLLERS`
+- InfluxDB exporter supports [user-defined URLs](https://github.com/NetApp/harvest/blob/main/cmd/exporters/influxdb/README.md#parameters)
+- Add workload counters to ZapiPerf [#9](https://github.com/NetApp/harvest/issues/9)
+- Add new command to validate `harvest.yml` file and optionally redact sensitive information [#16](https://github.com/NetApp/harvest/issues/16) e.g. `harvest doctor --config ./harvest.yml`
+- Improve documentation for [Unix](https://github.com/NetApp/harvest/tree/main/cmd/collectors/unix), [Zapi](https://github.com/NetApp/harvest/tree/main/cmd/collectors/zapi), and [ZapiPerf](https://github.com/NetApp/harvest/tree/main/cmd/collectors/zapiperf) collectors
+- Add Zerolog framework for structured logging [#61](https://github.com/NetApp/harvest/issues/61)
+- Vendor 3rd party code to increase reliability and make it easier to build in air-gapped environments [#26](https://github.com/NetApp/harvest/pull/26)
+- Make contributing easier with a digital CCLA instead of 1970's era PDF :)
+- Enable GitHub security code scanning
## rc2
@@ -66,7 +116,7 @@ Changes since rc2
- [New plugin architecture](cmd/poller/plugin/README.md) - creating new plugins is easier and existing plugins made more generic
- You can use built-in plugins by adding rules to a collector's template. RC2 includes two built-in plugins:
- **Aggregator**: Aggregates metrics for a given label, e.g. volume data can be used to create an aggregation at the node or SVM-level
- - **LabelAgent**: Defines rules for rewriting instance labels, creating new labels or create ignore-lists based on regular expressions
+  - **LabelAgent**: Defines rules for rewriting instance labels, creating new labels, or creating ignore-lists based on regular expressions
## rc1
**IMPORTANT** Harvest has been rewritten in Go
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2d05dac4d..54fec6110 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,9 +10,8 @@ We appreciate that you want to take the time to contribute! Please follow these
3. Let us know in the issue that you plan on creating a pull request for it. This helps us to keep track of the pull request and make sure there isn't duplicate effort.
4. Before creating a pull request, write up a brief proposal in the issue describing what your change would be and how it would work so that others can comment.
5. It's better to wait for feedback from someone on NetApp's Harvest development team before writing code. We don't have an SLA for our feedback, but we will do our best to respond in a timely manner (at a minimum, to give you an idea if you're on the right track and that you should proceed, or not).
-6. Sign NetApp's Contributor License Agreement. You must sign the [Corporate Contributor License Agreement (CCLA)](https://github.com/NetApp/Harvest/blob/master/CONTRIBUTING_CCLA.pdf) in order to contribute.
-7. Please send the signed document to .
-8. If you've made it this far, have written the code that solves your issue, and addressed the review comments, then feel free to create your pull request.
+6. Sign and submit NetApp's Contributor License Agreement. You must sign and submit the [Corporate Contributor License Agreement (CCLA)](https://netapp.na1.echosign.com/public/esignWidget?wid=CBFCIBAA3AAABLblqZhBN1IcwqxqFOpZRSYcKO8V4nppnNJaH2QnbKIaJXBcoBFoGiQXAUjSamaZAvzBJOUM*) in order to contribute.
+7. If you've made it this far, have written the code that solves your issue, and addressed the review comments, then feel free to create your pull request.
Important: **NetApp will NOT look at the PR or any of the code submitted in the PR if the CCLA is not on file with NetApp Legal.**
diff --git a/CONTRIBUTING_CCLA.pdf b/CONTRIBUTING_CCLA.pdf
deleted file mode 100644
index c9909add5..000000000
Binary files a/CONTRIBUTING_CCLA.pdf and /dev/null differ
diff --git a/Makefile b/Makefile
index 6237ae91f..fad5fe3e0 100644
--- a/Makefile
+++ b/Makefile
@@ -20,8 +20,6 @@ BUILD_DATE := `date +%FT%T%z`
LD_FLAGS := "-X 'goharvest2/cmd/harvest/version.VERSION=$(VERSION)' -X 'goharvest2/cmd/harvest/version.Release=$(RELEASE)' -X 'goharvest2/cmd/harvest/version.Commit=$(COMMIT)' -X 'goharvest2/cmd/harvest/version.BuildDate=$(BUILD_DATE)'"
GOARCH ?= amd64
GOOS ?= linux
-COLLECTORS := $(shell ls cmd/collectors)
-EXPORTERS := $(shell ls cmd/exporters)
HARVEST_PACKAGE := harvest-${VERSION}-${RELEASE}_${GOOS}_${GOARCH}
DIST := dist
TMP := /tmp/${HARVEST_PACKAGE}
@@ -72,10 +70,10 @@ fmt: ## format the go source files
go fmt ./...
vet: ## run go vet on the source files
- @echo "Running govet"
+ @echo "Running go vet"
go vet ./...
-build: clean deps fmt harvest collectors exporters ## Build the project
+build: clean deps fmt harvest ## Build the project
package: clean deps build test dist-tar ## Package Harvest binary
@@ -84,61 +82,23 @@ all: package ## Build, Test, Package
harvest: deps
@# Build the harvest cli
@echo "Building harvest"
- @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o bin/harvest -ldflags=$(LD_FLAGS) cmd/harvest/harvest.go
+ @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -trimpath -o bin/harvest -ldflags=$(LD_FLAGS) cmd/harvest/harvest.go
@# Build the harvest poller
@echo "Building poller"
- @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o bin/poller -ldflags=$(LD_FLAGS) cmd/poller/poller.go
+ @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -trimpath -o bin/poller -ldflags=$(LD_FLAGS) cmd/poller/poller.go
- @# Build the daemonizer for the pollers
- @echo "Building daemonizer"
+ @# Build the daemonize for the pollers
+ @echo "Building daemonize"
@cd cmd/tools/daemonize; gcc daemonize.c -o ../../../bin/daemonize
@# Build the zapi tool
@echo "Building zapi tool"
- @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o bin/zapi -ldflags=$(LD_FLAGS) cmd/tools/zapi/main/main.go
+ @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -trimpath -o bin/zapi -ldflags=$(LD_FLAGS) cmd/tools/zapi/main/main.go
@# Build the grafana tool
@echo "Building grafana tool"
- @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o bin/grafana -ldflags=$(LD_FLAGS) cmd/tools/grafana/main/main.go
-
-###############################################################################
-# Collectors
-###############################################################################
-collectors: deps
- @echo "Building collectors:"
- @for collector in ${COLLECTORS}; do \
- cd cmd/collectors/$${collector}; \
- echo " Building $${collector}"; \
- GOOS=$(GOOS) GOARCH=$(GOARCH) go build -ldflags=$(LD_FLAGS) -buildmode=plugin -o ../../../bin/collectors/"$${collector}".so; \
- if [ -d plugins ]; then \
- echo " Building plugins for $${collector}"; \
- cd plugins; \
- for plugin in `ls`; do \
- echo " Building: $${plugin}"; \
- cd $${plugin}; \
- GOOS=$(GOOS) GOARCH=$(GOARCH) go build -ldflags=$(LD_FLAGS) -buildmode=plugin -o ../../../../../bin/plugins/"$${collector}"/"$${plugin}".so; \
- cd ../; \
- done; \
- cd ../../../../; \
- else \
- cd - > /dev/null; \
- fi; \
- done
-
-###############################################################################
-# Exporters
-###############################################################################
-exporters: deps
- @echo "Building exporters:"
- @for exporter in ${EXPORTERS}; do \
- cd cmd/exporters/$${exporter}; \
- echo " Building $${exporter}"; \
- GOOS=$(GOOS) GOARCH=$(GOARCH) go build -ldflags=$(LD_FLAGS) -buildmode=plugin -o ../../../bin/exporters/"$${exporter}".so; \
- cd - > /dev/null; \
- done
-
-
+ @GOOS=$(GOOS) GOARCH=$(GOARCH) go build -trimpath -o bin/grafana -ldflags=$(LD_FLAGS) cmd/tools/grafana/main/main.go
###############################################################################
# Build tar gz distribution
@@ -150,7 +110,7 @@ dist-tar:
@mkdir ${TMP}
@mkdir ${DIST}
@cp -a bin conf docs grafana README.md LICENSE ${TMP}
- @cp -a harvest.example.yml ${TMP}/harvest.yml
+ @cp -a harvest.yml ${TMP}/harvest.yml
@tar --directory /tmp --create --gzip --file ${DIST}/${HARVEST_PACKAGE}.tar.gz ${HARVEST_PACKAGE}
@rm -rf ${TMP}
@echo "tar artifact @" ${DIST}/${HARVEST_PACKAGE}.tar.gz
diff --git a/NOTICE b/NOTICE
index 410c218b1..66777d9a5 100644
--- a/NOTICE
+++ b/NOTICE
@@ -16,4 +16,20 @@ Mozilla Public License 2.0
github.com/spf13/cobra
https://github.com/spf13/cobra v1.1.3
-Apache License 2.0
\ No newline at end of file
+Apache License 2.0
+
+github.com/rs/zerolog
+https://github.com/rs/zerolog v1.22.0
+MIT License
+
+gopkg.in/natefinch/lumberjack.v2
+https://github.com/natefinch/lumberjack v2.0.0
+MIT License
+
+gopkg.in/yaml.v3
+https://github.com/go-yaml/yaml v3.0.0-20210107192922-496545a6307b
+Apache License 2.0
+
+github.com/pkg/errors
+https://github.com/pkg/errors v0.9.1
+BSD 2-Clause "Simplified" License
\ No newline at end of file
diff --git a/README.md b/README.md
index 624f88acb..a039290a1 100644
--- a/README.md
+++ b/README.md
@@ -31,12 +31,14 @@ We provide pre-compiled binaries for Linux, RPMs, and Debs.
## Pre-compiled Binaries
-Visit the [Releases page](https://github.com/NetApp/harvest/releases) and copy the `tar.gz` link you want to download. For example, to download the `v21.05.1` release:
+### Installation
+Visit the [Releases page](https://github.com/NetApp/harvest/releases) and copy the `tar.gz` link you want to download. For example, to download the `v21.05.2` release:
```
-wget https://github.com/NetApp/harvest/releases/download/v21.05.1/harvest-21.05.1-1.tar.gz
-tar -xvf harvest-21.05.1-1.tar.gz
-cd harvest-21.05.1-1
+RELEASE=harvest-21.05.2-1
+wget https://github.com/NetApp/harvest/releases/latest/download/$RELEASE.tar.gz
+tar -xvf $RELEASE.tar.gz
+cd $RELEASE
# Run Harvest with the default unix localhost collector
bin/harvest start
@@ -45,21 +47,47 @@ bin/harvest start
If you don't have `wget` installed, you can use `curl` like so:
```
-curl -L -O https://github.com/NetApp/harvest/releases/download/v21.05.1/harvest-21.05.1-1.tar.gz
+curl -L -O https://github.com/NetApp/harvest/releases/latest/download/$RELEASE.tar.gz
+```
+
+### Upgrade
+Follow the steps below to upgrade Harvest.
+
+Stop Harvest:
+```
+cd <existing harvest directory>
+bin/harvest stop
+```
+Verify that all pollers have stopped:
+```
+bin/harvest status
+or
+pgrep --full '\-\-poller' # should return nothing if all pollers are stopped
+```
+
+Follow the instructions [above](#installation) to download and install Harvest and then
+copy your old `harvest.yml` into the new install directory like so:
+
+```
+cp /path/to/old/harvest/harvest.yml /path/to/new/harvest.yml
```
## Redhat
-> Installation of the Harvest package may require root or administrator privileges
-Download the latest rpm of [Harvest](https://github.com/NetApp/harvest/releases/latest) from the releases tab and install with yum.
+> Installation and upgrade of the Harvest package may require root or administrator privileges
+
+Download the latest rpm of [Harvest](https://github.com/NetApp/harvest/releases/latest) from the releases tab and install or upgrade with yum.
```
- $ sudo yum install harvest.XXX.rpm
+ $ sudo yum install|upgrade harvest.XXX.rpm
```
Once the installation has finished, edit the [harvest.yml configuration](#harvest-configuration) file located in `/opt/harvest/harvest.yml`
-After editing `/opt/harvest/harvest.yml`, manage Harvest with `systemctl start|stop|restart harvest`
+After editing `/opt/harvest/harvest.yml`, manage Harvest with `systemctl start|stop|restart harvest`.
+
+> To ensure that you don't run into [permission issues](https://github.com/NetApp/harvest/issues/122#issuecomment-856138831), make sure you manage Harvest using `systemctl` instead of running the harvest binary directly.
+
### Changes install makes
* Directories `/var/log/harvest/` and `/var/log/run/` are created
@@ -67,17 +95,22 @@ After editing `/opt/harvest/harvest.yml`, manage Harvest with `systemctl start|s
* Systemd `/etc/systemd/system/harvest.service` file is created and enabled
## Debian
-> Installation of the Harvest package may require root or administrator privileges
-Download the latest deb of [Harvest](https://github.com/NetApp/harvest/releases/latest) from the releases tab and install with apt.
+> Installation and upgrade of the Harvest package may require root or administrator privileges
+
+Download the latest deb of [Harvest](https://github.com/NetApp/harvest/releases/latest) from the releases tab and install or upgrade with apt.
```
- $ sudo apt install ./harvest-.amd64.deb
+ $ sudo apt update
+ $ sudo apt install|upgrade ./harvest-<version>.amd64.deb
+
```
Once the installation has finished, edit the [harvest.yml configuration](#harvest-configuration) file located in `/opt/harvest/harvest.yml`
-After editing `/opt/harvest/harvest.yml`, manage Harvest with `systemctl start|stop|restart harvest`
+After editing `/opt/harvest/harvest.yml`, manage Harvest with `systemctl start|stop|restart harvest`.
+
+> To ensure that you don't run into [permission issues](https://github.com/NetApp/harvest/issues/122#issuecomment-856138831), make sure you manage Harvest using `systemctl` instead of running the harvest binary directly.
### Changes install makes
* Directories `/var/log/harvest/` and `/var/log/run/` are created
@@ -90,14 +123,14 @@ Work in progress. Coming soon
## Building from source
-To build Harvest from source code, first make sure you have a working Go environment with [version 1.15 or greater installed](https://golang.org/doc/install). You'll also need an Internet connection to install go dependencies. If you need to build from an air-gapped machine, use `go mod vendor` from an Internet connected machine first, and then copy the `vendor` directory to the air-gapped machine.
+To build Harvest from source code, first make sure you have a working Go environment with [version 1.15 or greater installed](https://golang.org/doc/install).
Clone the repo and build everything.
```
git clone https://github.com/NetApp/harvest.git
cd harvest
-make
+make build
bin/harvest version
```
diff --git a/SUPPORT.md b/SUPPORT.md
index 34142e81d..653ea7356 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -1,7 +1,6 @@
# Harvest Support and Getting Help
-Welcome to Harvest! Harvest is an official NetApp open-source project. You can reach out
-to NetApp using any of the [standard mechanisms](http://mysupport.netapp.com/info/web/ECMLP2619434.html) and get support.
+Harvest is an open-source project developed and published by NetApp to collect performance, capacity, and hardware metrics from ONTAP clusters. These metrics can then be delivered to a range of databases and displayed in Grafana dashboards. Harvest is not an officially supported NetApp product. NetApp maintains and updates Harvest with bug fixes, security updates, and feature development. For assistance, refer to [Getting Help](#getting-help).
This document describes Harvest's release and support lifecycle as well as places you can get help.
@@ -23,9 +22,11 @@ We use GitHub for tracking bugs and feature requests.
# Getting Help
-There is also a vibrant community of Harvest users on NetApp's community discussion board and the `#harvest` channel in [NetApp's Slack team](https://netapppub.slack.com/archives/C02072M1UCD). This is a great place to ask general questions about the project and discuss related topics with like-minded peers.
+There is a vibrant community of Harvest users on NetApp's community discussion board and the `#harvest` channel in [NetApp's Slack team](https://netapppub.slack.com/archives/C02072M1UCD). Slack is a great place to ask general questions about the project and discuss related topics with like-minded peers.
-Join the [thePub workspace](https://www.netapp.io/slack). After joining, click the `+` sign next to `Channels` and then click the `Browse Channels` button. Search for `harvest` from the Channel Browser and click `Join`.
+## Slack
+
+Join [thePub workspace](https://www.netapp.io/slack). After joining, click the `+` sign next to `Channels` and then click the `Browse Channels` button. Search for `harvest` from the Channel Browser and click `Join`.

@@ -43,8 +44,3 @@ The Harvest Community is active on the NetApp Community Discussion Board, you ca
* [Harvest Architecture](ARCHITECTURE.md)
* [Contributing](CONTRIBUTING.md)
* [Wiki](https://github.com/NetApp/harvest/wiki)
-
-## Real-time Chat
-
-* [Slack](https://netapppub.slack.com/archives/C02072M1UCD) ([registration](https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg)):
-The `#harvest` channel is the best place to chat with like-minded Harvesters.
diff --git a/cmd/collectors/unix/README.md b/cmd/collectors/unix/README.md
index 4020e10f3..74298827e 100644
--- a/cmd/collectors/unix/README.md
+++ b/cmd/collectors/unix/README.md
@@ -1,13 +1,14 @@
-# Unix Collector
+# Unix
-## Overview
+This collector polls resource usage by Harvest pollers on the local system. The collector might be extended in the future to monitor any local or remote process.
-This collector polls resource usage by Harvest pollers on the local system.
+## Target System
+The machine where Harvest is running ("localhost").
-### Supported platforms
-Collector requires an OS where the `/proc/` filesystem is available. This includes most Unix/Unix-like systems:
+## Requirements
+The collector requires an OS where the proc filesystem is available. If you are a developer, you are welcome to add support for other platforms. Currently, supported platforms include most Unix/Unix-like systems:
* Android / Termux
* DragonFly BSD
@@ -18,19 +19,40 @@ Collector requires an OS where the `/proc/` filesystem is available. This includ
* Plan9
* Solaris
-(On FreeBSD and NetBSD the `/proc/`-filesystem needs to be manually mounted)
+(On FreeBSD and NetBSD the proc-filesystem needs to be manually mounted).
-### Collected metrics
+## Parameters
-* `start_time`
-* `cpu`
-* `cpu_percent`
-* `memory`
-* `memory_percent`
-* `io`
-* `net`
-* `ctx`
-* `threads`
-* `fds`
+| parameter | type | description | default |
+|------------------------|--------------|--------------------------------------------------|------------------------|
+| `mount_point`          | string, optional | path to the `proc` filesystem                     | `/proc`                |
+
+## Metrics
+
+The Collector follows [the Linux proc(5) manual](https://man7.org/linux/man-pages/man5/procfs.5.html) to parse a static set of metrics. Unless otherwise stated, the metric has a scalar value:
+
+| metric | type | unit | description |
+|--------------------|----------------------------|---------------|----------------------------------------------------------|
+| `start_time` | counter, `float64` | seconds | process uptime |
+| `cpu_percent` | gauge, `float64` | percent | CPU used since last poll |
+| `memory_percent` | gauge, `float64` | percent | Memory used (RSS) since last poll |
+| `cpu` | histogram, `float64` | seconds | CPU used since last poll (`system`, `user`, `iowait`) |
+| `memory` | histogram, `uint64` | kB | Memory used since last poll (`rss`, `vms`, `swap`, etc) |
+| `io`               | histogram, `uint64`        | byte, count   | IOs performed by process: `rchar`, `wchar`, `read_bytes`, `write_bytes` - read/write IOs; `syscr`, `syscw` - syscalls for IO operations |
+| `net` | histogram, `uint64` | count/byte | Different IO operations over network devices |
+| `ctx`              | histogram, `uint64`        | count         | Number of context switches (`voluntary`, `involuntary`)   |
+| `threads` | counter, `uint64` | count | Number of threads |
+| `fds` | counter, `uint64` | count | Number of file descriptors |
-Note that `cpu`, `memory`, `io`, `net`, `ctx` are histograms.
\ No newline at end of file
+
+Additionally, the collector provides the following instance labels:
+
+| label | description |
+|-------------------|----------------------------------------------------------|
+| poller | name of the poller |
+| pid | PID of the poller |
+
+
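+For illustration only, here is a minimal, hypothetical sketch (not the collector's implementation, which lives in [process.go](process.go)) of reading a process's CPU time from the proc filesystem under `mount_point` and converting clock ticks to seconds, which is the kind of parsing the collector performs for the `cpu` metrics:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// cpuSeconds reads utime and stime from <mountPoint>/<pid>/stat and converts
+// clock ticks to seconds. Field positions follow proc(5); for simplicity this
+// assumes the process name contains no spaces and skips error handling on the
+// numeric conversions.
+func cpuSeconds(mountPoint string, pid int) (float64, error) {
+	data, err := os.ReadFile(fmt.Sprintf("%s/%d/stat", mountPoint, pid))
+	if err != nil {
+		return 0, err
+	}
+	fields := strings.Fields(string(data))
+	utime, _ := strconv.ParseFloat(fields[13], 64) // user-mode ticks
+	stime, _ := strconv.ParseFloat(fields[14], 64) // kernel-mode ticks
+	clkTck := 100.0                                // typically the value of `getconf CLK_TCK`
+	return (utime + stime) / clkTck, nil
+}
+
+func main() {
+	if sec, err := cpuSeconds("/proc", os.Getpid()); err == nil {
+		fmt.Printf("cpu seconds: %.2f\n", sec)
+	}
+}
+```
+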
+## Issues
+
+* The collector will fail on WSL because some non-critical files in the proc filesystem are not present. This can be fixed by making the collector (specifically, the `Reload` method in [process.go](process.go)) more tolerant.
\ No newline at end of file
diff --git a/cmd/collectors/unix/errors.go b/cmd/collectors/unix/errors.go
index 46b9fb20e..2e296b983 100644
--- a/cmd/collectors/unix/errors.go
+++ b/cmd/collectors/unix/errors.go
@@ -2,7 +2,7 @@
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package unix
const (
PROCESS_NOT_FOUND = "process not found"
diff --git a/cmd/collectors/unix/main.go b/cmd/collectors/unix/main.go
index 7bb32175a..5c78d3dbd 100644
--- a/cmd/collectors/unix/main.go
+++ b/cmd/collectors/unix/main.go
@@ -1,20 +1,20 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package unix
import (
"goharvest2/cmd/poller/collector"
+ "goharvest2/cmd/poller/plugin"
"goharvest2/pkg/conf"
"goharvest2/pkg/errors"
"goharvest2/pkg/logging"
"goharvest2/pkg/matrix"
"goharvest2/pkg/set"
"goharvest2/pkg/tree/node"
- "io/ioutil"
+ "goharvest2/pkg/util"
"os"
"os/exec"
- "path"
"runtime"
"strconv"
"strings"
@@ -25,7 +25,7 @@ import (
// https://en.wikipedia.org/wiki/Procfs
var _SUPPORTED_PLATFORMS = []string{
"aix",
- "andriod", // available in termux
+ "android", // available in termux
"dragonfly",
"freebsd", // available, but not mounted by default
"linux",
@@ -70,6 +70,17 @@ var _DTYPES = map[string]string{
"fds": "uint64",
}
+func init() {
+ plugin.RegisterModule(Unix{})
+}
+
+func (Unix) HarvestModule() plugin.ModuleInfo {
+ return plugin.ModuleInfo{
+ ID: "harvest.collector.unix",
+ New: func() plugin.Module { return new(Unix) },
+ }
+}
+
func getClockTicks() {
_CLK_TCK = 100.0
if data, err := exec.Command("getconf", "CLK_TCK").Output(); err == nil {
@@ -127,14 +138,9 @@ type Unix struct {
processes map[string]*Process
}
-// New - create new, uninitialized collector
-func New(a *collector.AbstractCollector) collector.Collector {
- return &Unix{AbstractCollector: a}
-}
-
// Init - initialize the collector
-func (me *Unix) Init() error {
-
+func (me *Unix) Init(a *collector.AbstractCollector) error {
+ me.AbstractCollector = a
var err error
if !set.NewFrom(_SUPPORTED_PLATFORMS).Has(runtime.GOOS) {
@@ -150,7 +156,7 @@ func (me *Unix) Init() error {
_MOUNT_POINT = mp
}
- // assert fs is avilable
+ // assert fs is available
if fi, err := os.Stat(_MOUNT_POINT); err != nil || !fi.IsDir() {
return errors.New(errors.ERR_IMPLEMENT, "filesystem ["+_MOUNT_POINT+"] not available")
}
@@ -281,13 +287,10 @@ func (me *Unix) PollInstance() (*matrix.Matrix, error) {
}
for _, name := range pollerNames {
- pidf := path.Join(me.Options.PidPath, name+".pid")
-
pid := ""
-
- if x, err := ioutil.ReadFile(pidf); err == nil {
- //logger.Debug(me.Prefix, "skip instance (%s), err pidf: %v", name, err)
- pid = string(x)
+ pids, err := util.GetPid(name)
+ if err == nil && len(pids) == 1 {
+ pid = strconv.Itoa(pids[0])
}
if instance := me.Matrix.GetInstance(name); instance == nil {
@@ -303,12 +306,19 @@ func (me *Unix) PollInstance() (*matrix.Matrix, error) {
me.Logger.Debug().Msgf("update instance (%s) with PID (%s)", name, pid)
}
}
-
+ rewriteIndexes := currInstances.Size() > 0
for name := range currInstances.Iter() {
me.Matrix.RemoveInstance(name)
me.Logger.Debug().Msgf("remove instance (%s)", name)
}
-
+ // If there were removals, the indexes need to be rewritten since gaps were created
+ if rewriteIndexes {
+ newMatrix := me.Matrix.Clone(false, true, false)
+ for key := range me.Matrix.GetInstances() {
+ _, _ = newMatrix.NewInstance(key)
+ }
+ me.Matrix = newMatrix
+ }
t := len(me.Matrix.GetInstances())
r := currInstances.Size()
a := t - (currSize - r)
@@ -416,14 +426,14 @@ func setStartTime(m matrix.Metric, i *matrix.Instance, p *Process, s *System) {
}
}
-func setNumThreads(m matrix.Metric, i *matrix.Instance, p *Process, s *System) {
+func setNumThreads(m matrix.Metric, i *matrix.Instance, p *Process, _ *System) {
err := m.SetValueUint64(i, p.numThreads)
if err != nil {
logging.Get().Error().Stack().Err(err).Msg("error")
}
}
-func setNumFds(m matrix.Metric, i *matrix.Instance, p *Process, s *System) {
+func setNumFds(m matrix.Metric, i *matrix.Instance, p *Process, _ *System) {
err := m.SetValueUint64(i, p.numFds)
if err != nil {
logging.Get().Error().Stack().Err(err).Msg("error")
@@ -437,7 +447,7 @@ func setMemoryPercent(m matrix.Metric, i *matrix.Instance, p *Process, s *System
}
}
-func setCpuPercent(m matrix.Metric, i *matrix.Instance, p *Process, s *System) {
+func setCpuPercent(m matrix.Metric, i *matrix.Instance, p *Process, _ *System) {
if p.elapsedTime != 0 {
err := m.SetValueFloat64(i, p.cpuTotal/p.elapsedTime*100)
if err != nil {
@@ -496,5 +506,7 @@ func setCtx(m matrix.Metric, l string, i *matrix.Instance, p *Process) {
}
}
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
+// Interface guards
+var (
+ _ collector.Collector = (*Unix)(nil)
+)
diff --git a/cmd/collectors/unix/process.go b/cmd/collectors/unix/process.go
index 2755edd5e..c4178b116 100644
--- a/cmd/collectors/unix/process.go
+++ b/cmd/collectors/unix/process.go
@@ -2,7 +2,7 @@
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package unix
import (
"bytes"
diff --git a/cmd/collectors/unix/system.go b/cmd/collectors/unix/system.go
index aa123eee4..b55459d12 100644
--- a/cmd/collectors/unix/system.go
+++ b/cmd/collectors/unix/system.go
@@ -2,7 +2,7 @@
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package unix
import (
"goharvest2/pkg/errors"
diff --git a/cmd/collectors/zapi/README.md b/cmd/collectors/zapi/README.md
index 35147eef2..4a4869f8d 100644
--- a/cmd/collectors/zapi/README.md
+++ b/cmd/collectors/zapi/README.md
@@ -1,11 +1,18 @@
-# Zapi Collector
+# Zapi
-Zapi collects data from ONTAP systems using the ZAPI protocol. The collector submits data as it is and does not perform any calculations (therefore it is not able to collect `perf` objects). Since the attributes of most APIs have an irregular tree structure, sometimes a plugin will be required to collect metrics from an API.
+Zapi collects data from ONTAP systems using the ZAPI protocol. The collector submits data as received from the target system, and does not perform any calculations or post-processing. Since the attributes of most APIs have an irregular tree structure, sometimes a plugin will be required to collect all metrics from an API.
+Note that the [ZapiPerf collector](../zapiperf/README.md) is an extension of this collector; therefore, many parameters and configuration settings coincide.
-## Configuration
+## Target System
+The target system can be any cDot or 7Mode ONTAP system. Any version is supported; however, the default configuration files may not completely match an older system.
+
+## Requirements
+No SDK or other software is required. It is recommended to create a read-only user for Harvest on the ONTAP system (see the [Authentication document](../../../docs/AuthAndPermissions.md)).
+
+## Parameters
The parameters and configuration are similar to those of the [ZapiPerf collector](../zapiperf/README.md). Only the differences will be discussed below.
@@ -28,11 +35,22 @@ The Zapi collector does not have the parameters `instance_key` and `override` pa
#### `counters`
-This section contains the complete or partial attribute tree of the queried API. Since the collector does get counter metadata from the ONTAP system, two additional symbols are used for non-numeric attributes:
+This section contains the complete or partial attribute tree of the queried API. Since the collector does not get counter metadata from the ONTAP system, two additional symbols are used for non-numeric attributes:
- `^` used as a prefix indicates that the attribute should be stored as a label
- `^^` indicates that the attribute is a label and an instance key (i.e. a label that uniquely identifies an instance, such as `name`, `uuid`). If a single label does not uniquely identify an instance, then multiple instance keys should be indicated.
+Additionally, the symbol `=>` can be used to set a custom display name for both instance labels and numeric counters. Example:
+
+```yaml
+aggr-attributes:
+ - aggr-raid-attributes:
+ - ^aggregate-type => type
+ - disk-count => disks
+```
+
+will force Harvest to use `aggr_type` and `aggr_disks` for the label and the metric, respectively.
+
#### Creating/editing object configurations
The Zapi tool can help to create or edit subtemplates. Examples:
@@ -48,3 +66,19 @@ $ harvest zapi --poller show data --api volume-get-iter
```
Replace `<poller>` with the name of a poller that can connect to an ONTAP system.
+
+## Metrics
+
+The collector collects a dynamic set of metrics. Since most ZAPIs have a tree structure, the collector converts that structure into a flat metric representation. No post-processing or calculation is performed on the collected data itself.
+
+As an example, the `aggr-get-iter` ZAPI provides the following partial attribute tree:
+
+```yaml
+aggr-attributes:
+ - aggr-raid-attributes:
+ - disk-count
+ - aggr-snapshot-attributes:
+ - files-total
+```
+
+The Zapi collector will convert this tree into two "flat" metrics: `aggr_raid_disk_count` and `aggr_snapshot_files_total`. (The algorithm that generates metric names attempts to keep them as simple as possible, but sometimes it's useful to set a short display name manually; see [counters](#counters).)
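+
+For illustration only, a simplified sketch of this flattening idea (this is not the collector's actual naming code; the real algorithm also handles display-name overrides and other cases):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// flatten joins the elements of an attribute path into a metric name,
+// splitting each element on "-" and dropping the structural "attributes" token.
+func flatten(path ...string) string {
+	var parts []string
+	for _, element := range path {
+		for _, token := range strings.Split(element, "-") {
+			if token == "attributes" {
+				continue
+			}
+			parts = append(parts, token)
+		}
+	}
+	return strings.Join(parts, "_")
+}
+
+func main() {
+	fmt.Println(flatten("aggr-raid-attributes", "disk-count"))      // aggr_raid_disk_count
+	fmt.Println(flatten("aggr-snapshot-attributes", "files-total")) // aggr_snapshot_files_total
+}
+```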
diff --git a/cmd/collectors/zapi/collector/zapi.go b/cmd/collectors/zapi/collector/zapi.go
index fa2ba052a..8f8b43bc6 100644
--- a/cmd/collectors/zapi/collector/zapi.go
+++ b/cmd/collectors/zapi/collector/zapi.go
@@ -4,6 +4,13 @@
package zapi
import (
+ "goharvest2/cmd/collectors/zapi/plugins/shelf"
+ "goharvest2/cmd/collectors/zapi/plugins/snapmirror"
+ "goharvest2/cmd/collectors/zapiperf/plugins/fcp"
+ "goharvest2/cmd/collectors/zapiperf/plugins/headroom"
+ "goharvest2/cmd/collectors/zapiperf/plugins/nic"
+ "goharvest2/cmd/collectors/zapiperf/plugins/volume"
+ "goharvest2/cmd/poller/plugin"
"strconv"
"strings"
"time"
@@ -33,16 +40,19 @@ type Zapi struct {
shortestPathPrefix []string
}
-func New(a *collector.AbstractCollector) collector.Collector {
- return &Zapi{AbstractCollector: a}
+func init() {
+ plugin.RegisterModule(Zapi{})
}
-func NewZapi(a *collector.AbstractCollector) *Zapi {
- return &Zapi{AbstractCollector: a}
+func (Zapi) HarvestModule() plugin.ModuleInfo {
+ return plugin.ModuleInfo{
+ ID: "harvest.collector.zapi",
+ New: func() plugin.Module { return new(Zapi) },
+ }
}
-func (me *Zapi) Init() error {
-
+func (me *Zapi) Init(a *collector.AbstractCollector) error {
+ me.AbstractCollector = a
if err := me.InitVars(); err != nil {
return err
}
@@ -103,6 +113,26 @@ func (me *Zapi) InitVars() error {
return nil
}
+func (me *Zapi) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin {
+ switch kind {
+ case "Snapmirror":
+ return snapmirror.New(abc)
+ case "Shelf":
+ return shelf.New(abc)
+ case "Nic":
+ return nic.New(abc)
+ case "Fcp":
+ return fcp.New(abc)
+ case "Headroom":
+ return headroom.New(abc)
+ case "Volume":
+ return volume.New(abc)
+ default:
+ me.Logger.Info().Msgf("no zapi plugin found for %s", kind)
+ }
+ return nil
+}
+
func (me *Zapi) InitCache() error {
if me.Client.IsClustered() {
@@ -375,3 +405,8 @@ func (me *Zapi) PollData() (*matrix.Matrix, error) {
return me.Matrix, nil
}
+
+// Interface guards
+var (
+ _ collector.Collector = (*Zapi)(nil)
+)
diff --git a/cmd/collectors/zapi/main.go b/cmd/collectors/zapi/main.go
deleted file mode 100644
index 5f9b38342..000000000
--- a/cmd/collectors/zapi/main.go
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright NetApp Inc, 2021 All rights reserved
- */
-package main
-
-import (
- zapi "goharvest2/cmd/collectors/zapi/collector"
- "goharvest2/cmd/poller/collector"
-)
-
-func New(a *collector.AbstractCollector) collector.Collector {
- return zapi.New(a)
-}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapi/plugins/shelf/shelf.go b/cmd/collectors/zapi/plugins/shelf/shelf.go
index 32688a2fd..114776710 100644
--- a/cmd/collectors/zapi/plugins/shelf/shelf.go
+++ b/cmd/collectors/zapi/plugins/shelf/shelf.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package shelf
import (
"goharvest2/cmd/poller/collector"
@@ -150,7 +150,13 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
my.Logger.Debug().Msgf("fetching %d shelf counters", len(shelves))
- output := make([]*matrix.Matrix, 0)
+ var output []*matrix.Matrix
+
+ // Purge and reset data
+ for _, data1 := range my.data {
+ data1.PurgeInstances()
+ data1.Reset()
+ }
for _, shelf := range shelves {
@@ -163,10 +169,7 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
shelfId = uid
}
- for attribute, data := range my.data {
-
- data.PurgeInstances()
-
+ for attribute, data1 := range my.data {
if my.instanceKeys[attribute] == "" {
my.Logger.Warn().Msgf("no instance keys defined for object [%s], skipping", attribute)
continue
@@ -183,8 +186,7 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
for _, obj := range objectElem.GetChildren() {
if key := obj.GetChildContentS(my.instanceKeys[attribute]); key != "" {
-
- instance, err := data.NewInstance(shelfId + "." + key)
+ instance, err := data1.NewInstance(shelfId + "." + key)
if err != nil {
my.Logger.Debug().Msgf("add (%s) instance: %v", attribute, err)
@@ -207,7 +209,7 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
}
}
- output = append(output, data)
+ output = append(output, data1)
}
}
@@ -220,9 +222,7 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
shelfId = shelf.GetChildContentS("shelf-id")
}
- for attribute, data := range my.data {
-
- data.Reset()
+ for attribute, data1 := range my.data {
objectElem := shelf.GetChildS(attribute)
if objectElem == nil {
@@ -237,14 +237,14 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
continue
}
- instance := data.GetInstance(shelfId + "." + key)
+ instance := data1.GetInstance(shelfId + "." + key)
if instance == nil {
my.Logger.Debug().Msgf("(%s) instance [%s.%s] not found in cache skipping", attribute, shelfId, key)
continue
}
- for metricKey, m := range data.GetMetrics() {
+ for metricKey, m := range data1.GetMetrics() {
if value := strings.Split(obj.GetChildContentS(metricKey), " ")[0]; value != "" {
if err := m.SetValueString(instance, value); err != nil {
@@ -260,6 +260,3 @@ func (my *Shelf) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
return output, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go b/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go
index f10d31794..7141fde6f 100644
--- a/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go
+++ b/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package snapmirror
import (
"goharvest2/cmd/poller/plugin"
@@ -230,8 +230,4 @@ func (my *SnapMirror) updateLimitCache() error {
}
my.Logger.Debug().Msgf("updated limit cache for %d nodes", count)
return nil
-
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapiperf/README.md b/cmd/collectors/zapiperf/README.md
index fc5bd725b..6d0ecad29 100644
--- a/cmd/collectors/zapiperf/README.md
+++ b/cmd/collectors/zapiperf/README.md
@@ -1,10 +1,17 @@
-# ZapiPerf Collector
+# ZapiPerf
ZapiPerf collects performance metrics from ONTAP systems using the ZAPI protocol. The collector is designed to be easily extendible to collect new objects or to collect additional counters from already configured objects. (The [default configuration](../../../conf/zapiperf/default.yaml) file contains 25 objects)
-Note: The main difference between this collector and the Zapi collector is that ZapiPerf collects only the `perf` subfamily of the ZAPIs. Additionally, ZapiPerf always calculates final values from deltas of two subsequent polls (therefore data is emitted only after the second poll).
+This collector is an extension of the [Zapi collector](../zapi/README.md), with the major difference being that ZapiPerf collects only the `perf` subfamily of the ZAPIs. Additionally, ZapiPerf always calculates final values from the deltas of two subsequent polls.
+
+## Target System
+The target system can be any cDot or 7Mode ONTAP system. Any version is supported; however, the default configuration files may not completely match an older system.
+
+## Requirements
+No SDK or other software is required. It is recommended to create a read-only user for Harvest on the ONTAP system (see the [Authentication document](../../../docs/AuthAndPermissions.md)).
+
+## Parameters
-## Configuration
The parameters of the collector are distributed across three files:
- Harvest configuration file (default: `harvest.yml`)
@@ -13,7 +20,7 @@ The parameters of the collector are distributed across three files:
With the exception of `addr`, `datacenter` and `auth_style`, all other parameters of the ZapiPerf collector can be defined in either of these three files. Parameters defined in the lower-level file, override parameters in the higher-level file. This allows the user to configure each objects individually, or use same parameters for all objects.
-For the sake of brevity, these parameters are described only in the section [ZapiPerf configuration file](#zapiperf-configuration-file).
+For the sake of brevity, these parameters are described only in the section [Collector configuration file](#collector-configuration-file).
### Harvest configuration file
@@ -139,4 +146,17 @@ $ harvest zapi --poller show counters --object volume
# will print raw data of all counters in the volume objects
```
-Replace `` with the name of one of your ONTAP pollers.
\ No newline at end of file
+Replace `<poller>` with the name of one of your ONTAP pollers.
+
+
+## Metrics
+
+The collector collects a dynamic set of metrics. The metric values are calculated from two consecutive polls (therefore no metrics are emitted after the first poll). The calculation algorithm depends on the `property` and `base-counter` attributes of each metric; the following properties are supported:
+
+| property | formula | description |
+|-----------|--------------------------------------------|-----------------------------------------------------------|
+| raw       | x = x<sub>i</sub>                                                               | no post-processing, value **x** is submitted as it is     |
+| delta     | x = x<sub>i</sub> - x<sub>i-1</sub>                                             | delta of the values from two consecutive polls, **x<sub>i</sub>** and **x<sub>i-1</sub>** |
+| rate      | x = (x<sub>i</sub> - x<sub>i-1</sub>) / (t<sub>i</sub> - t<sub>i-1</sub>)       | delta divided by the interval of the two polls in seconds  |
+| average   | x = (x<sub>i</sub> - x<sub>i-1</sub>) / (y<sub>i</sub> - y<sub>i-1</sub>)       | delta divided by the delta of the base counter **y**       |
+| percent   | x = 100 * (x<sub>i</sub> - x<sub>i-1</sub>) / (y<sub>i</sub> - y<sub>i-1</sub>) | average multiplied by 100                                  |
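+
+For illustration only, the formulas above can be sketched as follows (hypothetical function and variable names; this is not the collector's implementation):
+
+```go
+package main
+
+import "fmt"
+
+// calc applies a property to two consecutive samples of a counter
+// (xPrev, xCur), its base counter (yPrev, yCur), and the poll timestamps
+// in seconds (tPrev, tCur).
+func calc(property string, xPrev, xCur, yPrev, yCur, tPrev, tCur float64) float64 {
+	switch property {
+	case "raw":
+		return xCur
+	case "delta":
+		return xCur - xPrev
+	case "rate":
+		return (xCur - xPrev) / (tCur - tPrev)
+	case "average":
+		return (xCur - xPrev) / (yCur - yPrev)
+	case "percent":
+		return 100 * (xCur - xPrev) / (yCur - yPrev)
+	}
+	return 0
+}
+
+func main() {
+	// e.g. an "average"-style latency counter whose base counter is total ops:
+	// (1600 - 1000) / (80 - 50) = 20
+	fmt.Println(calc("average", 1000, 1600, 50, 80, 0, 60))
+}
+```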
diff --git a/cmd/collectors/zapiperf/plugins/fcp/fcp.go b/cmd/collectors/zapiperf/plugins/fcp/fcp.go
index 4a81e133d..442cafb76 100644
--- a/cmd/collectors/zapiperf/plugins/fcp/fcp.go
+++ b/cmd/collectors/zapiperf/plugins/fcp/fcp.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package fcp
import (
"goharvest2/cmd/poller/plugin"
@@ -100,6 +100,3 @@ func (me *Fcp) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
}
return nil, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapiperf/plugins/headroom/headroom.go b/cmd/collectors/zapiperf/plugins/headroom/headroom.go
index 32ab5a2c7..370c4679a 100644
--- a/cmd/collectors/zapiperf/plugins/headroom/headroom.go
+++ b/cmd/collectors/zapiperf/plugins/headroom/headroom.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package headroom
import (
"goharvest2/cmd/poller/plugin"
@@ -40,6 +40,3 @@ func (me *Headroom) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
return nil, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapiperf/plugins/nic/nic.go b/cmd/collectors/zapiperf/plugins/nic/nic.go
index 60abc8215..25b7e5db9 100644
--- a/cmd/collectors/zapiperf/plugins/nic/nic.go
+++ b/cmd/collectors/zapiperf/plugins/nic/nic.go
@@ -11,7 +11,7 @@ Package Description:
- "nic_state": 0 if port is up, 1 otherwise
*/
-package main
+package nic
import (
"goharvest2/cmd/poller/plugin"
@@ -124,6 +124,3 @@ func (me *Nic) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
return nil, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapiperf/plugins/volume/volume.go b/cmd/collectors/zapiperf/plugins/volume/volume.go
index 53fc8c2dc..54c0dd16c 100644
--- a/cmd/collectors/zapiperf/plugins/volume/volume.go
+++ b/cmd/collectors/zapiperf/plugins/volume/volume.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package volume
import (
"goharvest2/cmd/poller/plugin"
@@ -150,6 +150,3 @@ func (me *Volume) Run(data *matrix.Matrix) ([]*matrix.Matrix, error) {
return []*matrix.Matrix{cache}, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/collectors/zapiperf/zapiperf.go b/cmd/collectors/zapiperf/zapiperf.go
index d1aeccf54..9132396bd 100644
--- a/cmd/collectors/zapiperf/zapiperf.go
+++ b/cmd/collectors/zapiperf/zapiperf.go
@@ -1,10 +1,31 @@
/*
- * Copyright NetApp Inc, 2021 All rights reserved
- */
-package main
+ Copyright NetApp Inc, 2021 All rights reserved
+
+ ZapiPerf collects and processes metrics from the "perf" APIs of the
+ ZAPI protocol. This collector inherits some methods and fields of
+ the Zapi collector (as they use the same protocol). However,
+ ZapiPerf calculates final metric values from the deltas of two
+ consecutive polls.
+
+ The exact formula used for these calculations depends on the property
+ of each counter, and some counters additionally require a "base-counter".
+
+ Counter properties and other metadata are fetched from the target system
+ during PollCounter(), making the collector ONTAP-version independent.
+
+ The collector also maintains a cache of instances, which is updated
+ periodically during PollInstance().
+
+ The source code prioritizes performance over simplicity/readability.
+ Additionally, some objects (e.g. workloads) come with twists that
+ force the collector to do acrobatics. Don't expect to easily
+ comprehend what comes below.
+*/
+package zapiperf
import (
"goharvest2/cmd/poller/collector"
+ "goharvest2/cmd/poller/plugin"
"goharvest2/pkg/color"
"goharvest2/pkg/dict"
"goharvest2/pkg/errors"
@@ -18,38 +39,52 @@ import (
zapi "goharvest2/cmd/collectors/zapi/collector"
)
-// default parameter values
const (
+ // default parameter values
instanceKey = "uuid"
batchSize = 500
latencyIoReqd = 10
+ // objects that need special handling
+ objWorkload = "workload"
+ objWorkloadDetail = "workload_detail"
+ objWorkloadVolume = "workload_volume"
+ objWorkloadDetailVolume = "workload_detail_volume"
)
const BILLION = 1000000000
type ZapiPerf struct {
- *zapi.Zapi // provides: AbstractCollector, Connection, Object, Query, TemplateFn, TemplateType
+ *zapi.Zapi // provides: AbstractCollector, Client, Object, Query, TemplateFn, TemplateType
object string
batchSize int
latencyIoReqd int
instanceKey string
instanceLabels map[string]string
histogramLabels map[string][]string
+ scalarCounters []string
+ qosLabels map[string]string
isCacheEmpty bool
}
-func New(a *collector.AbstractCollector) collector.Collector {
- //return &ZapiPerf{AbstractCollector: a}
- return &ZapiPerf{Zapi: zapi.NewZapi(a)}
+func init() {
+ plugin.RegisterModule(ZapiPerf{})
+}
+
+func (ZapiPerf) HarvestModule() plugin.ModuleInfo {
+ return plugin.ModuleInfo{
+ ID: "harvest.collector.zapiperf",
+ New: func() plugin.Module { return new(ZapiPerf) },
+ }
}
-func (me *ZapiPerf) Init() error {
+func (me *ZapiPerf) Init(a *collector.AbstractCollector) error {
+ me.Zapi = &zapi.Zapi{AbstractCollector: a}
if err := me.InitVars(); err != nil {
return err
}
// Invoke generic initializer
- // this will load Schedule, initialize Data and Metadata
+ // this will load Schedule, initialize data and metadata Matrices
if err := collector.Init(me); err != nil {
return err
}
@@ -84,6 +119,7 @@ func (me *ZapiPerf) InitCache() error {
return nil
}
+// load a string parameter or use defaultValue
func (me *ZapiPerf) loadParamStr(name, defaultValue string) string {
var x string
@@ -96,11 +132,14 @@ func (me *ZapiPerf) loadParamStr(name, defaultValue string) string {
return defaultValue
}
+// load an int parameter or use defaultValue
func (me *ZapiPerf) loadParamInt(name string, defaultValue int) int {
- var x string
- var n int
- var e error
+ var (
+ x string
+ n int
+ e error
+ )
if x = me.Params.GetChildContentS(name); x != "" {
if n, e = strconv.Atoi(x); e == nil {
@@ -114,9 +153,15 @@ func (me *ZapiPerf) loadParamInt(name string, defaultValue int) int {
return defaultValue
}
+// PollData updates the data cache of the collector. During the first poll, no data is
+// emitted. Afterwards, final metric values are calculated from the delta with the previous poll.
func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
- var err error
+ var (
+ instanceKeys []string
+ resourceLatency matrix.Metric // for workload* objects
+ err error
+ )
me.Logger.Debug().Msg("updating data cache")
@@ -143,7 +188,20 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
// list of instance keys (instance names or uuids) for which
// we will request counter data
- instanceKeys := newData.GetInstanceKeys()
+ if me.Query == objWorkloadDetail || me.Query == objWorkloadDetailVolume {
+ if resourceMap := me.Params.GetChildS("resource_map"); resourceMap == nil {
+ return nil, errors.New(errors.MISSING_PARAM, "resource_map")
+ } else {
+ instanceKeys = make([]string, 0)
+ for _, layer := range resourceMap.GetAllChildNamesS() {
+ for key := range me.Matrix.GetInstances() {
+ instanceKeys = append(instanceKeys, key+"."+layer)
+ }
+ }
+ }
+ } else {
+ instanceKeys = newData.GetInstanceKeys()
+ }
// build ZAPI request
request := node.NewXmlS("perf-object-get-instances")
@@ -152,11 +210,8 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
// load requested counters (metrics + labels)
requestCounters := request.NewChildS("counters", "")
// load scalar metrics
- for key, m := range newData.GetMetrics() {
- // no histograms
- if !m.HasLabels() {
- requestCounters.NewChildS("counter", key)
- }
+ for _, key := range me.scalarCounters {
+ requestCounters.NewChildS("counter", key)
}
// load histograms
for key := range me.histogramLabels {
@@ -191,15 +246,18 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
if err = me.Client.BuildRequest(request); err != nil {
me.Logger.Error().Stack().Err(err).Msg("build request: ")
- //break?
return nil, err
}
response, rd, pd, err := me.Client.InvokeWithTimers()
if err != nil {
- //me.Logger.Error(me.Prefix, "data request: %v", err)
- //@TODO handle "resource limit exceeded"
- //break
+ // if ONTAP complains about batch size, use a smaller batch size
+ if strings.Contains(err.Error(), "resource limit exceeded") && me.batchSize > 100 {
+ me.Logger.Error().Stack().Err(err).Msg("")
+ me.Logger.Info().Msgf("changed batch_size from [%d] to [%d]", me.batchSize, me.batchSize-100)
+ me.batchSize -= 100
+ return nil, nil
+ }
return nil, err
}
@@ -224,6 +282,27 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
for _, i := range instances.GetChildren() {
key := i.GetChildContentS(me.instanceKey)
+
+ // special case for these two objects
+ // we need to process each latency layer for each instance/counter
+ if me.Query == objWorkloadDetail || me.Query == objWorkloadDetailVolume {
+
+ layer := "" // latency layer (resource) for workloads
+
+ if x := strings.Split(key, "."); len(x) == 2 {
+ key = x[0]
+ layer = x[1]
+ } else {
+ me.Logger.Warn().Msgf("instance name [%s] has unexpected format", key)
+ continue
+ }
+
+ if resourceLatency = newData.GetMetric(layer); resourceLatency == nil {
+ me.Logger.Warn().Msgf("resource-latency metric [%s] missing in cache", layer)
+ continue
+ }
+ }
+
if key == "" {
me.Logger.Debug().Msgf("skip instance, no key [%s] (name=%s, uuid=%s)", me.instanceKey, i.GetChildContentS("name"), i.GetChildContentS("uuid"))
continue
@@ -244,9 +323,8 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
me.Logger.Debug().Msgf("fetching data of instance [%s]", key)
// add batch timestamp as custom counter
- err := timestamp.SetValueFloat64(instance, ts)
- if err != nil {
- me.Logger.Error().Stack().Err(err).Msg("")
+ if err := timestamp.SetValueFloat64(instance, ts); err != nil {
+ me.Logger.Error().Stack().Err(err).Msg("set timestamp value: ")
}
for _, cnt := range counters.GetChildren() {
@@ -264,7 +342,7 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
me.Logger.Trace().Msgf("(%s%s%s) parsing counter (%s) = %v", color.Grey, key, color.End, name, value)
// ZAPI counter for us is either instance label (string)
- // or numeric metric (scalar or string)
+ // or numeric metric (scalar or histogram)
// store as instance label
if display, has := me.instanceLabels[name]; has {
@@ -299,6 +377,23 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
continue
}
+ // special case for workload_detail
+ if me.Query == objWorkloadDetail || me.Query == objWorkloadDetailVolume {
+ if name == "wait_time" || name == "service_time" {
+ if err := resourceLatency.AddValueString(instance, value); err != nil {
+ me.Logger.Error().Stack().Err(err).Msgf("add resource-latency (%s) value [%s]: %v", name, value, err)
+ } else {
+ me.Logger.Trace().Msgf("++ resource-latency (%s) = [%s%s%s]", name, color.Blue, value, color.End)
+ count++
+ }
+ continue
+ }
+ // "visits" are ignored
+ if name == "visits" {
+ continue
+ }
+ }
+
// store as scalar metric
if metric := newData.GetMetric(name); metric != nil {
if err = metric.SetValueString(instance, value); err != nil {
@@ -317,8 +412,17 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
} // end loop over instances
} // end batch request
- // terminate if serious errors
- // @TODO handle...
+ me.Logger.Debug().Msgf("collected %d data points in %d batch polls", count, batchCount)
+
+ if me.Query == objWorkloadDetail || me.Query == objWorkloadDetailVolume {
+ if rd, pd, err := me.getParentOpsCounters(newData, keyName); err == nil {
+ apiT += rd
+ parseT += pd
+ } else {
+ // no point to continue as we can't calculate the other counters
+ return nil, err
+ }
+ }
// update metadata
me.Metadata.LazySetValueInt64("api_time", "data", apiT.Microseconds())
@@ -326,8 +430,6 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
me.Metadata.LazySetValueUint64("count", "data", count)
me.AddCollectCount(count)
- me.Logger.Debug().Msgf("collected %d data points in %d batch polls", count, batchCount)
-
// skip calculating from delta if no data from previous poll
if me.isCacheEmpty {
me.Logger.Debug().Msg("skip postprocessing until next poll (previous cache empty)")
@@ -339,9 +441,8 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
calcStart := time.Now()
me.Logger.Debug().Msg("starting delta calculations from previous cache")
- //me.Logger.Debug(me.Prefix, "data has dimensions (%d x %d)", len(newData.Data), len(newData.Data[0]))
- // cache data, to store after calculations
+ // cache raw data for next poll
cachedData := newData.Clone(true, true, true) // @TODO implement copy data
// order metrics, such that those requiring base counters are processed last
@@ -363,18 +464,11 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
// calculate timestamp delta first since many counters require it for postprocessing
// timestamp has "raw" property, so won't be postprocessed automatically
- // fmt.Printf("\npostprocessing %s%s%s - %s%v%s\n", color.Red, timestamp.Name, color.End, color.Bold, timestamp.Properties, color.End)
- //me.Logger.Debug(me.Prefix, "cooking [%s] (%s)", timestamp.Name, timestamp.Properties)
- //print_vector("current", newData.Data[timestamp.Index])
- //print_vector("previous", me.Data.Data[timestamp.Index])
if err = timestamp.Delta(me.Matrix.GetMetric("timestamp")); err != nil {
me.Logger.Error().Stack().Err(err).Msg("(timestamp) calculate delta:")
// @TODO terminate since other counters will be incorrect
}
- //newData.Delta(me.Data, timestamp.Index)
- //print_vector(color.Green+"delta"+color.End, newData.Data[timestamp.Index])
-
var base matrix.Metric
for i, metric := range orderedMetrics {
@@ -382,25 +476,28 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
property := metric.GetProperty()
key := orderedKeys[i]
- // raw counters don't require postprocessing
- if property == "raw" || property == "" {
+ // RAW - submit without post-processing
+ if property == "raw" {
continue
}
- // for all the other properties we start with delta
+ // all other properties - first calculate delta
if err = metric.Delta(me.Matrix.GetMetric(key)); err != nil {
me.Logger.Error().Stack().Err(err).Msgf("(%s) calculate delta:", key)
continue
}
+ // DELTA - subtract previous value from current
if property == "delta" {
// already done
continue
}
- // rate is delta, normalized by elapsed time
- // we skip calculating rates to fist calculate averages and percentages
+ // RATE - delta, normalized by elapsed time
if property == "rate" {
+ // defer calculation, so we can first calculate averages/percents
+ // Note: calculating rates before averages/percentages are calculated
+ // used to be a bug in Harvest 2.0 (Alpha, RC1, RC2), resulting in very high latency values
continue
}
@@ -412,11 +509,15 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
continue
}
- // average and percentage are calculated by dividing by the value of the base counter
+ // remaining properties: average and percent
+ //
+ // AVERAGE - delta, divided by base-counter delta
+ //
+ // PERCENT - average * 100
// special case for latency counter: apply minimum number of iops as threshold
if property == "average" || property == "percent" {
- if strings.HasSuffix(metric.GetName(), "_latency") {
+ if strings.HasSuffix(metric.GetName(), "latency") {
err = metric.DivideWithThreshold(base, me.latencyIoReqd)
} else {
err = metric.Divide(base)
@@ -451,13 +552,139 @@ func (me *ZapiPerf) PollData() (*matrix.Matrix, error) {
}
me.Metadata.LazySetValueInt64("calc_time", "data", time.Since(calcStart).Microseconds())
+
// store cache for next poll
me.Matrix = cachedData
- //me.Data.IsEmpty = false // @redundant
return newData, nil
}
+// getParentOpsCounters polls the "ops" counter of the related/parent object,
+// required for the objects workload_detail and workload_detail_volume. This counter
+// is already collected by the other ZapiPerf collectors, so this poll is redundant
+// (until some sort of inter-collector communication is implemented).
+func (me *ZapiPerf) getParentOpsCounters(data *matrix.Matrix, KeyAttr string) (time.Duration, time.Duration, error) {
+
+ var (
+ ops matrix.Metric
+ object string
+ instanceKeys []string
+ apiT, parseT time.Duration
+ err error
+ )
+
+ if me.Query == objWorkloadDetail {
+ object = objWorkload
+ } else {
+ object = objWorkloadVolume
+ }
+
+ me.Logger.Debug().Msgf("(%s) starting redundancy poll for ops from parent object (%s)", me.Query, object)
+
+ apiT = 0 * time.Second
+ parseT = 0 * time.Second
+
+ if ops = data.GetMetric("ops"); ops == nil {
+ me.Logger.Error().Stack().Err(nil).Msgf("ops counter not found in cache")
+ return apiT, parseT, errors.New(errors.MISSING_PARAM, "counter ops")
+ }
+
+ instanceKeys = data.GetInstanceKeys()
+
+ // build ZAPI request
+ request := node.NewXmlS("perf-object-get-instances")
+ request.NewChildS("objectname", object)
+
+ requestCounters := request.NewChildS("counters", "")
+ requestCounters.NewChildS("counter", "ops")
+
+ // batch indices
+ startIndex := 0
+ endIndex := 0
+
+ count := 0
+
+ for endIndex < len(instanceKeys) {
+
+ // update batch indices
+ endIndex += me.batchSize
+ if endIndex > len(instanceKeys) {
+ endIndex = len(instanceKeys)
+ }
+
+ me.Logger.Debug().Msgf("starting batch poll for instances [%d:%d]", startIndex, endIndex)
+
+ request.PopChildS(KeyAttr + "s")
+ requestInstances := request.NewChildS(KeyAttr+"s", "")
+ for _, key := range instanceKeys[startIndex:endIndex] {
+ requestInstances.NewChildS(KeyAttr, key)
+ }
+
+ startIndex = endIndex
+
+ if err = me.Client.BuildRequest(request); err != nil {
+ return apiT, parseT, err
+ }
+
+ response, rt, pt, err := me.Client.InvokeWithTimers()
+ if err != nil {
+ return apiT, parseT, err
+ }
+
+ apiT += rt
+ parseT += pt
+
+ // fetch instances
+ instances := response.GetChildS("instances")
+ if instances == nil || len(instances.GetChildren()) == 0 {
+ return apiT, parseT, err
+ }
+
+ for _, i := range instances.GetChildren() {
+
+ key := i.GetChildContentS(me.instanceKey)
+
+ if key == "" {
+ me.Logger.Debug().Msgf("skip instance, no key [%s] (name=%s, uuid=%s)", me.instanceKey, i.GetChildContentS("name"), i.GetChildContentS("uuid"))
+ continue
+ }
+
+ instance := data.GetInstance(key)
+ if instance == nil {
+ me.Logger.Warn().Msgf("skip instance [%s], not found in cache", key)
+ continue
+ }
+
+ counters := i.GetChildS("counters")
+ if counters == nil {
+ me.Logger.Debug().Msgf("skip instance [%s], no data counters", key)
+ continue
+ }
+
+ for _, cnt := range counters.GetChildren() {
+
+ name := cnt.GetChildContentS("name")
+ value := cnt.GetChildContentS("value")
+
+ me.Logger.Trace().Msgf("(%s%s%s) parsing counter (%s) = %v", color.Grey, key, color.End, name, value)
+
+ if name == "ops" {
+ if err = ops.SetValueString(instance, value); err != nil {
+ me.Logger.Error().Stack().Err(err).Msgf("set metric (%s) value [%s]", name, value)
+ } else {
+ me.Logger.Trace().Msgf("+ metric (%s) = [%s%s%s]", name, color.Cyan, value, color.End)
+ count++
+ }
+ } else {
+ me.Logger.Error().Stack().Err(nil).Msgf("unrequested metric (%s)", name)
+ }
+ }
+ }
+ }
+ me.Logger.Debug().Msgf("(%s) completed redundant ops poll (%s): collected %d", me.Query, object, count)
+ return apiT, parseT, nil
+}
+
func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
var (
@@ -469,6 +696,7 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
counters map[string]*node.Node
)
+ me.scalarCounters = make([]string, 0)
counters = make(map[string]*node.Node)
oldMetrics = set.New() // current set of metrics, so we can remove from matrix if not updated
oldLabels = set.New() // current set of labels
@@ -535,6 +763,7 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
// override counter properties from template
if p := me.GetOverride(key); p != "" {
+ me.Logger.Debug().Msgf("%soverride counter [%s] property [%s] => [%s]%s", color.Red, key, counter.GetChildContentS("properties"), p, color.End)
counter.SetChildContentS("properties", p)
}
@@ -579,7 +808,7 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
for name, counter := range counters {
if replaced.Has(name) {
oldMetrics.Delete(name)
- me.Logger.Debug().Msgf("adding [%s] (replacment for deprecated counter)", name)
+ me.Logger.Debug().Msgf("adding [%s] (replacement for deprecated counter)", name)
if r := me.addCounter(counter, name, name, true, counters); r != "" && !wanted.Has(r) {
missing.Add(r) // required base counter, missing in template
me.Logger.Debug().Msgf("%smarking [%s] as required base counter for [%s]%s", color.Red, r, name, color.End)
@@ -592,7 +821,6 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
if missing.Size() > 0 {
me.Logger.Debug().Msgf("attempting to retrieve metadata of %d missing base counters", missing.Size())
for name, counter := range counters {
- //me.Logger.Debug(me.Prefix, "%shas??? [%s]%s", color.Grey, name, color.End)
if missing.Has(name) {
oldMetrics.Delete(name)
me.Logger.Debug().Msgf("adding [%s] (missing base counter)", name)
@@ -614,6 +842,88 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
m.SetExportable(false)
}
+ // hack for workload objects, @TODO replace with a plugin
+ if me.Query == objWorkload || me.Query == objWorkloadDetail || me.Query == objWorkloadVolume || me.Query == objWorkloadDetailVolume {
+
+ // for these two objects, we need to create latency/ops counters for each of the workload layers
+ // their original counters will be discarded
+ if me.Query == objWorkloadDetail || me.Query == objWorkloadDetailVolume {
+
+ var service, wait, visits, ops matrix.Metric
+ oldMetrics.Delete("service_time")
+ oldMetrics.Delete("wait_time")
+ oldMetrics.Delete("visits")
+ oldMetrics.Delete("ops")
+
+ if service = me.Matrix.GetMetric("service_time"); service == nil {
+ me.Logger.Error().Stack().Err(nil).Msg("metric [service_time] required to calculate workload missing")
+ }
+
+ if wait = me.Matrix.GetMetric("wait_time"); wait == nil {
+ me.Logger.Error().Stack().Err(nil).Msg("metric [wait-time] required to calculate workload missing")
+ }
+
+ if visits = me.Matrix.GetMetric("visits"); visits == nil {
+ me.Logger.Error().Stack().Err(nil).Msg("metric [visits] required to calculate workload missing")
+ }
+
+ if service == nil || wait == nil || visits == nil {
+ return nil, errors.New(errors.MISSING_PARAM, "workload metrics")
+ }
+
+ if ops = me.Matrix.GetMetric("ops"); ops == nil {
+ if ops, err = me.Matrix.NewMetricFloat64("ops"); err != nil {
+ return nil, err
+ }
+ ops.SetProperty(visits.GetProperty())
+ me.Logger.Debug().Msgf("+ [resource_ops] [%s] added workload ops metric with property (%s)", ops.GetName(), ops.GetProperty())
+ }
+
+ service.SetExportable(false)
+ wait.SetExportable(false)
+ visits.SetExportable(false)
+
+ if resourceMap := me.Params.GetChildS("resource_map"); resourceMap == nil {
+ return nil, errors.New(errors.MISSING_PARAM, "resource_map")
+ } else {
+ for _, x := range resourceMap.GetChildren() {
+ name := x.GetNameS()
+ resource := x.GetContentS()
+
+ if m, err := me.Matrix.NewMetricFloat64(name); err != nil {
+ return nil, err
+ } else {
+ m.SetName("resource_latency")
+ m.SetLabel("resource", resource)
+ m.SetProperty(service.GetProperty())
+ // base counter is the ops of the same resource
+ m.SetComment("ops")
+
+ oldMetrics.Delete(name)
+ me.Logger.Debug().Msgf("+ [%s] (=> %s) added workload latency metric", name, resource)
+ }
+ }
+ }
+ }
+
+ if qosLabels := me.Params.GetChildS("qos_labels"); qosLabels == nil {
+ return nil, errors.New(errors.MISSING_PARAM, "qos_labels")
+ } else {
+ me.qosLabels = make(map[string]string)
+ for _, label := range qosLabels.GetAllChildContentS() {
+
+ display := strings.ReplaceAll(label, "-", "_")
+ if x := strings.Split(label, "=>"); len(x) == 2 {
+ label = strings.TrimSpace(x[0])
+ display = strings.TrimSpace(x[1])
+ }
+ me.qosLabels[label] = display
+ //me.instanceLabels[label] = display
+ //oldLabels.Delete(label)
+ }
+ }
+ }
+
for key := range oldMetrics.Iter() {
// temporary fix: prevent removing array counters
// @TODO
@@ -624,7 +934,6 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
}
for key := range oldLabels.Iter() {
- //me.Data.RemoveLabel(key)
delete(me.instanceLabels, key)
me.Logger.Debug().Msgf("removed label [%s]", key)
}
@@ -642,6 +951,7 @@ func (me *ZapiPerf) PollCounter() (*matrix.Matrix, error) {
return nil, nil
}
+// create new or update existing metric based on Zapi counter metadata
func (me *ZapiPerf) addCounter(counter *node.Node, name, display string, enabled bool, cache map[string]*node.Node) string {
var (
@@ -749,6 +1059,7 @@ func (me *ZapiPerf) addCounter(counter *node.Node, name, display string, enabled
return ""
}
+ me.scalarCounters = append(me.scalarCounters, name)
m.SetName(display)
m.SetProperty(property)
m.SetComment(baseCounter)
@@ -758,6 +1069,7 @@ func (me *ZapiPerf) addCounter(counter *node.Node, name, display string, enabled
return baseCounter
}
+// override counter property
func (me *ZapiPerf) GetOverride(counter string) string {
if o := me.Params.GetChildS("override"); o != nil {
return o.GetChildContentS(counter)
@@ -765,6 +1077,8 @@ func (me *ZapiPerf) GetOverride(counter string) string {
return ""
}
+// parse ZAPI array counter (histogram), so we can store it
+// as multiple flat metrics
func parseHistogramLabels(elem *node.Node) ([]string, string) {
var (
labels []string
@@ -790,14 +1104,15 @@ func parseHistogramLabels(elem *node.Node) ([]string, string) {
return labels, msg
}
+// Update instance cache
func (me *ZapiPerf) PollInstance() (*matrix.Matrix, error) {
var (
- err error
- request, results *node.Node
- oldInstances *set.Set
- oldSize, newSize, removed, added int
- instancesAttr string
+ err error
+ request, results *node.Node
+ oldInstances *set.Set
+ oldSize, newSize, removed, added int
+ keyAttr, instancesAttr, nameAttr, uuidAttr string
)
oldInstances = set.New()
@@ -808,15 +1123,41 @@ func (me *ZapiPerf) PollInstance() (*matrix.Matrix, error) {
me.Logger.Debug().Msgf("updating instance cache (old cache has: %d)", oldInstances.Size())
- if me.Client.IsClustered() {
+ nameAttr = "name"
+ uuidAttr = "uuid"
+ keyAttr = me.instanceKey
+
+ // hack for workload objects: get instances from Zapi
+ if me.Query == objWorkload || me.Query == objWorkloadDetail || me.Query == objWorkloadVolume || me.Query == objWorkloadDetailVolume {
+ request = node.NewXmlS("qos-workload-get-iter")
+ queryElem := request.NewChildS("query", "")
+ infoElem := queryElem.NewChildS("qos-workload-info", "")
+ if me.Query == objWorkloadVolume || me.Query == objWorkloadDetailVolume {
+ infoElem.NewChildS("workload-class", "autovolume")
+ } else {
+ infoElem.NewChildS("workload-class", "user-defined")
+ }
+
+ instancesAttr = "attributes-list"
+ nameAttr = "workload-name"
+ uuidAttr = "workload-uuid"
+ if me.instanceKey == "instance_name" || me.instanceKey == "name" {
+ keyAttr = "workload-name"
+ } else {
+ keyAttr = "workload-uuid"
+ }
+ // syntax for cdot/perf
+ } else if me.Client.IsClustered() {
request = node.NewXmlS("perf-object-instance-list-info-iter")
+ request.NewChildS("objectname", me.Query)
instancesAttr = "attributes-list"
+ // syntax for 7mode/perf
} else {
request = node.NewXmlS("perf-object-instance-list-info")
+ request.NewChildS("objectname", me.Query)
instancesAttr = "instances"
}
- request.NewChildS("objectname", me.Query)
if me.Client.IsClustered() {
request.NewChildS("max-records", strconv.Itoa(me.batchSize))
}
@@ -842,18 +1183,27 @@ func (me *ZapiPerf) PollInstance() (*matrix.Matrix, error) {
for _, i := range instances.GetChildren() {
- if key := i.GetChildContentS(me.instanceKey); key == "" {
+ if key := i.GetChildContentS(keyAttr); key == "" {
// instance key missing
- n := i.GetChildContentS("name")
- u := i.GetChildContentS("uuid")
- me.Logger.Debug().Msgf("skip instance, missing key [%s] (name=%s, uuid=%s)", me.instanceKey, n, u)
+ name := i.GetChildContentS(nameAttr)
+ uuid := i.GetChildContentS(uuidAttr)
+ me.Logger.Debug().Msgf("skip instance, missing key [%s] (name=%s, uuid=%s)", me.instanceKey, name, uuid)
} else if oldInstances.Delete(key) {
// instance already in cache
+ me.Logger.Debug().Msgf("updated instance [%s%s%s%s]", color.Bold, color.Yellow, key, color.End)
continue
- } else if _, err = me.Matrix.NewInstance(key); err != nil {
- me.Logger.Warn().Msgf("add instance: %v", err)
+ } else if instance, err := me.Matrix.NewInstance(key); err != nil {
+ me.Logger.Error().Err(err).Msg("add instance")
} else {
me.Logger.Debug().Msgf("added new instance [%s]", key)
+ if me.Query == objWorkload || me.Query == objWorkloadDetail || me.Query == objWorkloadVolume || me.Query == objWorkloadDetailVolume {
+ for label, display := range me.qosLabels {
+ if value := i.GetChildContentS(label); value != "" {
+ instance.SetLabel(display, value)
+ }
+ }
+ me.Logger.Debug().Msgf("(%s) [%s] added QOS labels: %s", me.Query, key, instance.GetLabels().String())
+ }
}
}
}
@@ -876,5 +1226,7 @@ func (me *ZapiPerf) PollInstance() (*matrix.Matrix, error) {
return nil, err
}
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
+// Interface guards
+var (
+ _ collector.Collector = (*ZapiPerf)(nil)
+)
diff --git a/cmd/exporters/influxdb/README.md b/cmd/exporters/influxdb/README.md
index 5fe42c761..37312e5e3 100644
--- a/cmd/exporters/influxdb/README.md
+++ b/cmd/exporters/influxdb/README.md
@@ -9,11 +9,14 @@ The InfluxDB Exporter will format metrics into the InfluxDB's [line protocol](ht
## Parameters
-Overview of all parameters:
+An overview of all parameters is provided below. Exactly one of `url` or `addr` must be provided.
+If `url` is specified, you must include all arguments in the URL. Harvest will do no additional processing and will use exactly what you specify (e.g. `url: https://influxdb.example.com:8086/write?db=netapp&u=user&p=pass&precision=2`).
+This means that when `url` is used, the `org`, `bucket`, `port`, and `precision` fields are ignored.
| parameter | type | description | default |
|------------------------|--------------|--------------------------------------------------|------------------------|
-| `addr` | string | address of the database | |
+| `url` | string | URL of the database, format: `SCHEME://HOST[:PORT]` | |
+| `addr` | string | address of the database, format: `[SCHEME://]HOST` | |
| `port` | int, optional| port of the database | `8086` |
| `bucket` | string | InfluxDB bucket to write | |
| `org` | string | InfluxDB organization name | |
@@ -34,8 +37,6 @@ Exporters:
bucket: harvest
org: harvest
token: ZTTrt%24@#WNFM2VZTTNNT25wZWUdtUmhBZEdVUmd3dl@#
- allow_addrs_regex:
- - `^192.168.0.\d+$`
```
Notice: InfluxDB stores a token in `~/.influxdbv2/configs`, but you can also retrieve it from the UI (usually serving on `localhost:8086`): click on "Data" on the left task bar, then on "Tokens".
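
For illustration, the two addressing styles described above can be configured as follows. This is a hedged sketch: the exporter names, hostname, and token are placeholders, and the `exporter` key follows the usual `harvest.yml` exporter syntax.

```yaml
Exporters:
  # addr style: Harvest builds the write URL from addr, port, org, bucket, and precision
  influx-by-addr:
    exporter: InfluxDB
    addr: influxdb.example.com
    bucket: harvest
    org: harvest
    token: my-token

  # url style: used exactly as given; org, bucket, port, and precision are ignored
  influx-by-url:
    exporter: InfluxDB
    url: https://influxdb.example.com:8086/api/v2/write?org=harvest&bucket=harvest&precision=s
    token: my-token
```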
diff --git a/cmd/exporters/influxdb/influxdb.go b/cmd/exporters/influxdb/influxdb.go
index 1db4a2882..2de799891 100644
--- a/cmd/exporters/influxdb/influxdb.go
+++ b/cmd/exporters/influxdb/influxdb.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package influxdb
import (
"bytes"
@@ -50,20 +50,12 @@ func (e *InfluxDB) Init() error {
return err
}
- var addr, port, bucket, org, v, p string
- var err error
+ var (
+ url, addr, port, bucket, org, v, p string
+ err error
+ )
// check required / optional parameters
- if addr = e.Params.GetChildContentS("addr"); addr == "" {
- return errors.New(errors.MISSING_PARAM, "addr")
- }
-
- if port = e.Params.GetChildContentS("port"); port == "" {
- e.Logger.Debug().Msgf("using default port [%s]", defaultPort)
- port = defaultPort
- } else if _, err = strconv.Atoi(port); err != nil {
- return errors.New(errors.INVALID_PARAM, "port")
- }
if bucket = e.Params.GetChildContentS("bucket"); bucket == "" {
return errors.New(errors.MISSING_PARAM, "bucket")
@@ -91,6 +83,27 @@ func (e *InfluxDB) Init() error {
}
e.Logger.Debug().Msgf("using api precision [%s]", p)
+ // user should provide either url or addr
+ // url is expected to be the full write URL with all query params specified (optionally with scheme)
+ // addr is expected to include host only (no scheme, no port)
+ if url = e.Params.GetChildContentS("url"); url == "" {
+ if addr = e.Params.GetChildContentS("addr"); addr == "" {
+ return errors.New(errors.MISSING_PARAM, "url or addr")
+ }
+
+ if port = e.Params.GetChildContentS("port"); port == "" {
+ e.Logger.Debug().Msgf("using default port [%s]", defaultPort)
+ port = defaultPort
+ } else if _, err = strconv.Atoi(port); err != nil {
+ return errors.New(errors.INVALID_PARAM, "port")
+ }
+
+ url = "http://" + addr + ":" + port
+ e.url = fmt.Sprintf("%s/api/v%s/write?org=%s&bucket=%s&precision=%s", url, v, org, bucket, p)
+ } else {
+ e.url = url
+ }
+
// timeout parameter
timeout := time.Duration(detaultTimeout) * time.Second
if ct := e.Params.GetChildContentS("client_timeout"); ct != "" {
@@ -103,8 +116,6 @@ func (e *InfluxDB) Init() error {
e.Logger.Debug().Msgf("using default client_timeout: %d s", detaultTimeout)
}
- // construct client URL
- e.url = fmt.Sprintf("http://%s:%s/api/v%s/write?org=%s&bucket=%s&precision=%s", addr, port, v, org, bucket, p)
e.Logger.Debug().Msgf("url= [%s]", e.url)
// construct HTTP client
@@ -322,6 +333,3 @@ func (e *InfluxDB) Render(data *matrix.Matrix) ([][]byte, error) {
}
return rendered, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/exporters/influxdb/influxdb_test.go b/cmd/exporters/influxdb/influxdb_test.go
index f3360be7b..65fe21bed 100644
--- a/cmd/exporters/influxdb/influxdb_test.go
+++ b/cmd/exporters/influxdb/influxdb_test.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package influxdb
import (
"goharvest2/cmd/poller/exporter"
@@ -11,6 +11,59 @@ import (
"testing"
)
+// test that the addr (and port) parameters
+// are handled properly to construct server URL
+func TestAddrParameter(t *testing.T) {
+
+ expectedURL := "http://localhost:8086/api/v2/write?org=netapp&bucket=harvest&precision=s"
+
+ opts := &options.Options{}
+ opts.Debug = true
+
+ params := node.NewS("")
+ params.NewChildS("addr", "localhost")
+ params.NewChildS("org", "netapp")
+ params.NewChildS("bucket", "harvest")
+ params.NewChildS("token", "xxxxxxx")
+
+ influx := &InfluxDB{AbstractExporter: exporter.New("InfluxDB", "influx-test", opts, params)}
+ if err := influx.Init(); err != nil {
+ t.Fatal(err)
+ }
+
+ if influx.url == expectedURL {
+ t.Logf("OK - url: [%s]", expectedURL)
+ } else {
+ t.Fatalf("FAIL - expected [%s]\n got [%s]", expectedURL, influx.url)
+ }
+}
+
+// test that the url parameter
+// is used as-is as the server URL
+func TestUrlParameter(t *testing.T) {
+
+ expectedURL := "https://some-valid-domain-name.net/api/v2/write?org=netapp&bucket=harvest&precision=s"
+
+ opts := &options.Options{}
+ opts.Debug = true
+
+ params := node.NewS("")
+ params.NewChildS("url", "https://some-valid-domain-name.net/api/v2/write?org=netapp&bucket=harvest&precision=s")
+ params.NewChildS("org", "netapp")
+ params.NewChildS("bucket", "harvest")
+ params.NewChildS("token", "xxxxxxx")
+ influx := &InfluxDB{AbstractExporter: exporter.New("InfluxDB", "influx-test", opts, params)}
+ if err := influx.Init(); err != nil {
+ t.Fatal(err)
+ }
+
+ if influx.url == expectedURL {
+ t.Logf("OK - url: [%s]", expectedURL)
+ } else {
+ t.Fatalf("FAIL - expected [%s]\n got [%s]", expectedURL, influx.url)
+ }
+}
+
// test rendering in debug mode
// this does not send to influxdb, but simply prints
// rendered data
diff --git a/cmd/exporters/influxdb/measurement.go b/cmd/exporters/influxdb/measurement.go
index f08780744..08ef5bd2b 100644
--- a/cmd/exporters/influxdb/measurement.go
+++ b/cmd/exporters/influxdb/measurement.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package influxdb
import (
"fmt"
diff --git a/cmd/exporters/influxdb/measurement_test.go b/cmd/exporters/influxdb/measurement_test.go
index 00b4ab479..e9c803a22 100644
--- a/cmd/exporters/influxdb/measurement_test.go
+++ b/cmd/exporters/influxdb/measurement_test.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package influxdb
import "testing"
diff --git a/cmd/exporters/prometheus/cache.go b/cmd/exporters/prometheus/cache.go
index f491da5ea..ae73864c8 100644
--- a/cmd/exporters/prometheus/cache.go
+++ b/cmd/exporters/prometheus/cache.go
@@ -2,7 +2,7 @@
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package prometheus
import (
"sync"
diff --git a/cmd/exporters/prometheus/html.go b/cmd/exporters/prometheus/html.go
index 4261ba11c..b5f1fe9f0 100644
--- a/cmd/exporters/prometheus/html.go
+++ b/cmd/exporters/prometheus/html.go
@@ -1,7 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
-package main
+package prometheus
var html_template = `
diff --git a/cmd/exporters/prometheus/httpd.go b/cmd/exporters/prometheus/httpd.go
index b1949081e..30afd4696 100644
--- a/cmd/exporters/prometheus/httpd.go
+++ b/cmd/exporters/prometheus/httpd.go
@@ -1,11 +1,10 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
+ */
-Package Description:
- The HTTP daemon exposes metrics for the Prometheus database
- as well as a list of the names of available metrics for humans
-*/
-package main
+// Package prometheus creates an HTTP endpoint for Prometheus to scrape on `/metrics`.
+// It also publishes a list of available metrics for human consumption on `/`.
+package prometheus
import (
"bytes"
@@ -16,19 +15,19 @@ import (
"time"
)
-func (me *Prometheus) startHttpD(addr, port string) {
+func (me *Prometheus) startHttpD(addr string, port int) {
mux := http.NewServeMux()
mux.HandleFunc("/", me.ServeInfo)
mux.HandleFunc("/metrics", me.ServeMetrics)
- me.Logger.Debug().Msgf(" (httpd)", "starting server at [%s:%s]", addr, port)
- server := &http.Server{Addr: addr + ":" + port, Handler: mux}
+ me.Logger.Debug().Msgf("(httpd) starting server at [%s:%s]", addr, port)
+ server := &http.Server{Addr: addr + ":" + fmt.Sprint(port), Handler: mux}
if err := server.ListenAndServe(); err != nil {
me.Logger.Fatal().Msgf(" (httpd) %v", err.Error())
} else {
- me.Logger.Info().Msgf(" (httpd)", "listening at [http://%s:%s]", addr, port)
+ me.Logger.Info().Msgf("(httpd) listening at [http://%s:%s]", addr, port)
}
}
@@ -71,7 +70,7 @@ func (me *Prometheus) checkAddr(addr string) bool {
// send a deny request response
func (me *Prometheus) denyAccess(w http.ResponseWriter, r *http.Request) {
- me.Logger.Debug().Msgf(" (httpd) ", "denied request [%s] (%s)", r.RequestURI, r.RemoteAddr)
+ me.Logger.Debug().Msgf("(httpd) denied request [%s] (%s)", r.RequestURI, r.RemoteAddr)
w.WriteHeader(403)
w.Header().Set("content-type", "text/plain")
_, err := w.Write([]byte("403 Forbidden"))
@@ -94,7 +93,7 @@ func (me *Prometheus) ServeMetrics(w http.ResponseWriter, r *http.Request) {
return
}
- me.Logger.Debug().Msgf(" (httpd) ", "serving request [%s] (%s)", r.RequestURI, r.RemoteAddr)
+ me.Logger.Debug().Msgf("(httpd) serving request [%s] (%s)", r.RequestURI, r.RemoteAddr)
me.cache.Lock()
for _, metrics := range me.cache.Get() {
@@ -117,15 +116,26 @@ func (me *Prometheus) ServeMetrics(w http.ResponseWriter, r *http.Request) {
if md, err := e.Render(e.Metadata); err == nil {
data = append(data, md...)
- }*/
+ }
+ */
+
+ if me.addMetaTags {
+ data = filterMetaTags(data)
+ }
w.WriteHeader(200)
w.Header().Set("content-type", "text/plain")
_, err := w.Write(bytes.Join(data, []byte("\n")))
if err != nil {
- me.Logger.Error().Stack().Err(err).Msg("error")
+ me.Logger.Error().Stack().Err(err).Msg("write metrics")
}
+ // make sure stream ends with newline
+ if _, err = w.Write([]byte("\n")); err != nil {
+ me.Logger.Error().Stack().Err(err).Msg("write ending newline")
+ }
+
+ // update metadata
me.Metadata.Reset()
err = me.Metadata.LazySetValueInt64("time", "http", time.Since(start).Microseconds())
if err != nil {
@@ -137,13 +147,37 @@ func (me *Prometheus) ServeMetrics(w http.ResponseWriter, r *http.Request) {
}
}
-// provide a human-friendly overview of metric types and source collectors
-// this is than in a very inefficient way, by "reverse engineering" the metrics,
-// but probably that's ok, since we don't expected this to be requested
-// very often.
-// @TODO: also add plugins and plugin metrics
-func (me *Prometheus) ServeInfo(w http.ResponseWriter, r *http.Request) {
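+// filterMetaTags removes duplicate HELP/TYPE metatags: only the first HELP/TYPE
+// pair seen for each metric name is kept, metric sample lines are passed through unchanged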
+func filterMetaTags(metrics [][]byte) [][]byte {
+
+ filtered := make([][]byte, 0)
+
+ metricsWithTags := make(map[string]bool)
+ for i, m := range metrics {
+ if bytes.HasPrefix(m, []byte("# ")) {
+ if fields := strings.Fields(string(m)); len(fields) > 3 {
+ name := fields[2]
+ if !metricsWithTags[name] {
+ metricsWithTags[name] = true
+ filtered = append(filtered, m)
+ if i+1 < len(metrics) {
+ filtered = append(filtered, metrics[i+1])
+ i++
+ }
+ }
+ }
+ } else {
+ filtered = append(filtered, m)
+ }
+ }
+ return filtered
+}
+
+// ServeInfo provides a human-friendly overview of metric types and source collectors.
+// This is done in a very inefficient way, by "reverse engineering" the metrics.
+// That's probably ok, since we don't expect this to be called often.
+func (me *Prometheus) ServeInfo(w http.ResponseWriter, r *http.Request) {
+ // TODO: also add plugins and plugin metrics
start := time.Now()
if !me.checkAddr(r.RemoteAddr) {
@@ -151,15 +185,15 @@ func (me *Prometheus) ServeInfo(w http.ResponseWriter, r *http.Request) {
return
}
- me.Logger.Debug().Msgf(" (httpd)", "serving info request [%s] (%s)", r.RequestURI, r.RemoteAddr)
+ me.Logger.Debug().Msgf("(httpd) serving info request [%s] (%s)", r.RequestURI, r.RemoteAddr)
body := make([]string, 0)
- num_collectors := 0
- num_objects := 0
- num_metrics := 0
+ numCollectors := 0
+ numObjects := 0
+ numMetrics := 0
- unique_data := map[string]map[string][]string{}
+ uniqueData := map[string]map[string][]string{}
// copy cache so we don't lock it
me.cache.Lock()
@@ -170,16 +204,16 @@ func (me *Prometheus) ServeInfo(w http.ResponseWriter, r *http.Request) {
}
me.cache.Unlock()
- me.Logger.Debug().Msgf(" (httpd)", "fetching %d cached elements", len(cache))
+ me.Logger.Debug().Msgf("(httpd) fetching %d cached elements", len(cache))
for key, data := range cache {
- me.Logger.Debug().Msgf(" (httpd)", "key => [%s] (%d)", key, len(data))
+ me.Logger.Debug().Msgf("(httpd) key => [%s] (%d)", key, len(data))
var collector, object string
if keys := strings.Split(key, "."); len(keys) == 2 {
collector = keys[0]
object = keys[1]
- me.Logger.Debug().Msgf(" (httpd)", "collector [%s] - object [%s]", collector, object)
+ me.Logger.Debug().Msgf("(httpd) collector [%s] - object [%s]", collector, object)
} else {
continue
}
@@ -189,44 +223,44 @@ func (me *Prometheus) ServeInfo(w http.ResponseWriter, r *http.Request) {
continue
}
- metric_names := set.New()
+ metricNames := set.New()
for _, m := range data {
if x := strings.Split(string(m), "{"); len(x) >= 2 && x[0] != "" {
- metric_names.Add(x[0])
+ metricNames.Add(x[0])
}
}
- num_metrics += metric_names.Size()
+ numMetrics += metricNames.Size()
- if _, exists := unique_data[collector]; !exists {
- unique_data[collector] = make(map[string][]string)
+ if _, exists := uniqueData[collector]; !exists {
+ uniqueData[collector] = make(map[string][]string)
}
- unique_data[collector][object] = metric_names.Values()
+ uniqueData[collector][object] = metricNames.Values()
}
- for col, per_object := range unique_data {
+ for col, perObject := range uniqueData {
objects := make([]string, 0)
- for obj, metric_names := range per_object {
+ for obj, metricNames := range perObject {
metrics := make([]string, 0)
- for _, m := range metric_names {
+ for _, m := range metricNames {
if m != "" {
metrics = append(metrics, fmt.Sprintf(metric_template, m))
}
}
objects = append(objects, fmt.Sprintf(object_template, obj, strings.Join(metrics, "\n")))
- num_objects += 1
+ numObjects += 1
}
body = append(body, fmt.Sprintf(collector_template, col, strings.Join(objects, "\n")))
- num_collectors += 1
+ numCollectors += 1
}
poller := me.Options.Poller
- body_flat := fmt.Sprintf(html_template, poller, poller, poller, num_collectors, num_objects, num_metrics, strings.Join(body, "\n\n"))
+ bodyFlat := fmt.Sprintf(html_template, poller, poller, poller, numCollectors, numObjects, numMetrics, strings.Join(body, "\n\n"))
w.WriteHeader(200)
w.Header().Set("content-type", "text/html")
- _, err := w.Write([]byte(body_flat))
+ _, err := w.Write([]byte(bodyFlat))
if err != nil {
me.Logger.Error().Stack().Err(err).Msg("error")
}
diff --git a/cmd/exporters/prometheus/prometheus.go b/cmd/exporters/prometheus/prometheus.go
index 20e2eb88a..cff7f9551 100644
--- a/cmd/exporters/prometheus/prometheus.go
+++ b/cmd/exporters/prometheus/prometheus.go
@@ -19,7 +19,7 @@ Package Description:
on the cache creates a race-condition (not caught on all Linux systems).
*/
-package main
+package prometheus
import (
"fmt"
@@ -159,15 +159,20 @@ func (me *Prometheus) Init() error {
// finally the most important and only required parameter: port
// can be passed to us either as an option or as a parameter
port := me.Options.PromPort
- if port == "" {
- port = me.Params.GetChildContentS("port")
+ if port == 0 {
+ p, err := strconv.Atoi(me.Params.GetChildContentS("port"))
+ if err != nil {
+ me.Logger.Error().Stack().Err(err).Msg("Issue while reading prometheus port")
+ } else {
+ port = p
+ }
}
// sanity check on port
- if port == "" {
+ if port == 0 {
return errors.New(errors.MISSING_PARAM, "port")
- } else if _, err := strconv.Atoi(port); err != nil {
- return errors.New(errors.INVALID_PARAM, "port ("+port+")")
+ } else if port < 0 {
+ return errors.New(errors.INVALID_PARAM, "port")
}
addr := localHttpAddr
@@ -269,7 +274,7 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
tagged *set.Set
labels_to_include, keys_to_include, global_labels []string
prefix string
- include_all_labels bool
+ err error
)
rendered = make([][]byte, 0)
@@ -291,10 +296,19 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
me.Logger.Debug().Msgf("requested keys_labels : %v", keys_to_include)
}
- if options.GetChildContentS("include_all_labels") == "true" {
- include_all_labels = true
- } else {
- include_all_labels = false
+ include_all_labels := false
+ require_instance_keys := true
+
+ if x := options.GetChildContentS("include_all_labels"); x != "" {
+ if include_all_labels, err = strconv.ParseBool(x); err != nil {
+ me.Logger.Error().Stack().Err(err).Msg("parameter: include_all_labels")
+ }
+ }
+
+ if x := options.GetChildContentS("require_instance_keys"); x != "" {
+ if require_instance_keys, err = strconv.ParseBool(x); err != nil {
+ me.Logger.Error().Stack().Err(err).Msg("parameter: require_instance_keys")
+ }
}
prefix = me.globalPrefix + data.Object
@@ -313,18 +327,26 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
me.Logger.Trace().Msgf("rendering instance [%s] (%v)", key, instance.GetLabels())
instance_keys := make([]string, len(global_labels))
- instance_labels := make([]string, 0)
copy(instance_keys, global_labels)
+ instance_keys_ok := false
+ instance_labels := make([]string, 0)
if include_all_labels {
for label, value := range instance.GetLabels().Map() {
- instance_keys = append(instance_keys, fmt.Sprintf("%s=\"%s\"", label, value))
+ // temporary fix for rarely occurring duplicate labels
+ // known case: ZapiPerf -> 7mode -> disk.yaml
+ // the actual cause is the Aggregator plugin, which adds node as an
+ // instance label (even though it's already a global label for 7mode)
+ if !data.GetGlobalLabels().Has(label) {
+ instance_keys = append(instance_keys, fmt.Sprintf("%s=\"%s\"", label, value))
+ }
}
} else {
for _, key := range keys_to_include {
value := instance.GetLabel(key)
- if value != "" {
- instance_keys = append(instance_keys, fmt.Sprintf("%s=\"%s\"", key, value))
+ instance_keys = append(instance_keys, fmt.Sprintf("%s=\"%s\"", key, value))
+ if !instance_keys_ok && value != "" {
+ instance_keys_ok = true
}
me.Logger.Trace().Msgf("++ key [%s] (%s) found=%v", key, value, value != "")
}
@@ -336,7 +358,7 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
}
// @TODO, probably be strict, and require all keys to be present
- if len(instance_keys) == 0 && options.GetChildContentS("require_instance_keys") != "False" {
+ if !instance_keys_ok && require_instance_keys {
me.Logger.Trace().Msgf("skip instance, no keys parsed (%v) (%v)", instance_keys, instance_labels)
continue
}
@@ -344,6 +366,11 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
// @TODO, check at least one label is found?
if len(instance_labels) != 0 {
label_data := fmt.Sprintf("%s_labels{%s,%s} 1.0", prefix, strings.Join(instance_keys, ","), strings.Join(instance_labels, ","))
+ if me.addMetaTags && !tagged.Has(prefix+"_labels") {
+ tagged.Add(prefix + "_labels")
+ rendered = append(rendered, []byte("# HELP "+prefix+"_labels Pseudo-metric for "+data.Object+" labels"))
+ rendered = append(rendered, []byte("# TYPE "+prefix+"_labels gauge"))
+ }
rendered = append(rendered, []byte(label_data))
} else {
me.Logger.Trace().Msgf("skip instance labels, no labels parsed (%v) (%v)", instance_keys, instance_labels)
@@ -396,6 +423,3 @@ func (me *Prometheus) render(data *matrix.Matrix) ([][]byte, error) {
me.Logger.Debug().Msgf("rendered %d data points from %d (%s) instances", len(rendered), len(data.GetInstances()), data.Object)
return rendered, nil
}
-
-// Need to appease go build - see https://github.com/golang/go/issues/20312
-func main() {}
diff --git a/cmd/exporters/prometheus/validator.py b/cmd/exporters/prometheus/validator.py
new file mode 100755
index 000000000..2b3938769
--- /dev/null
+++ b/cmd/exporters/prometheus/validator.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python3
+
+"""
+Copyright NetApp Inc, 2021 All rights reserved
+
+Utility to validate the integrity of the Prometheus metrics generated
+by Harvest's Prometheus exporter. This utility takes into account
+the parsing rules of the Prometheus server, as well as those of other
+collecting agents, such as InfluxData's Telegraf.
+
+"""
+
+import argparse
+import regex
+import signal
+import sys
+import time
+import urllib.request
+
+# error summary
+errors = {
+ 'corrupt_metrics' : 0,
+ 'corrupt_labels' : 0,
+ 'corrupt_metatags' : 0,
+ 'inconsistent_labels' : 0,
+ 'duplicate_labels' : 0,
+ 'duplicate_metatags' : 0,
+ 'missing_metatags' : 0,
+ 'missing_newlines' : 0,
+ }
+
+# cache label keys of seen metrics to check for consistency
+label_cache = {} # str -> set
+
+# regular expressions to match metric
+metric_pattern = regex.compile(r'^(\w+)\{(.+)\} \d+(\.\d+(e[-+]\d+)?)?$')
+# pattern to match HELP/TYPE metatags
+tag_pattern = regex.compile(r'^# (\w+) (\w+) .*$')
+# label name must start with alphabetical char
+# see: https://github.com/prometheus/common/blob/main/model/labels.go#L94
+label_pattern = regex.compile(r'^([_a-zA-Z]\w*)="[^"]*?"$', flags=regex.ASCII)
+
+# tty colors
+END = '\033[0m'
+BOLD = '\033[1m'
+RED = '\033[91m'
+GREEN = '\033[92m'
+YELLOW = '\033[93m'
+PINK = '\033[95m'
+
+def main():
+ # parse arguments
+ a = get_args()
+
+ # make sure to print errors before exiting
+ signal.signal(signal.SIGINT, terminate)
+
+ # run the scrapes
+ for i in range(a.scrapes):
+
+ # cache metrics for which we have seen metatags
+ help_cache = {} # str -> bool
+ type_cache = {} # str -> bool
+
+ metrics = get_batch_metrics(a.addr, a.port)
+ print('{}-> scrape #{:<4} - scraped metrics/lines: {}{}'.format(BOLD, i+1, len(metrics.splitlines()), END))
+
+ if metrics == '':
+ # sleep until the next scrape, then skip this empty batch
+ time.sleep(a.interval)
+ continue
+
+ if not metrics.endswith('\n'):
+ errors['missing_newlines'] += 1
+ print(' {}missing newline at the end of metric batch{}'.format(PINK, END))
+
+ for m in metrics.splitlines():
+
+ # skip newline
+ if m == '\n' or m == '':
+ continue
+
+ # handle metatag
+ if len(m) and m[0] == '#':
+ ok, tag, metric_name = check_metatag(m)
+ if not ok:
+ errors['corrupt_metatags'] += 1
+ print(' corrupt {} metatag:'.format(tag))
+ print(' [{}{}{}]'.format(RED, m, END))
+ elif tag == 'HELP':
+ if help_cache.get(metric_name, False):
+ errors['duplicate_metatags'] += 1 # count only once
+ print(' duplicate HELP tag for metric {}'.format(metric_name))
+ help_cache[metric_name] = True
+ elif tag == 'TYPE':
+ if type_cache.get(metric_name, False):
+ print(' duplicate TYPE tag for metric {}'.format(metric_name))
+ type_cache[metric_name] = True
+ continue
+
+ # check general metric integrity and parse the raw labels substring
+ ok, metric_name, raw_labels = check_metric(m)
+
+ if not ok:
+ errors['corrupt_metrics'] += 1
+ print(' corrupt metric format:')
+ print(' [{}{}{}]'.format(RED, m, END))
+ continue
+
+ # check labels integrity
+ ok, labels = parse_labels(raw_labels) # list
+ if not ok:
+ errors['corrupt_metrics'] += 1
+ print(' corrupt metric format (labels):')
+ print(' [{}{}{}]'.format(RED, m, END))
+ continue
+
+ # check for duplicate labels
+ duplicates = set([l for l in labels if labels.count(l) > 1])
+ if duplicates:
+ errors['duplicate_labels'] += 1
+ print(' duplicate labels ({}):'.format(', '.join(duplicates)))
+ print(' [{}{}{}]'.format(RED, m, END))
+
+ labels = set(labels)
+
+ # compare with cached labels for consistency
+ cached_labels = label_cache.get(metric_name, None)
+ if cached_labels == None:
+ label_cache[metric_name] = labels
+ else:
+ missing = cached_labels - labels
+ added = labels - cached_labels
+ if missing or added:
+ errors['inconsistent_labels'] += 1
+ print(' inconsistent labels (cached: {}):'.format(' '.join(cached_labels)))
+ if missing:
+ print(' - missing ({})'.format(', '.join(missing)))
+ if added:
+ print(' - added ({})'.format(', '.join(added)))
+ print(' [{}{}{}]'.format(RED, m, END))
+
+ # optionally check for metatags
+ # each metric should include HELP/TYPE metatags at least once
+ if a.metatags:
+ has_help = help_cache.get(metric_name, False)
+ has_type = type_cache.get(metric_name, False)
+ if not has_help or not has_type:
+ errors['missing_metatags'] += 1
+ print(' {}missing metatags for metric [{}]{}'.format(RED, metric_name, END))
+ if not has_help:
+ print(' - HELP tag not detected')
+ if not has_type:
+ print(' - TYPE tag not detected')
+
+ # sleep until next scrape
+ time.sleep(a.interval)
+
+ print_errors()
+ # DONE
+
+# Scrape an HTTP endpoint and return data
+def get_batch_metrics(addr: str, port: int) -> str:
+ try:
+ return urllib.request.urlopen('http://{}:{}/metrics'.format(addr, port)).read().decode()
+ except urllib.error.URLError as err:
+ print(err)
+ return ''
+
+# validate metric format (without labels), extract name and labels substring
+def check_metric(metric: str) -> (bool, str, str):
+ match = metric_pattern.match(metric)
+ if match:
+ try:
+ return True, match.captures(1)[0], match.captures(2)[0]
+ except Exception as ex:
+ print('regex exception: {}'.format(ex))
+ return False, '', ''
+
+def check_metatag(metric: str) -> (bool, str, str):
+ match = tag_pattern.match(metric)
+ if match:
+ try:
+ return True, match.captures(1)[0], match.captures(2)[0]
+ except Exception as ex:
+ print('regex exception: {}'.format(ex))
+ return False, '', ''
+
+# parse label keys from raw labels substring
+def parse_labels(labels: str) -> (bool, [str]):
+ keys = []
+ for pair in labels.split(','):
+ match = label_pattern.match(pair)
+ if not match:
+ print(' - failed to parse label pair ({})'.format(pair))
+ return False, keys
+ keys.append(match.captures(1)[0])
+
+ return True, keys
+
+def terminate(signum, frame):
+ print('\n{}-> terminating validation session{}'.format(YELLOW, END))
+ print_errors()
+ sys.exit()
+
+def print_errors():
+ print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
+ print('-> {} unique metrics validated'.format(len(label_cache)))
+ total = sum(errors.values())
+ if total == 0:
+ print('{}-> OK - no errors detected{}'.format(GREEN, END))
+ else:
+ print('{}-> FAIL - {} errors detected{}'.format(RED, total, END))
+
+ for k, v in errors.items():
+ print('{:<30} - {:>8}'.format(k, v))
+ print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
+
+
+# Parse CLI arguments
+def get_args() -> argparse.Namespace:
+ p = argparse.ArgumentParser(
+ formatter_class = argparse.RawTextHelpFormatter,
+ description = """Open Metric Validator using an HTTP endpoint
+
+SYNOPSIS:
+ Run this tool specifying the port of the Prometheus exporter. Then,
+ start a Harvest poller that will serve the metrics on the port.
+ (Start this tool first, so no metatags are missed.)
+
+VALIDATION:
+ Tool will validate integrity of the rendered metrics:
+ - metric format
+ - label integrity
+ - label consistency
+ - label duplicates
+ - HELP/TYPE metatags (optional)"""
+ )
+ p.add_argument('-a', '--addr',
+ help = 'Address of the HTTP endpoint (default: localhost)',
+ dest = 'addr',
+ type = str,
+ default = 'localhost'
+ )
+ p.add_argument('-p', '--port',
+ help = 'Port of the HTTP endpoint',
+ dest = 'port',
+ type = int,
+ required = True
+ )
+ p.add_argument('-i', '--interval',
+ help = 'Interval between scrapes (in seconds, default: 60)',
+ dest = 'interval',
+ type = int,
+ default = 60
+ )
+ p.add_argument('-s', '--scrapes',
+ help = 'Number of scrapes to run (default: 5)',
+ dest = 'scrapes',
+ type = int,
+ default = 5
+ )
+ p.add_argument('-m', '--metatags',
+ help = 'Check TYPE/HELP metatags (default: false)',
+ dest = 'metatags',
+ action = 'store_true',
+ default = False
+ )
+ return p.parse_args()
+
+if __name__ == '__main__':
+ main()
diff --git a/cmd/harvest/harvest.go b/cmd/harvest/harvest.go
index 9d0c6924a..c9d19afd5 100644
--- a/cmd/harvest/harvest.go
+++ b/cmd/harvest/harvest.go
@@ -15,19 +15,19 @@ Chris Madden in 2015.
package main
import (
- "bytes"
"fmt"
"github.com/spf13/cobra"
"goharvest2/cmd/harvest/config"
"goharvest2/cmd/harvest/stub"
"goharvest2/cmd/harvest/version"
"goharvest2/cmd/tools/doctor"
+ "goharvest2/cmd/tools/generate"
"goharvest2/cmd/tools/grafana"
"goharvest2/cmd/tools/zapi"
"goharvest2/pkg/conf"
"goharvest2/pkg/set"
"goharvest2/pkg/tree/node"
- "io/ioutil"
+ "goharvest2/pkg/util"
"net"
_ "net/http/pprof" // #nosec since pprof is off by default
"os"
@@ -67,7 +67,6 @@ type pollerStatus struct {
var (
HarvestHomePath string
HarvestConfigPath string
- HarvestPidPath string
)
var rootCmd = &cobra.Command{
@@ -94,8 +93,6 @@ func doManageCmd(cmd *cobra.Command, args []string) {
os.Exit(1)
}
- HarvestPidPath = conf.GetHarvestPidPath()
-
if opts.verbose {
_ = cmd.Flags().Set("loglevel", "1")
}
@@ -115,6 +112,16 @@ func doManageCmd(cmd *cobra.Command, args []string) {
os.Exit(1)
}
+ var pollerNames []string
+ for _, p := range pollers.GetChildren() {
+ pollerNames = append(pollerNames, p.GetNameS())
+ }
+ // do this before filtering the pollers:
+ // stop pollers which may have been renamed or no longer exist in harvest.yml
+ if opts.command == "start" || opts.command == "restart" {
+ stopGhostPollers("poller", pollerNames)
+ }
+
pollersFromCmdLine := args
if len(pollersFromCmdLine) > 0 {
// verify poller names
@@ -167,7 +174,6 @@ func doManageCmd(cmd *cobra.Command, args []string) {
name := p.GetNameS()
datacenter := p.GetChildContentS("datacenter")
- promPort := getPollerPrometheusPort(p, opts)
s = getStatus(name)
if opts.command == "kill" {
@@ -187,10 +193,17 @@ func doManageCmd(cmd *cobra.Command, args []string) {
if opts.command == "start" || opts.command == "restart" {
// only start poller if confirmed that it's not running
- if s.status == "not running" || s.status == "stopped" || s.status == "killed" {
+ // if it's running do nothing
+ switch s.status {
+ case "running":
+ // do nothing but print current status, idempotent
+ printStatus(opts.longStatus, c1, c2, datacenter, name, s.promPort, s)
+ break
+ case "not running", "stopped", "killed":
+ promPort := getPollerPrometheusPort(p, opts)
s = startPoller(name, promPort, opts)
printStatus(opts.longStatus, c1, c2, datacenter, name, s.promPort, s)
- } else {
+ default:
fmt.Printf("can't verify status of [%s]: kill poller and try again\n", name)
}
}
@@ -199,30 +212,18 @@ func doManageCmd(cmd *cobra.Command, args []string) {
printBreak(opts.longStatus, c1, c2)
}
-// Trace status of a poller. This is partially guesswork and
-// there is no guarantee that we will be always correct
+// Get status of a poller. This is partially guesswork and
+// there is no guarantee that this will always be correct.
// The general logic is:
-// - if no PID file, assume poller exited or never started
-// - if PID file exists, but no process with PID exists, assume poller interrupted
-// - if a process with PID is running, assume poller is running if it has expected cmdline
+// - use pgrep to find a matching poller
+// - then check that the found pid has a harvest tag in its environ
//
// Returns:
// @status - status of poller
// @pid - PID of poller (0 means no PID)
func getStatus(pollerName string) *pollerStatus {
- s := &pollerStatus{}
- // running poller should have written PID to file
- pidFp := path.Join(HarvestPidPath, pollerName+".pid")
-
- // no PID file, assume process exited or never started
- if data, err := ioutil.ReadFile(pidFp); err != nil {
- s.status = "not running"
- // corrupt PID should never happen
- // might be a sign of system failure or unexpected shutdown
- } else if s.pid, err = strconv.Atoi(string(data)); err != nil {
- s.status = "invalid pid"
- }
+ s := &pollerStatus{status: "not running"}
// docker dummy status
if os.Getenv("HARVEST_DOCKER") == "yes" {
@@ -230,6 +231,18 @@ func getStatus(pollerName string) *pollerStatus {
return s
}
+ pids, err := util.GetPid(pollerName)
+ if err != nil {
+ return s
+ }
+ if len(pids) != 1 {
+ if len(pids) > 1 {
+ fmt.Printf("exepcted one pid for %s, instead pids=%+v\n", pollerName, pids)
+ }
+ return s
+ }
+ s.pid = pids[0]
+
// no valid PID stops here
if s.pid < 1 {
return s
@@ -245,109 +258,75 @@ func getStatus(pollerName string) *pollerStatus {
if os.IsPermission(err) {
fmt.Println("Insufficient privileges to send signal to process")
}
- s.status = "unknown: " + err.Error()
return s
- // process not running, but did not clean PID file
- // maybe it just exited, so give it a chance to clean
- /*
- time.Sleep(500 * time.Millisecond)
- if clean_pidf(pid_fp) {
- return "interrupted", pid
- }
- return "exited", pid
- */
}
- // process is running, validate that it's the poller we're looking fore
- // since PID might have changed (although very unlikely)
- if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", s.pid)); err == nil {
- cmdline := string(bytes.ReplaceAll(data, []byte("\x00"), []byte(" ")))
-
- if checkPollerIdentity(cmdline, pollerName) {
- s.status = "running"
+ // process is running. GetPid ensures this is the correct poller
+ // Extract cmdline args for status struct
+ if cmdline, err := util.GetCmdLine(s.pid); err == nil {
+ s.status = "running"
- if strings.Contains(cmdline, "--profiling") {
- r := regexp.MustCompile(`--profiling (\d+)`)
- matches := r.FindStringSubmatch(cmdline)
- if len(matches) > 0 {
- s.profilingPort = matches[1]
- }
+ if strings.Contains(cmdline, "--profiling") {
+ r := regexp.MustCompile(`--profiling (\d+)`)
+ matches := r.FindStringSubmatch(cmdline)
+ if len(matches) > 0 {
+ s.profilingPort = matches[1]
}
+ }
- if strings.Contains(cmdline, "--promPort") {
- r := regexp.MustCompile(`--promPort (\d+)`)
- matches := r.FindStringSubmatch(cmdline)
- if len(matches) > 0 {
- s.promPort = matches[1]
- }
+ if strings.Contains(cmdline, "--promPort") {
+ r := regexp.MustCompile(`--promPort (\d+)`)
+ matches := r.FindStringSubmatch(cmdline)
+ if len(matches) > 0 {
+ s.promPort = matches[1]
}
}
}
-
- // if status is not running, either process just exited and cmdline was unavailable
- // or cmdline did not confirm process is the poller we're looking for
- if s.status == "" {
- s.status = "unknown/unmatched"
- }
-
return s
}
-func checkPollerIdentity(cmdline, pollerName string) bool {
- if x := strings.Fields(cmdline); len(x) == 0 || !strings.HasSuffix(x[0], "poller") {
- return false
- }
-
- if !strings.Contains(cmdline, "--daemon") {
- return false
- }
-
- if !strings.Contains(cmdline, "--poller") {
- return false
- }
-
- x := strings.SplitAfter(cmdline, "--poller ")
- if len(x) != 2 {
- return false
- }
-
- if y := strings.Fields(x[1]); len(y) == 0 || y[0] != pollerName {
- return false
+func stopGhostPollers(search string, skipPoller []string) {
+ pids, err := util.GetPids(search)
+ if err != nil {
+ fmt.Printf("Error while executing pgrep %v \n", err)
+ return
}
- return true
-}
-
-func killPoller(pollerName string) *pollerStatus {
-
- defer cleanPidFile(pollerName)
-
- // attempt to get pid from pid file
- s := getStatus(pollerName)
-
- // attempt to get pid from "ps aux"
- if s.pid < 1 {
- data, err := exec.Command("ps", "aux").Output()
+ for _, p := range pids {
+ c, err := util.GetCmdLine(p)
if err != nil {
- fmt.Println("ps aux: ", err)
- return s
+ fmt.Printf("Missing pid %d %v \n", p, err)
+ continue
}
- for _, line := range strings.Split(string(data), "\n") {
- // BSD format should have 11 columns
- // last column can contain whitespace, so we should get at least 11
- if fields := strings.Fields(line); len(fields) > 10 {
- // CLI args are everything after 10th column
- if checkPollerIdentity(strings.Join(fields[10:], " "), pollerName) {
- if x, err := strconv.Atoi(fields[1]); err == nil {
- s.pid = x
- }
- break
+ // skip if this poller is defined in harvest config
+ var skip bool
+ for _, s := range skipPoller {
+ if util.ContainsWholeWord(c, s) {
+ skip = true
+ break
+ }
+ }
+ // if the poller doesn't exist in the harvest config
+ if !skip {
+ proc, err := os.FindProcess(p)
+ if err != nil {
+ fmt.Printf("process not found for pid %d %v \n", p, err)
+ continue
+ }
+ // send terminate signal
+ if err := proc.Signal(syscall.SIGTERM); err != nil {
+ if os.IsPermission(err) {
+ fmt.Printf("Insufficient priviliges to terminate process %v \n", err)
}
}
}
}
+}
+
+func killPoller(pollerName string) *pollerStatus {
- // stop if couldn't find pid
+ s := getStatus(pollerName)
+ // exit if pid was not found
if s.pid < 1 {
return s
}
@@ -384,7 +363,7 @@ func stopPoller(pollerName string) *pollerStatus {
// send terminate signal
if err := proc.Signal(syscall.SIGTERM); err != nil {
if os.IsPermission(err) {
- fmt.Println("Insufficient priviliges to terminate process")
+ fmt.Println("Insufficient privileges to terminate process")
s.status = "stopping failed"
return s
}
@@ -408,7 +387,7 @@ func stopPoller(pollerName string) *pollerStatus {
return killPoller(pollerName)
}
-func startPoller(pollerName string, promPort string, opts *options) *pollerStatus {
+func startPoller(pollerName string, promPort int, opts *options) *pollerStatus {
argv := make([]string, 5)
argv[0] = path.Join(HarvestHomePath, "bin", "poller")
@@ -417,9 +396,9 @@ func startPoller(pollerName string, promPort string, opts *options) *pollerStatu
argv[3] = "--loglevel"
argv[4] = strconv.Itoa(opts.loglevel)
- if len(promPort) != 0 {
+ if promPort != 0 {
argv = append(argv, "--promPort")
- argv = append(argv, promPort)
+ argv = append(argv, strconv.Itoa(promPort))
}
if opts.debug {
argv = append(argv, "--debug")
@@ -458,6 +437,7 @@ func startPoller(pollerName string, promPort string, opts *options) *pollerStatu
if opts.foreground {
cmd := exec.Command(argv[0], argv[1:]...)
+ cmd.Env = append(os.Environ(), util.HarvestTag)
//fmt.Println(cmd.String())
fmt.Println("starting in foreground, enter CTRL+C or close terminal to stop poller")
_ = os.Stdout.Sync()
@@ -476,14 +456,6 @@ func startPoller(pollerName string, promPort string, opts *options) *pollerStatu
argv = append(argv, "--daemon")
- // if pid directory doesn't exist, create full path, otherwise poller will complain
- if info, err := os.Stat(HarvestPidPath); err != nil || !info.IsDir() {
- // don't abort on error, since another poller might have done the job
- if err = os.MkdirAll(HarvestPidPath, 0755); err != nil && !os.IsExist(err) {
- fmt.Printf("error mkdir [%s]: %v\n", HarvestPidPath, err)
- }
- }
-
// special case if we are in container, don't actually daemonize
if os.Getenv("HARVEST_DOCKER") == "yes" {
cmd := exec.Command(argv[0], argv[1:]...)
@@ -499,41 +471,24 @@ func startPoller(pollerName string, promPort string, opts *options) *pollerStatu
}
cmd := exec.Command(path.Join(HarvestHomePath, "bin", "daemonize"), argv...)
+ cmd.Env = append(os.Environ(), util.HarvestTag)
//fmt.Println(cmd.String())
if err := cmd.Start(); err != nil {
fmt.Println(err)
os.Exit(1)
}
- // Poller should immediately write its PID to file at startup
// Allow for some delay and retry checking status a few times
- time.Sleep(50 * time.Millisecond)
- for i := 0; i < 10; i += 1 {
- // @TODO, handle situation when PID is regained by some other process
+ for i := 0; i < 2; i += 1 {
if s := getStatus(pollerName); s.pid > 0 {
return s
}
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
}
return getStatus(pollerName)
}
-// Clean PID file if it exists
-// Return value indicates wether PID file existed
-func cleanPidFile(pollerName string) bool {
- fp := path.Join(HarvestPidPath, pollerName+".pid")
- if err := os.Remove(fp); err != nil {
- if os.IsPermission(err) {
- fmt.Printf("Error: you have no permission to remove [%s]\n", fp)
- } else if !os.IsNotExist(err) {
- fmt.Printf("Error: %v\n", err)
- }
- return false
- }
- return true
-}
-
// print status of poller, first two arguments are column lengths
func printStatus(long bool, c1, c2 int, dc, pn, promPort string, s *pollerStatus) {
fmt.Printf("%s%s ", dc, strings.Repeat(" ", c1-len(dc)))
@@ -602,18 +557,19 @@ func closeDial(dial *net.TCPListener) {
_ = dial.Close()
}
-func getPollerPrometheusPort(p *node.Node, opts *options) string {
- var promPort string
+func getPollerPrometheusPort(p *node.Node, opts *options) int {
+ var promPort int
var err error
+
// check first if poller argument has promPort defined
// else in exporter config of poller
if opts.promPort != 0 {
- promPort = strconv.Itoa(opts.promPort)
+ promPort = opts.promPort
} else {
- promPort, err = conf.GetPrometheusExporterPorts(p, opts.config)
+ promPort, err = conf.GetPrometheusExporterPorts(p.GetNameS())
if err != nil {
fmt.Println(err)
- promPort = "error"
+ return 0
}
}
return promPort
@@ -627,6 +583,7 @@ func init() {
rootCmd.AddCommand(manageCmd("restart", true))
rootCmd.AddCommand(manageCmd("kill", true))
rootCmd.AddCommand(config.ConfigCmd, zapi.ZapiCmd, grafana.GrafanaCmd, stub.NewCmd)
+ rootCmd.AddCommand(generate.Cmd)
rootCmd.AddCommand(doctor.Cmd)
rootCmd.PersistentFlags().StringVar(&opts.config, "config", "./harvest.yml", "harvest config file path")
@@ -712,7 +669,7 @@ Feedback
func manageCmd(use string, shouldHide bool) *cobra.Command {
return &cobra.Command{
Use: fmt.Sprintf("%s [POLLER...]", use),
- Short: "stop/restart/status/kill - all or individual pollers",
+ Short: "Stop/restart/status/kill - all or individual pollers",
Long: "Harvest Manager - manage your pollers",
Args: cobra.ArbitraryArgs,
Hidden: shouldHide,
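
Note on the new status check in harvest.go: pollers are now tagged by appending util.HarvestTag to their environment at startup (cmd.Env = append(os.Environ(), util.HarvestTag)), and getStatus only trusts a pid found by pgrep if that tag is present in its environ. The actual check lives in goharvest2/pkg/util and is not part of this diff; the sketch below is only a minimal illustration of how such a check can be written on Linux. The helper name hasEnvTag and the tag value "HARVEST=example" are assumptions for illustration, not Harvest's real API.

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "os"
    )

    // hasEnvTag reports whether the process with the given pid has the exact
    // KEY=VALUE entry tag in its environment. On Linux, /proc/<pid>/environ is
    // a NUL-separated list of such entries, so split on NUL and compare.
    func hasEnvTag(pid int, tag string) (bool, error) {
        data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", pid))
        if err != nil {
            return false, err
        }
        for _, kv := range bytes.Split(data, []byte{0}) {
            if string(kv) == tag {
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        // Check our own process for a tag that is (presumably) not set.
        ok, err := hasEnvTag(os.Getpid(), "HARVEST=example")
        fmt.Println(ok, err)
    }
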
diff --git a/cmd/poller/collector/collector.go b/cmd/poller/collector/collector.go
index 273e4150b..284aebad5 100644
--- a/cmd/poller/collector/collector.go
+++ b/cmd/poller/collector/collector.go
@@ -18,14 +18,13 @@ package collector
import (
"goharvest2/pkg/conf"
"goharvest2/pkg/logging"
- "path"
"reflect"
+ "runtime/debug"
"strconv"
"strings"
"sync"
"time"
- "goharvest2/pkg/dload"
"goharvest2/pkg/errors"
"goharvest2/pkg/matrix"
"goharvest2/pkg/tree/node"
@@ -44,7 +43,7 @@ import (
// Note that many of the functions required by the interface
// are only there to facilitate "inheritance" through AbstractCollector.
type Collector interface {
- Init() error
+ Init(*AbstractCollector) error
Start(*sync.WaitGroup)
GetName() string
GetObject() string
@@ -59,11 +58,12 @@ type Collector interface {
SetMetadata(*matrix.Matrix)
WantedExporters(configFp string) []string
LinkExporter(exporter.Exporter)
- LoadPlugins(*node.Node) error
+ LoadPlugins(*node.Node, Collector) error
+ LoadPlugin(string, *plugin.AbstractPlugin) plugin.Plugin
}
-// CollectorStatus defines the possible states of a collector
-var CollectorStatus = [3]string{
+// Status defines the possible states of a collector
+var Status = [3]string{
"up",
"standby",
"failed",
@@ -126,9 +126,9 @@ func New(name, object string, options *options.Options, params *node.Node) *Abst
// data: 10s
// instance: 20s
//
-// then we expect that the collector has methods PollDdta and PollInstance
+// then we expect that the collector has methods PollData and PollInstance
// that need to be invoked every 10 and 20 seconds respectively.
-// Names of the polls are arbitrary, only "ddta" is a special case, since
+// Names of the polls are arbitrary, only "data" is a special case, since
// plugins are executed after the data poll (this might change).
func Init(c Collector) error {
@@ -146,7 +146,7 @@ func Init(c Collector) error {
s := schedule.New()
// Each task will be mapped to a collector method
- // Example: "data" will be alligned to method PollData()
+ // Example: "data" will be aligned to method PollData()
for _, task := range tasks.GetChildren() {
methodName := "Poll" + strings.Title(task.GetNameS())
@@ -191,7 +191,7 @@ func Init(c Collector) error {
// Initialize Plugins
if plugins := params.GetChildS("plugins"); plugins != nil {
- if err := c.LoadPlugins(plugins); err != nil {
+ if err := c.LoadPlugins(plugins, c); err != nil {
return err
}
}
@@ -217,7 +217,7 @@ func Init(c Collector) error {
//md.AddLabel("task", "")
//md.AddLabel("interval", "")
- // add tasks of the collecor as metadata instances
+ // add tasks of the collector as metadata instances
for _, task := range s.GetTasks() {
instance, _ := md.NewInstance(task.Name)
instance.SetLabel("task", task.Name)
@@ -235,7 +235,11 @@ func Init(c Collector) error {
// Start will run the collector in an infinity loop
func (me *AbstractCollector) Start(wg *sync.WaitGroup) {
-
+ defer func() {
+ if r := recover(); r != nil {
+ me.Logger.Error().Stack().Err(errors.New(errors.GO_ROUTINE_PANIC, string(debug.Stack()))).Msg("Collector panicked")
+ }
+ }()
defer wg.Done()
// keep track of connection errors
@@ -414,13 +418,13 @@ func (me *AbstractCollector) AddCollectCount(n uint64) {
// GetStatus returns current state of the collector
func (me *AbstractCollector) GetStatus() (uint8, string, string) {
- return me.Status, CollectorStatus[me.Status], me.Message
+ return me.Status, Status[me.Status], me.Message
}
// SetStatus sets the current state of the collector to one
// of the values defined by CollectorStatus
func (me *AbstractCollector) SetStatus(status uint8, msg string) {
- if status < 0 || status >= uint8(len(CollectorStatus)) {
+ if status < 0 || status >= uint8(len(Status)) {
panic("invalid status code " + strconv.Itoa(int(status)))
}
me.Status = status
@@ -468,9 +472,13 @@ func (me *AbstractCollector) LinkExporter(e exporter.Exporter) {
me.Exporters = append(me.Exporters, e)
}
-// LoadPlugins loads built-in plugins or dynamically loads custom plugins
-// and adds them to the collector
-func (me *AbstractCollector) LoadPlugins(params *node.Node) error {
+func (me *AbstractCollector) LoadPlugin(s string, abc *plugin.AbstractPlugin) plugin.Plugin {
+ return nil
+}
+
+// LoadPlugins loads built-in plugins, or custom plugins supplied by the
+// collector's LoadPlugin method, and adds them to the collector
+func (me *AbstractCollector) LoadPlugins(params *node.Node, c Collector) error {
var p plugin.Plugin
var abc *plugin.AbstractPlugin
@@ -490,20 +498,8 @@ func (me *AbstractCollector) LoadPlugins(params *node.Node) error {
me.Logger.Debug().Msgf("loaded built-in plugin [%s]", name)
// case 2: available as dynamic plugin
} else {
- binpath := path.Join(me.Options.HomePath, "bin", "plugins", strings.ToLower(me.Name))
- module, err := dload.LoadFuncFromModule(binpath, strings.ToLower(name), "New")
- if err != nil {
- //logger.Error(c.LongName, "load plugin [%s]: %v", name, err)
- return errors.New(errors.ERR_DLOAD, "plugin "+name+": "+err.Error())
- }
-
- NewFunc, ok := module.(func(*plugin.AbstractPlugin) plugin.Plugin)
- if !ok {
- //logger.Error(c.LongName, "load plugin [%s]: New() has not expected signature", name)
- return errors.New(errors.ERR_DLOAD, name+": New()")
- }
- p = NewFunc(abc)
- me.Logger.Debug().Msgf("loaded dynamic plugin [%s]", name)
+ p = c.LoadPlugin(name, abc)
+ me.Logger.Debug().Msgf("loaded plugin [%s]", name)
}
if err := p.Init(); err != nil {
diff --git a/cmd/poller/collector/helpers.go b/cmd/poller/collector/helpers.go
index 7f1ac9295..6fd6117b0 100644
--- a/cmd/poller/collector/helpers.go
+++ b/cmd/poller/collector/helpers.go
@@ -38,7 +38,7 @@ func ImportTemplate(confPath, confFn, collectorName string) (*node.Node, error)
// ImportSubTemplate retrieves the best matching subtemplate of a collector object.
//
// This method is only applicable to the Zapi/ZapiPerf collectors which have
-// multiple objects and each object is forked as a seperate collector.
+// multiple objects and each object is forked as a separate collector.
// The subtemplates are sorted in subdirectories that serve as "tag" for the
// matching ONTAP version. ImportSubTemplate will attempt to choose the subtemplate
// with closest matching ONTAP version.
@@ -60,7 +60,7 @@ func (c *AbstractCollector) ImportSubTemplate(model, filename string, version [3
pathPrefix = path.Join(c.Options.HomePath, "conf/", strings.ToLower(c.Name), model)
c.Logger.Debug().Msgf("Looking for best-fitting template in [%s]", pathPrefix)
- // check for available versons, those are the subdirectories that include filename
+ // check for available versions, those are the subdirectories that include filename
availableVersions = make(map[string]bool)
if files, err := ioutil.ReadDir(pathPrefix); err == nil {
for _, file := range files {
diff --git a/cmd/poller/options/options.go b/cmd/poller/options/options.go
index 366d65fb7..f4e636d55 100644
--- a/cmd/poller/options/options.go
+++ b/cmd/poller/options/options.go
@@ -24,11 +24,10 @@ type Options struct {
Daemon bool // if true, Poller is started as daemon
Debug bool // if true, Poller is started in debug mode
// this mostly means that no data will be exported
- PromPort string // HTTP port that is assigned to Poller and can be used by the Prometheus exporter
+ PromPort int // HTTP port that is assigned to Poller and can be used by the Prometheus exporter
Config string // filepath of Harvest config (defaults to "harvest.yml") can be relative or absolute path
HomePath string // path to harvest home (usually "/opt/harvest")
LogPath string // log files location (usually "/var/log/harvest")
- PidPath string // pid files location (usually "/var/run/harvest")
LogLevel int // logging level, 0 for trace, 5 for fatal
Version string // harvest version
Hostname string // hostname of the machine harvest is running
@@ -44,11 +43,10 @@ func (o *Options) String() string {
fmt.Sprintf("%s = %v", "Daemon", o.Daemon),
fmt.Sprintf("%s = %v", "Debug", o.Debug),
fmt.Sprintf("%s = %d", "Profiling", o.Profiling),
- fmt.Sprintf("%s = %s", "PromPort", o.PromPort),
+ fmt.Sprintf("%s = %d", "PromPort", o.PromPort),
fmt.Sprintf("%s = %d", "LogLevel", o.LogLevel),
fmt.Sprintf("%s = %s", "HomePath", o.HomePath),
fmt.Sprintf("%s = %s", "LogPath", o.LogPath),
- fmt.Sprintf("%s = %s", "PidPath", o.PidPath),
fmt.Sprintf("%s = %s", "Config", o.Config),
fmt.Sprintf("%s = %s", "Hostname", o.Hostname),
fmt.Sprintf("%s = %s", "Version", o.Version),
@@ -69,6 +67,4 @@ func SetPathsAndHostname(args *Options) {
args.HomePath = conf.GetHarvestHomePath()
args.LogPath = conf.GetHarvestLogPath()
-
- args.PidPath = conf.GetHarvestPidPath()
}
diff --git a/cmd/poller/plugin/plugin.go b/cmd/poller/plugin/plugin.go
index 634df1de3..7f9c7704c 100644
--- a/cmd/poller/plugin/plugin.go
+++ b/cmd/poller/plugin/plugin.go
@@ -1,13 +1,13 @@
-/*
- * Copyright NetApp Inc, 2021 All rights reserved
+// Copyright NetApp Inc, 2021 All rights reserved
+/*
Package plugin provides abstractions for plugins, as well as
a number of generic built-in plugins. Plugins allow to customize
and manipulate data from collectors and sometimes collect additional
data without changing the sourcecode of collectors. Multiple plugins
can be put in a pipeline, they are executed in the same order as they
are defined in the collector's config file.
- Harvest architecuture defines three types of plugins:
+ Harvest architecture defines three types of plugins:
**built-in**
Statically compiled, generic plugins. "Generic" means
@@ -28,11 +28,13 @@
package plugin
import (
+ "fmt"
"goharvest2/cmd/poller/options"
"goharvest2/pkg/errors"
"goharvest2/pkg/logging"
"goharvest2/pkg/matrix"
"goharvest2/pkg/tree/node"
+ "sync"
)
// Plugin defines the methods of a plugin
@@ -42,6 +44,57 @@ type Plugin interface {
Run(*matrix.Matrix) ([]*matrix.Matrix, error)
}
+var (
+ modules = make(map[string]ModuleInfo)
+ modulesMu sync.RWMutex
+)
+
+// GetModule returns module information from its ID (full name).
+func GetModule(name string) (ModuleInfo, error) {
+ modulesMu.RLock()
+ defer modulesMu.RUnlock()
+ m, ok := modules[name]
+ if !ok {
+ return ModuleInfo{}, fmt.Errorf("module not registered: %s", name)
+ }
+ return m, nil
+}
+
+func RegisterModule(instance Module) {
+ mod := instance.HarvestModule()
+
+ if mod.ID == "" {
+ panic("module missing ID")
+ }
+ if mod.ID == "harvest" || mod.ID == "admin" {
+ panic(fmt.Sprintf("module ID '%s' is reserved", mod.ID))
+ }
+ if mod.New == nil {
+ panic("missing ModuleInfo.New")
+ }
+ if val := mod.New(); val == nil {
+ panic("ModuleInfo.New must return a non-nil module instance")
+ }
+ modulesMu.Lock()
+ defer modulesMu.Unlock()
+
+ if _, ok := modules[mod.ID]; ok {
+ panic(fmt.Sprintf("module already registered: %s", mod.ID))
+ }
+ modules[mod.ID] = mod
+}
+
+type Module interface {
+ HarvestModule() ModuleInfo
+}
+
+type ModuleInfo struct {
+ // name of module
+ ID string
+
+ New func() Module
+}
+
// AbstractPlugin implements methods of the Plugin interface, except Run()
type AbstractPlugin struct {
Parent string
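
The registry added to plugin.go above replaces the old dlopen-style loading of collectors and exporters: modules register themselves at init time and are later resolved by ID (Poller.newCollector, further down in this diff, looks up "harvest.collector." plus the lower-cased class name). Below is a minimal, hypothetical registration sketch; the MyCollector type and its package are invented for illustration, and a real collector would additionally implement the collector.Collector interface so that the type assertion in newCollector succeeds.

    package mycollector

    import "goharvest2/cmd/poller/plugin"

    // MyCollector stands in for a real collector such as Zapi.
    type MyCollector struct{}

    // HarvestModule describes this module to the registry.
    func (MyCollector) HarvestModule() plugin.ModuleInfo {
        return plugin.ModuleInfo{
            // ID follows the naming scheme Poller.newCollector expects.
            ID:  "harvest.collector.mycollector",
            New: func() plugin.Module { return new(MyCollector) },
        }
    }

    func init() {
        // Importing this package (even blank-imported, as poller.go does for
        // the real collectors) is enough to register the module.
        plugin.RegisterModule(MyCollector{})
    }
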
diff --git a/cmd/poller/poller.go b/cmd/poller/poller.go
index 91e5abb7e..783285554 100644
--- a/cmd/poller/poller.go
+++ b/cmd/poller/poller.go
@@ -16,8 +16,8 @@
and exporters, ping the target system, generate metadata and do some
housekeeping.
- Usually the poller will run as a daemon. In this case it will create
- a PID file and write logs to a file. For debugging and testing
+ Usually the poller will run as a daemon. In this case it will
+ write logs to a file. For debugging and testing
it can also be started as a foreground process, in this case
logs are sent to STDOUT.
*/
@@ -26,13 +26,18 @@ package main
import (
"fmt"
"github.com/spf13/cobra"
+ _ "goharvest2/cmd/collectors/unix"
+ _ "goharvest2/cmd/collectors/zapi/collector"
+ _ "goharvest2/cmd/collectors/zapiperf"
+ "goharvest2/cmd/exporters/influxdb"
+ "goharvest2/cmd/exporters/prometheus"
"goharvest2/cmd/harvest/version"
"goharvest2/cmd/poller/collector"
"goharvest2/cmd/poller/exporter"
"goharvest2/cmd/poller/options"
+ "goharvest2/cmd/poller/plugin"
"goharvest2/cmd/poller/schedule"
"goharvest2/pkg/conf"
- "goharvest2/pkg/dload"
"goharvest2/pkg/errors"
"goharvest2/pkg/logging"
"goharvest2/pkg/matrix"
@@ -43,7 +48,6 @@ import (
"os/exec"
"os/signal"
"path"
- "plugin"
"runtime"
"strconv"
"strings"
@@ -53,17 +57,17 @@ import (
// default params
var (
- pollerSchedule string = "60s"
- logFileName string = ""
- logMaxMegaBytes int = logging.DefaultLogMaxMegaBytes // 10MB
- logMaxBackups int = logging.DefaultLogMaxBackups
- logMaxAge int = logging.DefaultLogMaxAge
+ pollerSchedule = "60s"
+ logFileName = ""
+ logMaxMegaBytes = logging.DefaultLogMaxMegaBytes // 10MB
+ logMaxBackups = logging.DefaultLogMaxBackups
+ logMaxAge = logging.DefaultLogMaxAge
)
// init with default configuration by default it gets logged both to console and harvest.log
-var logger *logging.Logger = logging.Get()
+var logger = logging.Get()
-// signals to catch
+// SIGNALS to catch
var SIGNALS = []os.Signal{
syscall.SIGHUP,
syscall.SIGINT,
@@ -79,23 +83,16 @@ var deprecatedCollectors = map[string]string{
// Poller is the instance that starts and monitors a
// group of collectors and exporters as a single UNIX process
type Poller struct {
- name string
- target string
- options *options.Options
- pid int
- pidf string
- schedule *schedule.Schedule
- collectors []collector.Collector
- exporters []exporter.Exporter
- exporter_params *node.Node
- params *node.Node
- metadata *matrix.Matrix
- status *matrix.Matrix
-}
-
-// New returns a new instance of Poller
-func New() *Poller {
- return &Poller{}
+ name string
+ target string
+ options *options.Options
+ schedule *schedule.Schedule
+ collectors []collector.Collector
+ exporters []exporter.Exporter
+ exporterParams *node.Node
+ params *node.Node
+ metadata *matrix.Matrix
+ status *matrix.Matrix
}
// Init starts Poller, reads parameters, opens zeroLog handler, initializes metadata,
@@ -121,8 +118,8 @@ func (me *Poller) Init() error {
}
if me.params, err = conf.GetPoller(me.options.Config, me.name); err != nil {
- // seperate logger is not yet configured as it depends on setting logMaxMegaBytes, logMaxFiles later
- // Using default insance of logger which logs below error to harvest.log
+ // separate logger is not yet configured as it depends on setting logMaxMegaBytes, logMaxFiles later
+ // Using the default logger instance, which logs the error below to harvest.log
logging.SubLogger("Poller", me.name).Error().Stack().Err(err).Msg("read config")
return err
}
@@ -176,17 +173,11 @@ func (me *Poller) Init() error {
go me.handleSignals(signalChannel)
logger.Debug().Msgf("set signal handler for %v", SIGNALS)
- // write PID to file
- if err = me.registerPid(); err != nil {
- logger.Warn().Msgf("failed to write PID file: %v", err)
- return err
- }
-
// announce startup
if me.options.Daemon {
- logger.Info().Msgf("started as daemon [pid=%d] [pid file=%s]", me.pid, me.pidf)
+ logger.Info().Msgf("started as daemon [pid=%d]", os.Getpid())
} else {
- logger.Info().Msgf("started in foreground [pid=%d]", me.pid)
+ logger.Info().Msgf("started in foreground [pid=%d]", os.Getpid())
}
// load parameters from config (harvest.yml)
@@ -221,9 +212,9 @@ func (me *Poller) Init() error {
// initialize our metadata, the metadata will host status of our
// collectors and exporters, as well as ping stats to target host
- me.load_metadata()
+ me.loadMetadata()
- if me.exporter_params, err = conf.GetExporters(me.options.Config); err != nil {
+ if me.exporterParams, err = conf.GetExporters(me.options.Config); err != nil {
logger.Warn().Msgf("read exporter params: %v", err)
// @TODO just warn or abort?
}
@@ -252,7 +243,7 @@ func (me *Poller) Init() error {
continue
}
- if err = me.load_collector(c, ""); err != nil {
+ if err = me.loadCollector(c, ""); err != nil {
logger.Error().Stack().Err(err).Msgf("load collector (%s):", c)
}
}
@@ -422,44 +413,15 @@ func (me *Poller) Run() {
}
}
-// Stop gracefully exits the program by closing zeroLog file and removing pid file
+// Stop gracefully exits the program by closing zeroLog
func (me *Poller) Stop() {
- logger.Info().Msgf("cleaning up and stopping [pid=%d]", me.pid)
-
- if me.options.Daemon {
- if err := os.Remove(me.pidf); err != nil {
- logger.Error().Stack().Err(err).Msg("clean pid file")
- } else {
- logger.Debug().Msgf("cleaned pid file [%s]", me.pidf)
- }
- }
-}
-
-// get PID and write to file if we are daemon
-func (me *Poller) registerPid() error {
- me.pid = os.Getpid()
-
- if me.options.Daemon {
-
- me.pidf = path.Join(me.options.PidPath, me.name+".pid")
-
- file, err := os.Create(me.pidf)
-
- if err == nil {
- if _, err = file.WriteString(strconv.Itoa(me.pid)); err == nil {
- file.Sync()
- }
- file.Close()
- }
- return err
- }
- return nil
+ logger.Info().Msgf("cleaning up and stopping [pid=%d]", os.Getpid())
}
// set up signal disposition
-func (me *Poller) handleSignals(signal_channel chan os.Signal) {
+func (me *Poller) handleSignals(signalChannel chan os.Signal) {
for {
- sig := <-signal_channel
+ sig := <-signalChannel
logger.Info().Msgf("caught signal [%s]", sig)
me.Stop()
os.Exit(0)
@@ -487,20 +449,15 @@ func (me *Poller) ping() (float32, bool) {
// dynamically load and initialize a collector
// if there are more than one objects defined for a collector,
// then multiple collectors will be initialized
-func (me *Poller) load_collector(class, object string) error {
+func (me *Poller) loadCollector(class, object string) error {
var (
err error
- sym plugin.Symbol
- binpath string
template, custom *node.Node
collectors []collector.Collector
col collector.Collector
)
- // path to the shared object (.so file)
- binpath = path.Join(me.options.HomePath, "bin", "collectors")
-
// throw warning for deprecated collectors
if r, d := deprecatedCollectors[strings.ToLower(class)]; d {
if r != "" {
@@ -510,15 +467,6 @@ func (me *Poller) load_collector(class, object string) error {
}
}
- if sym, err = dload.LoadFuncFromModule(binpath, strings.ToLower(class), "New"); err != nil {
- return err
- }
-
- NewFunc, ok := sym.(func(*collector.AbstractCollector) collector.Collector)
- if !ok {
- return errors.New(errors.ERR_DLOAD, "New() has not expected signature")
- }
-
// load the template file(s) of the collector where we expect to find
// object name or list of objects
if template, err = collector.ImportTemplate(me.options.HomePath, "default.yaml", class); err != nil {
@@ -539,14 +487,16 @@ func (me *Poller) load_collector(class, object string) error {
object = template.GetChildContentS("object")
}
- // if object is defined, we only initialize 1 subcollector / object
+ // if object is defined, we only initialize 1 sub-collector / object
if object != "" {
- col = NewFunc(collector.New(class, object, me.options, template.Copy()))
- if err = col.Init(); err != nil {
- logger.Error().Msgf("init collector (%s:%s): %v", class, object, err)
- } else {
- collectors = append(collectors, col)
- logger.Debug().Msgf("initialized collector (%s:%s)", class, object)
+ col, err = me.newCollector(class, object, template)
+ if col != nil {
+ if err != nil {
+ logger.Error().Msgf("init collector (%s:%s): %v", class, object, err)
+ } else {
+ collectors = append(collectors, col)
+ logger.Debug().Msgf("initialized collector (%s:%s)", class, object)
+ }
}
// if template has list of objects, initialize 1 subcollector for each
} else if objects := template.GetChildS("objects"); objects != nil {
@@ -570,8 +520,12 @@ func (me *Poller) load_collector(class, object string) error {
continue
}
- col = NewFunc(collector.New(class, object.GetNameS(), me.options, template.Copy()))
- if err = col.Init(); err != nil {
+ col, err = me.newCollector(class, object.GetNameS(), template)
+ if col == nil {
+ logger.Warn().Msgf("collector is nil for collector-object (%s:%s)", class, object.GetNameS())
+ continue
+ }
+ if err != nil {
logger.Warn().Msgf("init collector-object (%s:%s): %v", class, object.GetNameS(), err)
if errors.IsErr(err, errors.ERR_CONNECTION) {
logger.Warn().Msgf("aborting collector (%s)", class)
@@ -590,17 +544,20 @@ func (me *Poller) load_collector(class, object string) error {
logger.Debug().Msgf("initialized (%s) with %d objects", class, len(collectors))
// link each collector with requested exporter & update metadata
for _, col = range collectors {
-
+ if col == nil {
+ logger.Warn().Msg("ignoring nil collector")
+ continue
+ }
name := col.GetName()
obj := col.GetObject()
- for _, exp_name := range col.WantedExporters(me.options.Config) {
- logger.Trace().Msgf("exp_name %s", exp_name)
- if exp := me.load_exporter(exp_name); exp != nil {
+ for _, expName := range col.WantedExporters(me.options.Config) {
+ logger.Trace().Msgf("expName %s", expName)
+ if exp := me.loadExporter(expName); exp != nil {
col.LinkExporter(exp)
- logger.Debug().Msgf("linked (%s:%s) to exporter (%s)", name, obj, exp_name)
+ logger.Debug().Msgf("linked (%s:%s) to exporter (%s)", name, obj, expName)
} else {
- logger.Warn().Msgf("exporter (%s) requested by (%s:%s) not available", exp_name, name, obj)
+ logger.Warn().Msgf("exporter (%s) requested by (%s:%s) not available", expName, name, obj)
}
}
@@ -618,24 +575,41 @@ func (me *Poller) load_collector(class, object string) error {
return nil
}
+func (me *Poller) newCollector(class string, object string, template *node.Node) (collector.Collector, error) {
+ name := "harvest.collector." + strings.ToLower(class)
+ mod, err := plugin.GetModule(name)
+ if err != nil {
+ logger.Error().Msgf("error getting module %s", name)
+ return nil, err
+ }
+ inst := mod.New()
+ col, ok := inst.(collector.Collector)
+ if !ok {
+ logger.Error().Msgf("collector '%s' is not a Collector", name)
+ return nil, errors.New(errors.ERR_NO_COLLECTOR, "no collectors")
+ }
+ delegate := collector.New(class, object, me.options, template.Copy())
+ err = col.Init(delegate)
+ return col, err
+}
+
// returns exporter that matches to name, if exporter is not loaded
// tries to load and return
-func (me *Poller) load_exporter(name string) exporter.Exporter {
+func (me *Poller) loadExporter(name string) exporter.Exporter {
var (
- err error
- sym plugin.Symbol
- binpath, class string
- params *node.Node
- exp exporter.Exporter
+ err error
+ class string
+ params *node.Node
+ exp exporter.Exporter
)
// stop here if exporter is already loaded
- if exp = me.get_exporter(name); exp != nil {
+ if exp = me.getExporter(name); exp != nil {
return exp
}
- if params = me.exporter_params.GetChildS(name); params == nil {
+ if params = me.exporterParams.GetChildS(name); params == nil {
logger.Warn().Msgf("exporter (%s) not defined in config", name)
return nil
}
@@ -645,20 +619,16 @@ func (me *Poller) load_exporter(name string) exporter.Exporter {
return nil
}
- binpath = path.Join(me.options.HomePath, "bin", "exporters")
-
- if sym, err = dload.LoadFuncFromModule(binpath, strings.ToLower(class), "New"); err != nil {
- logger.Error().Msgf("dload: %v", err.Error())
- return nil
- }
-
- NewFunc, ok := sym.(func(*exporter.AbstractExporter) exporter.Exporter)
- if !ok {
- logger.Error().Msg("New() has not expected signature")
+ absExp := exporter.New(class, name, me.options, params)
+ switch class {
+ case "Prometheus":
+ exp = prometheus.New(absExp)
+ case "InfluxDB":
+ exp = influxdb.New(absExp)
+ default:
+ logger.Error().Msgf("no exporter of name:type %s:%s", name, class)
return nil
}
-
- exp = NewFunc(exporter.New(class, name, me.options, params))
if err = exp.Init(); err != nil {
logger.Error().Msgf("init exporter (%s): %v", name, err)
return nil
@@ -679,7 +649,7 @@ func (me *Poller) load_exporter(name string) exporter.Exporter {
}
-func (me *Poller) get_exporter(name string) exporter.Exporter {
+func (me *Poller) getExporter(name string) exporter.Exporter {
for _, exp := range me.exporters {
if exp.GetName() == name {
return exp
@@ -689,7 +659,7 @@ func (me *Poller) get_exporter(name string) exporter.Exporter {
}
// initialize matrices to be used as metadata
-func (me *Poller) load_metadata() {
+func (me *Poller) loadMetadata() {
me.metadata = matrix.New("poller", "metadata_component")
me.metadata.NewMetricUint8("status")
@@ -720,9 +690,9 @@ var pollerCmd = &cobra.Command{
Run: startPoller,
}
-func startPoller(cmd *cobra.Command, _ []string) {
+func startPoller(_ *cobra.Command, _ []string) {
//cmd.DebugFlags() // uncomment to print flags
- poller := New()
+ poller := &Poller{}
poller.options = &args
if poller.Init() != nil {
// error already logger by poller
@@ -746,7 +716,7 @@ func init() {
flags.BoolVar(&args.Daemon, "daemon", false, "Start as daemon")
flags.IntVarP(&args.LogLevel, "loglevel", "l", 2, "Logging level (0=trace, 1=debug, 2=info, 3=warning, 4=error, 5=critical)")
flags.IntVar(&args.Profiling, "profiling", 0, "If profiling port > 0, enables profiling via localhost:PORT/debug/pprof/")
- flags.StringVar(&args.PromPort, "promPort", "", "Prometheus Port")
+ flags.IntVar(&args.PromPort, "promPort", 0, "Prometheus Port")
flags.StringVar(&args.Config, "config", configPath, "harvest config file path")
flags.StringSliceVarP(&args.Collectors, "collectors", "c", []string{}, "only start these collectors (overrides harvest.yml)")
flags.StringSliceVarP(&args.Objects, "objects", "o", []string{}, "only start these objects (overrides collector config)")
diff --git a/cmd/tools/doctor/doctor.go b/cmd/tools/doctor/doctor.go
index 4cec850e7..94f09cd24 100644
--- a/cmd/tools/doctor/doctor.go
+++ b/cmd/tools/doctor/doctor.go
@@ -3,18 +3,27 @@ package doctor
import (
"fmt"
"github.com/spf13/cobra"
+ "goharvest2/pkg/color"
"goharvest2/pkg/conf"
"gopkg.in/yaml.v3"
"io/ioutil"
"os"
+ "strings"
)
type options struct {
ShouldPrintConfig bool
+ Color string
}
var opts = &options{
ShouldPrintConfig: false,
+ Color: "auto",
+}
+
+type validation struct {
+ isValid bool
+ invalid []string // collect invalid results
}
var Cmd = &cobra.Command{
@@ -41,10 +50,12 @@ func doDoctor(path string) {
checkAll(path, contents)
}
+// checkAll runs all doctor checks
+// If all checks succeed, print nothing and exit with a return code of 0
+// Otherwise, print what failed and exit with a return code of 1
func checkAll(path string, contents []byte) {
- // TODO add checks here, see https://github.com/NetApp/harvest/issues/16
- // print nothing and exit with 0 when all checks pass
-
+ // See https://github.com/NetApp/harvest/issues/16 for more checks to add
+ color.DetectConsole(opts.Color)
// Validate that the config file can be parsed
harvestConfig := &conf.HarvestConfig{}
err := yaml.Unmarshal(contents, harvestConfig)
@@ -53,7 +64,96 @@ func checkAll(path string, contents []byte) {
os.Exit(1)
return
}
- os.Exit(0)
+
+ anyFailed := false
+ anyFailed = !checkUniquePromPorts(*harvestConfig).isValid || anyFailed
+ anyFailed = !checkExporterTypes(*harvestConfig).isValid || anyFailed
+
+ if anyFailed {
+ os.Exit(1)
+ } else {
+ os.Exit(0)
+ }
+}
+
+// checkExporterTypes validates that all exporters are of valid types
+func checkExporterTypes(config conf.HarvestConfig) validation {
+ if config.Exporters == nil {
+ return validation{}
+ }
+ invalidTypes := make(map[string]string)
+ for name, exporter := range *config.Exporters {
+ if exporter.Type == nil {
+ continue
+ }
+ switch *exporter.Type {
+ case "Prometheus", "InfluxDB":
+ break
+ default:
+ invalidTypes[name] = *exporter.Type
+ }
+ }
+
+ valid := validation{isValid: true}
+
+ if len(invalidTypes) > 0 {
+ valid.isValid = false
+ fmt.Printf("%s Unknown Exporter types found\n", color.Colorize("Error:", color.Red))
+ fmt.Println("These are probably misspellings or the wrong case.")
+ fmt.Println("Exporter types must start with a capital letter.")
+ fmt.Println("The following exporters are unknown:")
+ for name, eType := range invalidTypes {
+ valid.invalid = append(valid.invalid, eType)
+ fmt.Printf(" exporter named: [%s] has unknown type: [%s]\n", color.Colorize(name, color.Red), color.Colorize(eType, color.Yellow))
+ }
+ fmt.Println()
+ }
+ return valid
+}
+
+// checkUniquePromPorts checks that all Prometheus exporters
+// that specify a port do so uniquely
+func checkUniquePromPorts(config conf.HarvestConfig) validation {
+ if config.Exporters == nil {
+ return validation{}
+ }
+ // Add all exporters that have a port to a
+ // map of portNum -> list of names
+ seen := make(map[int][]string)
+ for name, exporter := range *config.Exporters {
+ if exporter.Port == nil || exporter.Type == nil || *exporter.Type != "Prometheus" {
+ continue
+ }
+ previous := seen[*exporter.Port]
+ previous = append(previous, name)
+ seen[*exporter.Port] = previous
+ }
+
+ valid := validation{isValid: true}
+ for _, exporterNames := range seen {
+ if len(exporterNames) == 1 {
+ continue
+ }
+ valid.isValid = false
+ for _, name := range exporterNames {
+ valid.invalid = append(valid.invalid, name)
+ }
+ break
+ }
+
+ if !valid.isValid {
+ fmt.Printf("%s: Exporter PromPort conflict\n", color.Colorize("Error", color.Red))
+ fmt.Println(" Prometheus exporters must specify unique ports. Change the following exporters to use unique ports:")
+ for port, exporterNames := range seen {
+ if len(exporterNames) == 1 {
+ continue
+ }
+ names := strings.Join(exporterNames, ", ")
+ fmt.Printf(" port: [%s] duplicateExporters: [%s]\n", color.Colorize(port, color.Red), color.Colorize(names, color.Yellow))
+ }
+ fmt.Println()
+ }
+ return valid
}
func printRedactedConfig(path string, contents []byte) string {
@@ -79,6 +179,7 @@ func printRedactedConfig(path string, contents []byte) string {
}
func sanitize(nodes []*yaml.Node) {
+ // Update this list when there are additional tokens to sanitize
sanitizeWords := []string{"username", "password", "grafana_api_token", "token",
"host", "addr"}
for i, node := range nodes {
@@ -86,10 +187,12 @@ func sanitize(nodes []*yaml.Node) {
continue
}
if node.Kind == yaml.ScalarNode && node.ShortTag() == "!!str" {
- // Update this list of conditionals if there are other tokens you want to sanitize
value := node.Value
for _, word := range sanitizeWords {
if value == word {
+ if nodes[i-1].Value == "auth_style" {
+ continue
+ }
nodes[i+1].SetString("-REDACTED-")
}
}
@@ -120,4 +223,6 @@ func init() {
false,
"print config to console with sensitive info redacted",
)
+
+ Cmd.Flags().StringVar(&opts.Color, "color", "auto", "When to use colors. One of: auto | always | never. Auto will guess based on tty.")
}
diff --git a/cmd/tools/doctor/doctor_test.go b/cmd/tools/doctor/doctor_test.go
index 755a9056d..ad4b20036 100644
--- a/cmd/tools/doctor/doctor_test.go
+++ b/cmd/tools/doctor/doctor_test.go
@@ -15,6 +15,7 @@ func TestRedaction(t *testing.T) {
assertRedacted(t, "# foo\nusername: pass\n#foot", `username: -REDACTED-`)
assertRedacted(t, `host: 1.2.3.4`, `host: -REDACTED-`)
assertRedacted(t, `addr: 1.2.3.4`, `addr: -REDACTED-`)
+ assertRedacted(t, "auth_style: password\nusername: cat", "auth_style: password\nusername: -REDACTED-")
}
func assertRedacted(t *testing.T, input, redacted string) {
@@ -27,10 +28,10 @@ func assertRedacted(t *testing.T, input, redacted string) {
}
func TestConfigToStruct(t *testing.T) {
- path := "testConfig.yml"
+ path := "testdata/testConfig.yml"
err := conf.LoadHarvestConfig(path)
if err != nil {
- return
+ panic(err)
}
if conf.Config.Defaults.Password != "123#abc" {
t.Fatalf(`expected harvestConfig.Defaults.Password to be 123#abc, actual=[%+v]`,
@@ -46,15 +47,63 @@ func TestConfigToStruct(t *testing.T) {
*conf.Config.Defaults.Collectors)
}
- allowedRegexes := (*conf.Config.Exporters)["influxy"].AllowedAddrsRegex
+ allowedRegexes := (*conf.Config.Exporters)["prometheus"].AllowedAddrsRegex
if (*allowedRegexes)[0] != "^192.168.0.\\d+$" {
t.Fatalf(`expected allow_addrs_regex to be ^192.168.0.\d+$ actual=%+v`,
(*allowedRegexes)[0])
}
- collectors := (*conf.Config.Pollers)["infinity2"].Collectors
+ influxyAddr := (*conf.Config.Exporters)["influxy"].Addr
+ if (*influxyAddr) != "localhost" {
+ t.Fatalf(`expected addr to be "localhost", actual=%+v`, (*influxyAddr))
+ }
+
+ influxyURL := (*conf.Config.Exporters)["influxz"].Url
+ if (*influxyURL) != "www.example.com/influxdb" {
+ t.Fatalf(`expected addr to be "www.example.com/influxdb", actual=%+v`, (*influxyURL))
+ }
+
+ infinity := (*conf.Config.Pollers)["infinity2"]
+ collectors := infinity.Collectors
if (*collectors)[0] != "Zapi" {
t.Fatalf(`expected infinity2 collectors to contain Zapi actual=%+v`,
(*collectors)[0])
}
+ if infinity.IsKfs != nil && !*infinity.IsKfs {
+ t.Fatalf(`expected infinity2 is_kfs to be false, but was true`)
+ }
+ sim1 := (*conf.Config.Pollers)["sim-0001"]
+ if !*sim1.IsKfs {
+ t.Fatalf(`expected sim-0001 is_kfs to be true, but was false`)
+ }
+}
+
+func TestUniquePromPorts(t *testing.T) {
+ path := "testdata/testConfig.yml"
+ err := conf.LoadHarvestConfig(path)
+ if err != nil {
+ panic(err)
+ }
+ valid := checkUniquePromPorts(conf.Config)
+ if valid.isValid {
+ t.Fatal(`expected isValid to be false since there are duplicate prom ports, actual was isValid=true`)
+ }
+ if len(valid.invalid) != 2 {
+ t.Fatalf(`expected checkUniquePromPorts to return 2 invalid results, actual was %s`, valid.invalid)
+ }
+}
+
+func TestExporterTypesAreValid(t *testing.T) {
+ path := "testdata/testConfig.yml"
+ err := conf.LoadHarvestConfig(path)
+ if err != nil {
+ panic(err)
+ }
+ valid := checkExporterTypes(conf.Config)
+ if valid.isValid {
+ t.Fatalf(`expected isValid to be false since there are invalid exporter types, actual was %+v`, valid)
+ }
+ if valid.invalid[0] != "Foo" {
+ t.Fatalf(`expected invalid exporter of type Foo, actual was %+v`, valid)
+ }
}
diff --git a/cmd/tools/doctor/testConfig.yml b/cmd/tools/doctor/testdata/testConfig.yml
similarity index 91%
rename from cmd/tools/doctor/testConfig.yml
rename to cmd/tools/doctor/testdata/testConfig.yml
index 8776b6378..94f2f4343 100644
--- a/cmd/tools/doctor/testConfig.yml
+++ b/cmd/tools/doctor/testdata/testConfig.yml
@@ -7,6 +7,8 @@ Exporters:
exporter: Prometheus
addr: 0.0.0.0
port: 12990
+ allow_addrs_regex:
+ - ^192.168.0.\d+$
pluto: # name of your exporter, can be any valid yaml string
exporter: Prometheus
port: 12990 # ignored since this exporter contains a consul block
@@ -22,9 +24,14 @@ Exporters:
bucket: harvest
org: harvest
token: abcdefghijklmnopqrstuvwxyz
- allow_addrs_regex:
- - ^192.168.0.\d+$
-
+ influxz:
+ exporter: InfluxDB
+ url: www.example.com/influxdb
+ bucket: harvest
+ org: harvest
+ token: abcdefghijklmnopqrstuvwxyz
+ invalid-type:
+ exporter: Foo
Defaults:
collectors:
- Zapi
@@ -78,4 +85,7 @@ Pollers:
addr: 10.0.0.2
collectors: [ Zapi ]
+ sim-0001:
+ is_kfs: true
+
ll: grafana_api_token grafana_api_token
diff --git a/cmd/tools/generate/generate.go b/cmd/tools/generate/generate.go
new file mode 100644
index 000000000..d2e90ff94
--- /dev/null
+++ b/cmd/tools/generate/generate.go
@@ -0,0 +1,52 @@
+package generate
+
+import (
+ "github.com/spf13/cobra"
+ "goharvest2/pkg/color"
+ "goharvest2/pkg/conf"
+ "os"
+ "text/template"
+)
+
+var Cmd = &cobra.Command{
+ Use: "generate",
+ Short: "Generate Harvest related files",
+ Long: "Generate Harvest related files",
+}
+
+var systemdCmd = &cobra.Command{
+ Use: "systemd",
+ Short: "generate Harvest systemd target for all pollers defined in config",
+ Run: doSystemd,
+}
+
+func doSystemd(cmd *cobra.Command, _ []string) {
+ var config = cmd.Root().PersistentFlags().Lookup("config")
+ generateSystemd(config.Value.String())
+}
+
+func generateSystemd(path string) {
+ err := conf.LoadHarvestConfig(path)
+ if err != nil {
+ return
+ }
+ if conf.Config.Pollers == nil {
+ return
+ }
+ t, err := template.New("target.tmpl").ParseFiles("service/contrib/target.tmpl")
+ if err != nil {
+ panic(err)
+ }
+ color.DetectConsole("")
+ println("Save the following to " + color.Colorize("/etc/systemd/system/harvest.target", color.Green) +
+ " or " + color.Colorize("| sudo tee /etc/systemd/system/harvest.target", color.Green))
+ println("and then run " + color.Colorize("systemctl daemon-reload", color.Green))
+ err = t.Execute(os.Stdout, conf.Config)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func init() {
+ Cmd.AddCommand(systemdCmd)
+}
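
The generate systemd command renders service/contrib/target.tmpl (not included in this diff) against conf.Config using Go's text/template. The self-contained sketch below shows only that mechanism, with an inline template and a stand-in config type; the template body, the poller@<name>.service unit naming, and the config shape are assumptions for illustration and are not the file Harvest ships.

    package main

    import (
        "os"
        "text/template"
    )

    // config is a stand-in for conf.Config; only the poller names matter here.
    type config struct {
        Pollers map[string]struct{}
    }

    func main() {
        // Hypothetical template body; the real one is service/contrib/target.tmpl.
        const tmpl = `[Unit]
    Description=Harvest target (illustrative)
    Wants={{range $name, $v := .Pollers}}poller@{{$name}}.service {{end}}

    [Install]
    WantedBy=multi-user.target
    `
        t := template.Must(template.New("target").Parse(tmpl))
        cfg := config{Pollers: map[string]struct{}{"cluster-01": {}, "cluster-02": {}}}
        if err := t.Execute(os.Stdout, cfg); err != nil {
            panic(err)
        }
    }
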
diff --git a/cmd/tools/grafana/grafana.go b/cmd/tools/grafana/grafana.go
index 5f07c7eb9..53759e32e 100644
--- a/cmd/tools/grafana/grafana.go
+++ b/cmd/tools/grafana/grafana.go
@@ -35,19 +35,20 @@ var (
)
type options struct {
- command string // one of: import, export, clean
- addr string // URL of Grafana server (e.g. "http://localhost:3000")
- token string // API token issued by Grafana server
- dir string // Directory from which to import dashboards (e.g. "opt/harvest/grafana/prometheus")
- folder string // Grafana folder where to upload from where to download dashboards
- folderId int64
- folderUid string
- datasource string
- variable bool
- client *http.Client
- headers http.Header
- config string
- useHttps bool
+ command string // one of: import, export, clean
+ addr string // URL of Grafana server (e.g. "http://localhost:3000")
+ token string // API token issued by Grafana server
+ dir string // Directory from which to import dashboards (e.g. "opt/harvest/grafana/prometheus")
+ folder string // Grafana folder to upload dashboards to, or to download them from
+ folderId int64
+ folderUid string
+ datasource string
+ variable bool
+ client *http.Client
+ headers http.Header
+ config string
+ useHttps bool
+ useInsecureTLS bool
}
func doExport(_ *cobra.Command, _ []string) {
@@ -108,15 +109,16 @@ func adjustOptions() {
opts.dir = path.Join(homePath, "grafana", opts.dir)
}
- // full URL
- opts.addr = strings.TrimPrefix(opts.addr, "http://")
- opts.addr = strings.TrimPrefix(opts.addr, "https://")
- opts.addr = strings.TrimSuffix(opts.addr, "/")
-
- if opts.useHttps {
- opts.addr = "https://" + opts.addr
- } else {
- opts.addr = "http://" + opts.addr
+ // When opts.addr starts with https, don't change it
+ if !strings.HasPrefix(opts.addr, "https://") {
+ opts.addr = strings.TrimPrefix(opts.addr, "http://")
+ opts.addr = strings.TrimPrefix(opts.addr, "https://")
+ opts.addr = strings.TrimSuffix(opts.addr, "/")
+ if opts.useHttps {
+ opts.addr = "https://" + opts.addr
+ } else {
+ opts.addr = "http://" + opts.addr
+ }
}
}
@@ -265,7 +267,7 @@ func checkToken(opts *options, ignoreConfig bool) error {
if params, err = conf.LoadConfig(configPath); err != nil {
return err
} else if params == nil {
- return errors.New(fmt.Sprintf("config [%s] not found", configPath))
+ return fmt.Errorf("config [%s] not found", configPath)
}
if tools = params.GetChildS("Tools"); tools != nil {
@@ -290,7 +292,8 @@ func checkToken(opts *options, ignoreConfig bool) error {
opts.client = &http.Client{Timeout: time.Duration(clientTimeout) * time.Second}
if strings.HasPrefix(opts.addr, "https://") {
- opts.client.Transport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
+ tlsConfig := &tls.Config{InsecureSkipVerify: opts.useInsecureTLS}
+ opts.client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
}
// send random request to validate token
result, status, code, err := sendRequest(opts, "GET", "/api/folders/aaaaaaa", nil)
@@ -502,7 +505,7 @@ var opts = &options{}
var GrafanaCmd = &cobra.Command{
Use: "grafana",
- Short: "import/export Grafana dashboards",
+ Short: "Import/export Grafana dashboards",
Long: "Grafana tool - Import/Export Grafana dashboards",
}
@@ -531,4 +534,5 @@ func init() {
GrafanaCmd.PersistentFlags().StringVarP(&opts.datasource, "datasource", "s", grafanaDataSource, "Grafana datasource for the dashboards")
GrafanaCmd.PersistentFlags().BoolVarP(&opts.variable, "variable", "v", false, "use datasource as variable, overrides: --datasource")
GrafanaCmd.PersistentFlags().BoolVarP(&opts.useHttps, "https", "S", false, "use HTTPS")
+ GrafanaCmd.PersistentFlags().BoolVarP(&opts.useInsecureTLS, "insecure", "k", false, "Allow insecure server connections when using SSL")
}
diff --git a/cmd/tools/grafana/grafana_test.go b/cmd/tools/grafana/grafana_test.go
index 119dcee53..83e4e35d8 100644
--- a/cmd/tools/grafana/grafana_test.go
+++ b/cmd/tools/grafana/grafana_test.go
@@ -17,3 +17,32 @@ func TestCheckVersion(t *testing.T) {
}
}
}
+
+func TestHttpsAddr(t *testing.T) {
+ opts.addr = "https://1.1.1.1:3000"
+ adjustOptions()
+ if opts.addr != "https://1.1.1.1:3000" {
+ t.Errorf("Expected opts.addr to be %s but got %s", "https://1.1.1.1:3000", opts.addr)
+ }
+
+ opts.addr = "https://1.1.1.1:3000"
+ opts.useHttps = false // addr takes precedence over useHttps
+ adjustOptions()
+ if opts.addr != "https://1.1.1.1:3000" {
+ t.Errorf("Expected opts.addr to be %s but got %s", "https://1.1.1.1:3000", opts.addr)
+ }
+
+ opts.addr = "http://1.1.1.1:3000"
+ adjustOptions()
+ if opts.addr != "http://1.1.1.1:3000" {
+ t.Errorf("Expected opts.addr to be %s but got %s", "http://1.1.1.1:3000", opts.addr)
+ }
+
+ // Old way of specifying https
+ opts.addr = "http://1.1.1.1:3000"
+ opts.useHttps = true
+ adjustOptions()
+ if opts.addr != "https://1.1.1.1:3000" {
+ t.Errorf("Expected opts.addr to be %s but got %s", "https://1.1.1.1:3000", opts.addr)
+ }
+}
diff --git a/conf/zapi/7mode/8.6.0/volume.yaml b/conf/zapi/7mode/8.6.0/volume.yaml
index c4fef4eb7..88efe279a 100644
--- a/conf/zapi/7mode/8.6.0/volume.yaml
+++ b/conf/zapi/7mode/8.6.0/volume.yaml
@@ -40,7 +40,4 @@ export_options:
- style
instance_labels:
- state
- graphite_leafs:
- - svm.{svm}.vol.{volume}
- - node.{node}.aggr.{aggr}.vol.{volume}
diff --git a/conf/zapi/cdot/9.8.0/disk.yaml b/conf/zapi/cdot/9.8.0/disk.yaml
index 3222242e8..8f7713215 100644
--- a/conf/zapi/cdot/9.8.0/disk.yaml
+++ b/conf/zapi/cdot/9.8.0/disk.yaml
@@ -13,11 +13,13 @@ counters:
- ^disk-type => type
- ^is-shared => shared
- ^model => model
+ - ^serial-number => serial_number
- ^shelf => shelf
- ^shelf-bay => shelf_bay
- disk-ownership-info:
- ^home-node-name => node
- ^owner-node-name => owner_node
+ - ^is-failed => failed
- disk-stats-info:
# - average-latency
# - disk-io-kbps
@@ -27,7 +29,7 @@ counters:
- sectors-written
- disk-raid-info:
- disk-outage-info:
- - reason => outage
+ - ^reason => outage
plugins:
- LabelAgent:
@@ -45,5 +47,7 @@ export_options:
- shared
- shelf
- shelf_bay
+ - serial_number
+ - failed
graphite_leafs:
- node.{node}.disk.{disk} # take last part
diff --git a/conf/zapi/cdot/9.8.0/lun.yaml b/conf/zapi/cdot/9.8.0/lun.yaml
index 9a1ddae97..9df4b5f9f 100644
--- a/conf/zapi/cdot/9.8.0/lun.yaml
+++ b/conf/zapi/cdot/9.8.0/lun.yaml
@@ -31,5 +31,3 @@ export_options:
- svm
instance_labels:
- state
- graphite_leafs:
- - svm.{svm}.volume.{volume}.lun.{lun}
diff --git a/conf/zapi/cdot/9.8.0/snapmirror.yaml b/conf/zapi/cdot/9.8.0/snapmirror.yaml
index 6c90b4329..798d1021d 100644
--- a/conf/zapi/cdot/9.8.0/snapmirror.yaml
+++ b/conf/zapi/cdot/9.8.0/snapmirror.yaml
@@ -55,8 +55,3 @@ export_options:
- destination_node_limit
- source_node_limit
- group_type
- graphite_leafs:
- - node.{destination_node}.snapmirror.dest.{destination_volume}
- - node.{source_node}.snapmirror.src.{source_volume}
- - svm.{destination_vserver}.snapmirror.dest.{destination_volume}
- - svm.{source_vserver}.snapmirror.src.{source_volume}
diff --git a/conf/zapiperf/7mode/8.2.5/fcp.yaml b/conf/zapiperf/7mode/8.2.5/fcp.yaml
index fe205c92e..ae8e4ec73 100644
--- a/conf/zapiperf/7mode/8.2.5/fcp.yaml
+++ b/conf/zapiperf/7mode/8.2.5/fcp.yaml
@@ -23,7 +23,6 @@ plugins:
export_options:
instance_keys:
- - node
- port
- speed
graphite_leafs:
diff --git a/conf/zapiperf/7mode/8.2.5/hostadapter.yaml b/conf/zapiperf/7mode/8.2.5/hostadapter.yaml
index c935ad6fb..4b7894807 100644
--- a/conf/zapiperf/7mode/8.2.5/hostadapter.yaml
+++ b/conf/zapiperf/7mode/8.2.5/hostadapter.yaml
@@ -14,5 +14,3 @@ counters:
export_options:
instance_keys:
- hostadapter
- graphite_leafs:
- - node.{node}.hostadapter.{hostadapter}
diff --git a/conf/zapiperf/7mode/8.2.5/nfsv3_node.yaml b/conf/zapiperf/7mode/8.2.5/nfsv3_node.yaml
index 4b7696256..5011c8611 100644
--- a/conf/zapiperf/7mode/8.2.5/nfsv3_node.yaml
+++ b/conf/zapiperf/7mode/8.2.5/nfsv3_node.yaml
@@ -8,7 +8,7 @@ global_labels:
counters:
- instance_name => node
- - nfsv3_ops => ops
+ - nfsv3_ops => total_ops
- nfsv3_read_ops => read_ops
- nfsv3_write_ops => write_ops
- nfsv3_read_latency => read_avg_latency
@@ -16,8 +16,7 @@ counters:
- nfsv3_avg_op_latency => latency
export_options:
- instance_keys:
- - nfsv
+ require_instance_keys: false
graphite_leafs:
- node.{node}.nfsv3
diff --git a/conf/zapiperf/7mode/8.2.5/nfsv4_node.yaml b/conf/zapiperf/7mode/8.2.5/nfsv4_node.yaml
index 88d37ab91..8d8d31658 100644
--- a/conf/zapiperf/7mode/8.2.5/nfsv4_node.yaml
+++ b/conf/zapiperf/7mode/8.2.5/nfsv4_node.yaml
@@ -9,14 +9,13 @@ global_labels:
counters:
- instance_name => node
- nfsv3_avg_latency => latency
- - nfsv4_ops => ops
+ - nfsv4_ops => total_ops
- nfsv4_ready_latency => read_avg_latency
- nfsv4_write_latency => write_avg_latency
- nfsv4_read_ops => read_ops
- nfsv4_write_ops => write_ops
export_options:
- instance_keys:
- - nfsv
+ require_instance_keys: false
graphite_leafs:
- node.{node}.nfsv4
diff --git a/conf/zapiperf/7mode/8.2.5/nic_common.yaml b/conf/zapiperf/7mode/8.2.5/nic_common.yaml
index 52fddfc0c..fc74932c2 100644
--- a/conf/zapiperf/7mode/8.2.5/nic_common.yaml
+++ b/conf/zapiperf/7mode/8.2.5/nic_common.yaml
@@ -39,6 +39,4 @@ export_options:
- speed
- state
- type
- graphite_leafs:
- - node.{node_name}.eth_port.{instance_name}
diff --git a/conf/zapiperf/7mode/8.2.5/processor.yaml b/conf/zapiperf/7mode/8.2.5/processor.yaml
index 3b16f4af5..5254b7606 100644
--- a/conf/zapiperf/7mode/8.2.5/processor.yaml
+++ b/conf/zapiperf/7mode/8.2.5/processor.yaml
@@ -13,7 +13,7 @@ counters:
plugins:
Aggregator:
- - node<>node_cpu
+ - node<>node_cpu
# only export node-level averages from plugin
# set this true or comment, to get data for each cpu
diff --git a/conf/zapiperf/7mode/8.2.5/wafl_hya_sizer.yaml b/conf/zapiperf/7mode/8.2.5/wafl_hya_sizer.yaml
index 196287b1d..cdc2cb207 100644
--- a/conf/zapiperf/7mode/8.2.5/wafl_hya_sizer.yaml
+++ b/conf/zapiperf/7mode/8.2.5/wafl_hya_sizer.yaml
@@ -12,5 +12,3 @@ counters:
export_options:
require_instance_keys: false
- graphite_leafs:
- - node.{node}.flashpool.{flashpool}
diff --git a/conf/zapiperf/cdot/9.8.0/cifs_node.yaml b/conf/zapiperf/cdot/9.8.0/cifs_node.yaml
index 926021032..ba32d49df 100644
--- a/conf/zapiperf/cdot/9.8.0/cifs_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/cifs_node.yaml
@@ -19,8 +19,6 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.cifs
override:
- cifs_op_count: rate
diff --git a/conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml b/conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
index cca528264..9515dd1e3 100644
--- a/conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
+++ b/conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
@@ -23,8 +23,6 @@ counters:
export_options:
instance_keys:
- svm
- graphite_leafs:
- - svm.{svm}.cifs
override:
- cifs_op_count: rate
diff --git a/conf/zapiperf/cdot/9.8.0/copy_manager.yaml b/conf/zapiperf/cdot/9.8.0/copy_manager.yaml
index c11799675..712528840 100644
--- a/conf/zapiperf/cdot/9.8.0/copy_manager.yaml
+++ b/conf/zapiperf/cdot/9.8.0/copy_manager.yaml
@@ -19,5 +19,3 @@ override:
export_options:
instance_keys:
- svm
- graphite_leafs:
- - svm.{svm}.copy_manager
diff --git a/conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml b/conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
index e607d33a2..3630a6865 100644
--- a/conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
+++ b/conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
@@ -34,5 +34,3 @@ override:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.flashcache.{flashcache}
diff --git a/conf/zapiperf/cdot/9.8.0/fcp.yaml b/conf/zapiperf/cdot/9.8.0/fcp.yaml
index 8a1fe85ef..41cc5bcc8 100644
--- a/conf/zapiperf/cdot/9.8.0/fcp.yaml
+++ b/conf/zapiperf/cdot/9.8.0/fcp.yaml
@@ -48,5 +48,3 @@ export_options:
- node
- port
- speed
- graphite_leafs:
- - node.{node}.fcp.{port}
diff --git a/conf/zapiperf/cdot/9.8.0/fcp_lif.yaml b/conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
index bc24e8e23..5b2778dcd 100644
--- a/conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
+++ b/conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
@@ -29,5 +29,3 @@ export_options:
- svm
- node
- port
- graphite_leafs:
- - svm.{svm}.fcp_lif.{lif}
diff --git a/conf/zapiperf/cdot/9.8.0/fcvi.yaml b/conf/zapiperf/cdot/9.8.0/fcvi.yaml
index 82ce88cc8..1d4873d4c 100644
--- a/conf/zapiperf/cdot/9.8.0/fcvi.yaml
+++ b/conf/zapiperf/cdot/9.8.0/fcvi.yaml
@@ -16,5 +16,3 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.fcvi.{fcvi}
diff --git a/conf/zapiperf/cdot/9.8.0/hostadapter.yaml b/conf/zapiperf/cdot/9.8.0/hostadapter.yaml
index 9e9c947b0..00b397c4f 100644
--- a/conf/zapiperf/cdot/9.8.0/hostadapter.yaml
+++ b/conf/zapiperf/cdot/9.8.0/hostadapter.yaml
@@ -15,5 +15,3 @@ export_options:
instance_keys:
- node
- hostadapter
- graphite_leafs:
- - node.{node}.hostadapter.{hostadapter}
diff --git a/conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml b/conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
index cb4ec708e..d8fb84d69 100644
--- a/conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
+++ b/conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
@@ -30,5 +30,3 @@ export_options:
- lif
- node
- svm
- graphite_leafs:
- - svm.{svm}.iscsi_lif.{lif}
diff --git a/conf/zapiperf/cdot/9.8.0/lif.yaml b/conf/zapiperf/cdot/9.8.0/lif.yaml
index 1d6ef2674..93abc81fa 100644
--- a/conf/zapiperf/cdot/9.8.0/lif.yaml
+++ b/conf/zapiperf/cdot/9.8.0/lif.yaml
@@ -22,5 +22,3 @@ export_options:
- node
- svm
- port
- graphite_leafs:
- - svm.{svm}.lif.{lif}
diff --git a/conf/zapiperf/cdot/9.8.0/lun.yaml b/conf/zapiperf/cdot/9.8.0/lun.yaml
index 4ef613741..23c387264 100644
--- a/conf/zapiperf/cdot/9.8.0/lun.yaml
+++ b/conf/zapiperf/cdot/9.8.0/lun.yaml
@@ -45,5 +45,3 @@ export_options:
- lun # edited by plugin
- volume # added by plugin
- svm
- graphite_leafs:
- - svm.{svm}.vol.{volume}.{lun}
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv3.yaml b/conf/zapiperf/cdot/9.8.0/nfsv3.yaml
index 16d0da7f0..879cf3e5c 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv3.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv3.yaml
@@ -91,6 +91,3 @@ rewrite_as_label:
export_options:
instance_keys:
- svm
- - nfsv
- graphite_leafs:
- - svm.{svm}.nfsv3
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml b/conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
index bcfe2ea80..dab93031a 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
@@ -8,7 +8,7 @@ global_labels:
counters:
- instance_name => node
- - nfsv3_ops => ops
+ - nfsv3_ops => total_ops # "nfs_ops" already used in system_node.yaml
- nfsv3_read_ops => read_ops
- nfsv3_write_ops => write_ops
- nfsv3_throughput => throughput
@@ -91,7 +91,3 @@ override:
export_options:
instance_keys:
- node
- - nfsv
- graphite_leafs:
- - node.{node}.nfsv3
-
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv4.yaml b/conf/zapiperf/cdot/9.8.0/nfsv4.yaml
index 0aa19f8ab..7a03d5f52 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv4.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv4.yaml
@@ -94,13 +94,6 @@ rewrite_as_label:
- _avg_latency => request_latency: latency_type
- _total => request_total: request_type
-export_options:
- instance_keys:
- - svm
- - nfsv
- graphite_leafs:
- - svm.{svm}.nfsv4
-
override:
- access_total: rate
- close_total: rate
@@ -141,3 +134,8 @@ override:
- setclientid_total: rate
- verify_total: rate
- write_total: rate
+
+export_options:
+ instance_keys:
+ - svm
+
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml b/conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
index 6a9f39f7f..ddc91ed40 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
@@ -125,9 +125,6 @@ rewrite_as_label:
export_options:
instance_keys:
- svm
- - nfsv
- graphite_leafs:
- - svm.{svm}.nfsv4_1
override:
- access_total: rate
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml b/conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
index e0d181461..0c154ecc9 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
@@ -9,7 +9,7 @@ global_labels:
counters:
- instance_name => node
- latency
- - total_ops => ops
+ - total_ops
- nfs41_read_throughput => read_throughput
- nfs41_throughput => throughput
- nfs41_write_throughput => write_throughput
@@ -118,13 +118,6 @@ counters:
- want_delegation_avg_latency
- write_avg_latency
-export_options:
- instance_keys:
- - node
- - nfsv
- graphite_leafs:
- - node.{node}.nfsv4_1
-
override:
- access_total: rate
- backchannel_ctl_total: rate
@@ -179,3 +172,7 @@ override:
- verify_total: rate
- want_delegation_total: rate
- write_total: rate
+
+export_options:
+ instance_keys:
+ - node
diff --git a/conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml b/conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
index b930e034a..52c701399 100644
--- a/conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
@@ -9,7 +9,7 @@ global_labels:
counters:
- instance_name => node
- latency
- - total_ops => ops
+ - total_ops
- nfs4_read_throughput => read_throughput
- nfs4_throughput => throughput
- nfs4_write_throughput => write_throughput
@@ -94,13 +94,6 @@ rewrite_as_label:
- _avg_latency => request_latency: latency_type
- _total => request_total: request_type
-export_options:
- instance_keys:
- - node
- - nfsv
- graphite_leafs:
- - node.{node}.nfsv4
-
override:
- access_total: rate
- close_total: rate
@@ -141,3 +134,7 @@ override:
- setclientid_total: rate
- verify_total: rate
- write_total: rate
+
+export_options:
+ instance_keys:
+ - node
diff --git a/conf/zapiperf/cdot/9.8.0/nic_common.yaml b/conf/zapiperf/cdot/9.8.0/nic_common.yaml
index 16a463108..a53097084 100644
--- a/conf/zapiperf/cdot/9.8.0/nic_common.yaml
+++ b/conf/zapiperf/cdot/9.8.0/nic_common.yaml
@@ -40,6 +40,4 @@ export_options:
- speed
- state
- type
- graphite_leafs:
- - node.{node_name}.eth_port.{instance_name}
diff --git a/conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml b/conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
index aba885984..7f62e465c 100644
--- a/conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+++ b/conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
@@ -19,5 +19,3 @@ export_options:
instance_keys:
- node
- fabricpool
- graphite_leafs:
- - node.{node}.fabricpool
diff --git a/conf/zapiperf/cdot/9.8.0/path.yaml b/conf/zapiperf/cdot/9.8.0/path.yaml
index 56e7dad79..a624b152d 100644
--- a/conf/zapiperf/cdot/9.8.0/path.yaml
+++ b/conf/zapiperf/cdot/9.8.0/path.yaml
@@ -27,6 +27,4 @@ export_options:
- node
- hostadapter # from plugin
- target_wwpn # from plugin
- graphite_leafs:
- - node.{node}.hostadapter.{hostadapter}.target_wwpn.{target_wwpn}
diff --git a/conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml b/conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
index 4f4967d49..46f43cc03 100644
--- a/conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
+++ b/conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
@@ -26,6 +26,3 @@ export_options:
- node
- aggr
- disk_type
-
- graphite_leafs:
- - node.{node}.headroom.aggr.{aggr}.{disk_type}
diff --git a/conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml b/conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
index 0853bff5d..99c84cd3d 100644
--- a/conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
+++ b/conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
@@ -20,5 +20,3 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.headroom.processor
diff --git a/conf/zapiperf/cdot/9.8.0/system_node.yaml b/conf/zapiperf/cdot/9.8.0/system_node.yaml
index 32f6a243e..ecdf3b412 100644
--- a/conf/zapiperf/cdot/9.8.0/system_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/system_node.yaml
@@ -35,5 +35,3 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.system
diff --git a/conf/zapiperf/cdot/9.8.0/token_manager.yaml b/conf/zapiperf/cdot/9.8.0/token_manager.yaml
index afb3f508b..538eff61a 100644
--- a/conf/zapiperf/cdot/9.8.0/token_manager.yaml
+++ b/conf/zapiperf/cdot/9.8.0/token_manager.yaml
@@ -27,5 +27,3 @@ export_options:
instance_keys:
- node
- token
- graphite_leafs:
- - node.{node}.msft_odx
diff --git a/conf/zapiperf/cdot/9.8.0/volume.yaml b/conf/zapiperf/cdot/9.8.0/volume.yaml
index acf65561c..9b2ea539f 100644
--- a/conf/zapiperf/cdot/9.8.0/volume.yaml
+++ b/conf/zapiperf/cdot/9.8.0/volume.yaml
@@ -32,5 +32,3 @@ export_options:
- svm
- aggr
- type
- graphite_leafs:
- - svm.{svm}.{node}.{aggr}.{volume}
diff --git a/conf/zapiperf/cdot/9.8.0/volume_node.yaml b/conf/zapiperf/cdot/9.8.0/volume_node.yaml
index d34e73fb1..1a6e626c7 100644
--- a/conf/zapiperf/cdot/9.8.0/volume_node.yaml
+++ b/conf/zapiperf/cdot/9.8.0/volume_node.yaml
@@ -41,5 +41,3 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.vol_summary
diff --git a/conf/zapiperf/cdot/9.8.0/wafl.yaml b/conf/zapiperf/cdot/9.8.0/wafl.yaml
index fb3639ed7..e8ccb1809 100644
--- a/conf/zapiperf/cdot/9.8.0/wafl.yaml
+++ b/conf/zapiperf/cdot/9.8.0/wafl.yaml
@@ -41,5 +41,3 @@ override:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.wafl
diff --git a/conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml b/conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
index afefb1740..98aca0b4d 100644
--- a/conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
+++ b/conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
@@ -15,8 +15,6 @@ export_options:
instance_keys:
- svm
- volume
- graphite_leafs:
- - svm.{svm}.vol.{volume}
override:
- cloud_bin_operation: delta
diff --git a/conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml b/conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
index 841d2b40b..610ebf9cf 100644
--- a/conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
+++ b/conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
@@ -31,5 +31,3 @@ override:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.flashpool.{flashpool}
diff --git a/conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml b/conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml
index 1a1c8b266..e42d4de36 100644
--- a/conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml
+++ b/conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml
@@ -13,5 +13,3 @@ counters:
export_options:
instance_keys:
- node
- graphite_leafs:
- - node.{node}.flashpool.{flashpool}
diff --git a/conf/zapiperf/cdot/9.8.0/workload.yaml b/conf/zapiperf/cdot/9.8.0/workload.yaml
new file mode 100644
index 000000000..5140a9d0e
--- /dev/null
+++ b/conf/zapiperf/cdot/9.8.0/workload.yaml
@@ -0,0 +1,52 @@
+
+# object Workload provides counters about workload usage
+
+name: Workload
+query: workload
+object: qos
+
+instance_key: uuid
+
+# recommended to use a large interval, since workload objects are expensive
+schedule:
+ - counter: 1200s
+ - instance: 600s
+ - data: 180s
+
+counters:
+ - instance_name
+ - instance_uuid
+ - latency
+ - ops
+ - read_data
+ - read_io_type
+ - read_latency
+ - read_ops
+ - sequential_reads
+ - sequential_writes
+ - total_data
+ - write_data
+ - write_latency
+ - write_ops
+
+override:
+ - read_io_type_base: delta
+
+qos_labels:
+ - vserver => svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy-group
+ - wid
+
+export_options:
+ instance_keys:
+ - wid
+ - svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy_group
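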
diff --git a/conf/zapiperf/cdot/9.8.0/workload_detail.yaml b/conf/zapiperf/cdot/9.8.0/workload_detail.yaml
new file mode 100644
index 000000000..d14475b65
--- /dev/null
+++ b/conf/zapiperf/cdot/9.8.0/workload_detail.yaml
@@ -0,0 +1,66 @@
+
+# object provides latency breakdown per service or delay center
+
+name: WorkloadDetail
+query: workload_detail
+object: qos_detail
+
+instance_key: name
+
+# recommended to use a large interval, since workload objects are expensive
+schedule:
+ - counter: 1200s
+ - instance: 600s
+ - data: 180s
+
+counters:
+ - instance_name
+ - instance_uuid
+ - service_time
+ - wait_time
+ - visits
+
+resource_map:
+ CPU_dblade: backend
+ CPU_nblade: frontend
+ DELAY_CENTER_CLOUD_IO: cloud
+ DELAY_CENTER_CLUSTER_INTERCONNECT: cluster
+ DELAY_CENTER_DISK_IO: disk
+ DELAY_CENTER_NETWORK: network
+ DELAY_CENTER_NVLOG_TRANSFER: nvlog
+ DELAY_CENTER_QOS_LIMIT: throttle
+ DELAY_CENTER_WAFL_SUSP_OTHER: suspend
+ # CPU_dblade_background :
+ # CPU_exempt :
+ # CPU_ha :
+ # CPU_idle :
+ # CPU_kahuna :
+ # CPU_network :
+ # CPU_protocol :
+ # CPU_raid :
+ # CPU_wafl_exempt :
+ # DELAY_CENTER_FLEXCACHE_RAL :
+ # DELAY_CENTER_FLEXCACHE_SPINHI :
+ # DELAY_CENTER_SYNC_REPL :
+ # DELAY_CENTER_WAFL_SUSP_CP :
+ # DISK_SSD_OTHER :
+
+qos_labels:
+ - name
+ - vserver => svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy-group
+ - wid
+
+export_options:
+ instance_keys:
+ - wid
+ - svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy_group
diff --git a/conf/zapiperf/cdot/9.8.0/workload_detail_volume.yaml b/conf/zapiperf/cdot/9.8.0/workload_detail_volume.yaml
new file mode 100644
index 000000000..f426a6111
--- /dev/null
+++ b/conf/zapiperf/cdot/9.8.0/workload_detail_volume.yaml
@@ -0,0 +1,64 @@
+
+# object provides latency breakdown per service or delay center per volume
+name: WorkloadDetailVolume
+query: workload_detail_volume
+object: qos_detail_volume
+
+instance_key: name
+
+# recommended to use a large interval, since workload objects are expensive
+schedule:
+ - counter: 1200s
+ - instance: 600s
+ - data: 180s
+
+counters:
+ - instance_name
+ - instance_uuid
+ - service_time
+ - wait_time
+ - visits
+
+resource_map:
+ CPU_dblade : backend
+ CPU_nblade : frontend
+ DELAY_CENTER_CLOUD_IO : cloud
+ DELAY_CENTER_CLUSTER_INTERCONNECT : cluster
+ DELAY_CENTER_DISK_IO : disk
+ DELAY_CENTER_NETWORK : network
+ DELAY_CENTER_NVLOG_TRANSFER : nvlog
+ DELAY_CENTER_QOS_LIMIT : throttle
+ DELAY_CENTER_WAFL_SUSP_OTHER : suspend
+ # CPU_dblade_background :
+ # CPU_exempt :
+ # CPU_ha :
+ # CPU_idle :
+ # CPU_kahuna :
+ # CPU_network :
+ # CPU_protocol :
+ # CPU_raid :
+ # CPU_wafl_exempt :
+ # DELAY_CENTER_FLEXCACHE_RAL :
+ # DELAY_CENTER_FLEXCACHE_SPINHI :
+ # DELAY_CENTER_SYNC_REPL :
+ # DELAY_CENTER_WAFL_SUSP_CP :
+ # DISK_SSD_OTHER :
+
+qos_labels:
+ - vserver => svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy-group
+ - wid
+
+export_options:
+ instance_keys:
+ - wid
+ - svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy_group
diff --git a/conf/zapiperf/cdot/9.8.0/workload_volume.yaml b/conf/zapiperf/cdot/9.8.0/workload_volume.yaml
new file mode 100644
index 000000000..abfb5b840
--- /dev/null
+++ b/conf/zapiperf/cdot/9.8.0/workload_volume.yaml
@@ -0,0 +1,54 @@
+
+# object provides counters per volume for workloads tracked via "autovol"
+# (i.e. not in a policy group)
+
+name: WorkloadVolume
+query: workload_volume
+object: qos_volume
+
+# recommended to use a large interval, since workload objects are expensive
+schedule:
+ - counter: 1200s
+ - instance: 600s
+ - data: 180s
+
+instance_key: name
+
+counters:
+ - instance_name
+ - instance_uuid
+ - latency
+ - ops
+ - read_data
+ - read_io_type
+ - read_latency
+ - read_ops
+ - sequential_reads
+ - sequential_writes
+ - total_data
+ - write_data
+ - write_latency
+ - write_ops
+
+
+override:
+ - read_io_type_base: delta
+
+qos_labels:
+ - vserver => svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy-group
+ - wid
+
+export_options:
+ instance_keys:
+ - wid
+ - svm
+ - volume
+ - qtree
+ - lun
+ - file
+ - policy_group
diff --git a/conf/zapiperf/default.yaml b/conf/zapiperf/default.yaml
index 5a5672f08..079c69d5e 100644
--- a/conf/zapiperf/default.yaml
+++ b/conf/zapiperf/default.yaml
@@ -44,3 +44,9 @@ objects:
FcpLif: fcp_lif.yaml
CopyManager: copy_manager.yaml
WAFLCompBin: wafl_comp_aggr_vol_bin.yaml
+
+# Uncomment to collect workload/QOS counters
+# Workload: workload.yaml
+# WorkloadDetail: workload_detail.yaml
+# WorkloadVolume: workload_volume.yaml
+# WorkloadDetailVolume: workload_detail_volume.yaml
\ No newline at end of file
diff --git a/deb/build-deb.sh b/deb/build-deb.sh
index fa3df4c45..2ea6a5ff8 100755
--- a/deb/build-deb.sh
+++ b/deb/build-deb.sh
@@ -21,7 +21,7 @@ rm -rf "$BUILD"
mkdir -p "$BUILD"
mkdir -p "$BUILD/opt/harvest/bin/"
cp -r "$SRC/grafana" "$SRC/conf" "$BUILD/opt/harvest/"
-cp "$SRC/harvest.example.yml" "$BUILD/opt/harvest/"
+cp "$SRC/harvest.yml" "$BUILD/opt/harvest/"
cp -r "$SRC/pkg/" "$SRC/cmd/" "$SRC/docs/" "$BUILD/opt/harvest/"
cp -r "$SRC/rpm/" "$SRC/deb/" "$SRC/service/" "$SRC/.git" "$BUILD/opt/harvest/"
cp "$SRC/Makefile" "$SRC/README.md" "$SRC/LICENSE" "$SRC/go.mod" "$SRC/go.sum" "$BUILD/opt/harvest/"
diff --git a/deb/postinst b/deb/postinst
index 669598f5c..38feedaf3 100755
--- a/deb/postinst
+++ b/deb/postinst
@@ -4,21 +4,33 @@
# Copyright NetApp Inc, 2021 All rights reserved
#
+# After installation/upgrade, copy latest harvest.yml as harvest.example.yml
+if [ -e /opt/harvest/harvest.yml ]; then
+ rm -rf /opt/harvest/harvest.example.yml
+ cp /opt/harvest/harvest.yml /opt/harvest/harvest.example.yml
+ echo " --> copy new harvest.yml as [/opt/harvest/harvest.example.yml]"
+fi
+# restore old harvest.yml in case of upgrade
+if [ -e /opt/harvest/backup_harvest.yml ]; then
+ rm -rf /opt/harvest/harvest.yml
+ cp /opt/harvest/backup_harvest.yml /opt/harvest/harvest.yml
+ rm -rf /opt/harvest/backup_harvest.yml
+ echo " --> restoring backup_harvest.yml as [/opt/harvest/harvest.yml]"
+fi
+
echo " --> set directory ownership"
chown -R harvest:harvest /opt/harvest
mkdir -p /var/log/harvest
-mkdir -p /var/run/harvest
chown -R harvest:harvest /var/log/harvest
-chown -R harvest:harvest /var/run/harvest
+chmod -R u+s /opt/harvest/bin
+chmod -R g+s /opt/harvest/bin
echo " --> copying service"
cp /opt/harvest/service/harvest.service /etc/systemd/system/
chmod 664 /etc/systemd/system/harvest.service
-if [ ! -e /opt/harvest/harvest.yml ]; then
- cp /opt/harvest/harvest.example.yml /opt/harvest/harvest.yml
- echo " --> use default config file [/opt/harvest/harvest.yml]"
-fi
+
systemctl daemon-reload
systemctl start harvest.service
systemctl enable harvest.service
-echo " --> harvest service started through systemctl"
\ No newline at end of file
+echo " --> harvest service started through systemctl"
+echo " --> harvest installed location /opt/harvest"
\ No newline at end of file
diff --git a/deb/postrm b/deb/postrm
index 76a0c9f2d..4c89afa13 100755
--- a/deb/postrm
+++ b/deb/postrm
@@ -6,7 +6,8 @@
if [[ "$1" == "purge" || "$1" == "remove" ]]; then
if [ "$1" == "purge" ]; then
- rm -r /var/log/harvest && echo " --> delete log directory"
+ rm -rf /opt/harvest && echo " --> removing /opt/harvest folder"
+ rm -r /var/log/harvest && echo " --> delete log directory"
fi
if systemctl list-units --full -all | grep -Fq 'harvest.service'; then
echo " --> removing systemctl harvest files "
@@ -15,7 +16,6 @@ if [[ "$1" == "purge" || "$1" == "remove" ]]; then
systemctl daemon-reload
systemctl reset-failed
fi
- rm -rf /var/run/harvest && echo " --> delete pid directory"
if [ -e /usr/bin/harvest ]; then
unlink /usr/bin/harvest && echo " --> delete link"
fi
@@ -25,5 +25,11 @@ if [[ "$1" == "purge" || "$1" == "remove" ]]; then
if [ $(getent group harvest) ]; then
groupdel harvest && echo " --> delete harvest group"
fi
+ if [ -e /opt/harvest/backup_harvest.yml ]; then
+ rm -rf /opt/harvest/harvest.yml
+ cp /opt/harvest/backup_harvest.yml /opt/harvest/harvest.yml
+ rm -rf /opt/harvest/backup_harvest.yml
+ echo " --> restoring backup_harvest.yml as [/opt/harvest/harvest.yml]"
+ fi
echo " --> cleanup & uninstall complete"
fi
diff --git a/deb/preinst b/deb/preinst
index 0c9d70771..5c24449e3 100755
--- a/deb/preinst
+++ b/deb/preinst
@@ -8,9 +8,19 @@ if [[ "$1" == "install" || "$1" == "upgrade" ]]; then
if systemctl list-units --full -all | grep -Fq 'harvest.service'; then
systemctl stop harvest && echo "stopped harvest ..."
fi
+ # Stop any harvest processes that are not managed by systemctl
+ ps -ef | grep -w -- --poller | grep -w bin/poller | awk '{print $2}' | xargs -r kill
+ if [ $? -ne 0 ]; then
+ echo " --> Error while stopping pollers"
+ fi
if [ -e /usr/bin/harvest ]; then
unlink /usr/bin/harvest && echo " --> delete link"
fi
+ # Back up the existing harvest.yml so the new package does not overwrite it
+ if [ -e /opt/harvest/harvest.yml ]; then
+ cp /opt/harvest/harvest.yml /opt/harvest/backup_harvest.yml
+ echo " --> Backing up existing config file as [/opt/harvest/backup_harvest.yml]"
+ fi
getent group harvest > /dev/null 2>&1 || addgroup -q --system harvest && echo " --> group created"
getent passwd harvest > /dev/null 2>&1 || adduser -q --system --no-create-home \
--ingroup harvest --disabled-password --shell /bin/false harvest && echo " --> user created"
diff --git a/deb/prerm b/deb/prerm
index b0d3a675b..72ef0113f 100755
--- a/deb/prerm
+++ b/deb/prerm
@@ -6,3 +6,13 @@
echo " --> stop harvest for $1"
systemctl stop harvest
+# Stop any harvest processes that are not managed by systemctl
+ps -ef | grep -w -- --poller | grep -w bin/poller | awk '{print $2}' | xargs -r kill
+if [ $? -ne 0 ]; then
+ echo " --> Error while stopping pollers"
+fi
+if [ -e /opt/harvest/harvest.yml ]; then
+ echo " --> backing up harvest conf"
+ cp /opt/harvest/harvest.yml /opt/harvest/backup_harvest.yml
+ echo " --> Backing up existing config file as [/opt/harvest/backup_harvest.yml]"
+fi
\ No newline at end of file
diff --git a/docker/allPollerInOneContainer/Dockerfile b/docker/allPollerInOneContainer/Dockerfile
index 34d3a0b39..bed85fbee 100644
--- a/docker/allPollerInOneContainer/Dockerfile
+++ b/docker/allPollerInOneContainer/Dockerfile
@@ -19,7 +19,7 @@ COPY . .
RUN make build VERSION=$VERSION RELEASE=$RELEASE
-RUN cp -a $BUILD_DIR/harvest.example.yml $INSTALL_DIR/harvest.yml
+RUN cp -a $BUILD_DIR/harvest.yml $INSTALL_DIR/harvest.yml
RUN cp -aR bin $BUILD_DIR/conf $BUILD_DIR/docs $BUILD_DIR/grafana $BUILD_DIR/docker/allPollerInOneContainer/docker-entrypoint.sh $INSTALL_DIR && \
chmod +x $INSTALL_DIR/docker-entrypoint.sh
diff --git a/docs/AuthAndPermissions.md b/docs/AuthAndPermissions.md
index 30c82f544..08af9b61b 100644
--- a/docs/AuthAndPermissions.md
+++ b/docs/AuthAndPermissions.md
@@ -108,7 +108,7 @@ Change directory to your Harvest home directory (replace `/opt/harvest/` if this
$ cd /opt/harvest/
```
-Generate an SSL cert and key pair with the following command. Note that it's preferred to generate these files using the hostname of the local machine. The command bellow assumes `debian8` as our hostname name and `harvest2` as the user we created in the previous step:
+Generate an SSL cert and key pair with the following command. Note that it's preferred to generate these files using the hostname of the local machine. The command below assumes `debian8` as our hostname and `harvest2` as the user we created in the previous step:
```bash
openssl req -x509 -nodes -days 1095 -newkey rsa:2048 -keyout cert/debian8.key \
diff --git a/go.mod b/go.mod
index ca58b4d86..daa16cfcb 100644
--- a/go.mod
+++ b/go.mod
@@ -4,8 +4,11 @@ go 1.15
require (
github.com/hashicorp/go-version v1.3.0
+ github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.22.0
github.com/spf13/cobra v1.1.3
+ golang.org/x/sys v0.0.0-20210603125802-9665404d3644 // indirect
+ golang.org/x/term v0.0.0-20210503060354-a79de5458b56
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
)
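
go.mod picks up `github.com/pkg/errors`, `golang.org/x/term`, and a newer (indirect) `golang.org/x/sys`. `x/term` is the usual choice for a no-echo password prompt; the call site is not part of this diff, so the snippet below is only a sketch of the typical usage, not Harvest's exact code.

```go
// Typical golang.org/x/term usage for a no-echo password prompt; whether
// Harvest calls it exactly this way is not visible in this diff.
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func promptPassword(prompt string) (string, error) {
	fmt.Fprint(os.Stderr, prompt)
	// ReadPassword turns off local echo while the secret is typed.
	b, err := term.ReadPassword(int(os.Stdin.Fd()))
	fmt.Fprintln(os.Stderr)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	pw, err := promptPassword("password: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d characters\n", len(pw))
}
```
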
diff --git a/go.sum b/go.sum
index fed2110fa..32384c24c 100644
--- a/go.sum
+++ b/go.sum
@@ -242,7 +242,12 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w=
+golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/grafana/prometheus/harvest_dashboard_aggregate.json b/grafana/prometheus/harvest_dashboard_aggregate.json
index 4ddb5f0ef..7123cf231 100644
--- a/grafana/prometheus/harvest_dashboard_aggregate.json
+++ b/grafana/prometheus/harvest_dashboard_aggregate.json
@@ -329,7 +329,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(aggr_space_total{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",aggr=~\"$Aggregate\"})",
"interval": "",
"legendFormat": "",
@@ -387,7 +387,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(aggr_space_available{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",aggr=~\"$Aggregate\"})",
"interval": "",
"legendFormat": "",
@@ -994,7 +994,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "aggr_space_available{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",aggr=~\"$Aggregate\"}",
"interval": "",
"legendFormat": "{{node}} - {{aggr}}",
diff --git a/grafana/prometheus/harvest_dashboard_cluster.json b/grafana/prometheus/harvest_dashboard_cluster.json
index 471399126..2167865e1 100644
--- a/grafana/prometheus/harvest_dashboard_cluster.json
+++ b/grafana/prometheus/harvest_dashboard_cluster.json
@@ -225,7 +225,7 @@
"repeatDirection": "v",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "node_labels{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"format": "table",
"instant": true,
@@ -235,7 +235,7 @@
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "node_status{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"format": "table",
"hide": false,
@@ -417,7 +417,7 @@
"repeatDirection": "h",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "cluster_status{datacenter=~\"$Datacenter\",cluster=\"$Cluster\"}",
"instant": true,
"interval": "",
@@ -497,7 +497,7 @@
"repeatDirection": "h",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cpu_busy{datacenter=~\"$Datacenter\",cluster=\"$Cluster\"})",
"instant": false,
"interval": "",
@@ -571,7 +571,7 @@
"repeatDirection": "h",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_disk_busy{datacenter=~\"$Datacenter\",cluster=\"$Cluster\"})",
"instant": false,
"interval": "",
@@ -762,7 +762,7 @@
"repeatDirection": "v",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "cluster_subsystem_suppressed_alerts{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"format": "table",
"hide": false,
@@ -773,7 +773,7 @@
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "cluster_subsystem_outstanding_alerts{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"format": "table",
"hide": false,
@@ -784,7 +784,7 @@
"refId": "C"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "cluster_subsystem_status{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"format": "table",
"instant": true,
@@ -891,7 +891,7 @@
"repeatDirection": "h",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(aggr_space_used_percent{datacenter=~\"$Datacenter\",cluster=\"$Cluster\"})",
"instant": false,
"interval": "",
@@ -915,7 +915,7 @@
},
"id": 97,
"panels": [],
- "title": "Thoughput",
+ "title": "Throughput",
"type": "row"
},
{
@@ -1273,7 +1273,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(aggr_disk_busy{datacenter=\"$Datacenter\",cluster=~\"$Cluster\"}) by (cluster, aggr)",
"interval": "",
"legendFormat": "{{cluster}} - {{aggr}}",
@@ -1376,7 +1376,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(aggr_space_used_percent{datacenter=\"$Datacenter\",cluster=~\"$Cluster\"}) by (cluster, aggr)",
"interval": "",
"legendFormat": "{{cluster}} - {{aggr}}",
@@ -1479,7 +1479,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cpu_busy{datacenter=\"$Datacenter\",cluster=~\"$Cluster\"}) by (cluster, node)",
"interval": "",
"legendFormat": "{{cluster}} - {{node}}",
diff --git a/grafana/prometheus/harvest_dashboard_metadata.json b/grafana/prometheus/harvest_dashboard_metadata.json
index b7bd99dc2..f3964e336 100644
--- a/grafana/prometheus/harvest_dashboard_metadata.json
+++ b/grafana/prometheus/harvest_dashboard_metadata.json
@@ -4165,7 +4165,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg by (name, target) (metadata_component_count{hostname=~\"$Hostname\",poller=~\"$Poller\",type=\"exporter\"})",
"hide": false,
"interval": "",
@@ -4173,7 +4173,7 @@
"refId": "C"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg by (exporter, target) (metadata_exporter_count{hostname=~\"$Hostname\",poller=~\"$Poller\"})",
"hide": false,
"interval": "",
diff --git a/grafana/prometheus/harvest_dashboard_network.json b/grafana/prometheus/harvest_dashboard_network.json
index 24a1780d5..8b2150b4d 100644
--- a/grafana/prometheus/harvest_dashboard_network.json
+++ b/grafana/prometheus/harvest_dashboard_network.json
@@ -276,7 +276,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(fcp_read_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",port=~\"$FCP\"})",
"interval": "",
"legendFormat": "",
@@ -1464,7 +1464,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "fcp_util_percent{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"}",
"format": "table",
"hide": false,
@@ -1672,7 +1672,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "topk($TopResources, fcp_write_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "{{node}} - {{port}}",
diff --git a/grafana/prometheus/harvest_dashboard_node.json b/grafana/prometheus/harvest_dashboard_node.json
index 167c765d4..9e19559c2 100644
--- a/grafana/prometheus/harvest_dashboard_node.json
+++ b/grafana/prometheus/harvest_dashboard_node.json
@@ -129,7 +129,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "count( count by (node) (node_status{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"}) )",
"format": "time_series",
"hide": false,
@@ -199,7 +199,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "count(count by (aggr) (aggr_status{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"}))",
"format": "time_series",
"hide": false,
@@ -259,7 +259,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(volume_avg_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -315,7 +315,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_write_data+volume_read_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -371,7 +371,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_total_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -425,7 +425,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cpu_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -490,7 +490,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg by(node) (volume_avg_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "{{node}}",
@@ -550,7 +550,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "Bps"
},
"overrides": []
},
@@ -594,7 +594,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum by (node) (volume_write_data+volume_read_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "{{node}}",
@@ -605,7 +605,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Thoughput",
+ "title": "Throughput",
"tooltip": {
"shared": true,
"sort": 2,
@@ -623,7 +623,7 @@
"yaxes": [
{
"$$hashKey": "object:195",
- "format": "Âĩs",
+ "format": "Bps",
"label": null,
"logBase": 1,
"max": null,
@@ -654,7 +654,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -698,7 +698,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum by(node) (volume_total_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "{{node}}",
@@ -727,7 +727,7 @@
"yaxes": [
{
"$$hashKey": "object:427",
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
@@ -791,7 +791,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_disk_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -870,21 +870,21 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(volume_read_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Read",
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(volume_write_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Write",
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(volume_other_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Other",
@@ -944,7 +944,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "Bps"
},
"overrides": []
},
@@ -988,7 +988,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_read_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"intervalFactor": 1,
@@ -996,7 +996,7 @@
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_write_data{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"intervalFactor": 1,
@@ -1008,7 +1008,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Thoughput",
+ "title": "Throughput",
"tooltip": {
"shared": true,
"sort": 2,
@@ -1026,7 +1026,7 @@
"yaxes": [
{
"$$hashKey": "object:581",
- "format": "Âĩs",
+ "format": "Bps",
"label": null,
"logBase": 1,
"max": null,
@@ -1057,7 +1057,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -1101,21 +1101,21 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_read_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Read",
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_write_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Write",
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(volume_other_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Other",
@@ -1144,7 +1144,7 @@
"yaxes": [
{
"$$hashKey": "object:659",
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
@@ -1217,7 +1217,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cpu_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1225,7 +1225,7 @@
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cpu_domain_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",metric=\"kahuna\"})",
"hide": false,
"interval": "",
@@ -1233,7 +1233,7 @@
"refId": "C"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_disk_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1241,7 +1241,7 @@
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "abs(avg(wafl_cp_phase_times{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\",metric=\"P2_FLUSH\"}) - 100)",
"hide": true,
"interval": "",
@@ -1363,7 +1363,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg by(metric) (wafl_read_io_type{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1424,7 +1424,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -1468,7 +1468,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1476,7 +1476,7 @@
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_nfs_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1484,7 +1484,7 @@
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_fcp_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1528,7 +1528,7 @@
"yaxes": [
{
"$$hashKey": "object:737",
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
@@ -1615,7 +1615,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "node_avg_processor_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"}",
"hide": false,
"interval": "",
@@ -1737,7 +1737,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg by(metric) (node_cpu_domain_busy{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"hide": false,
"interval": "",
@@ -1849,7 +1849,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(node_cifs_latency{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -1908,7 +1908,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_ops{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "",
@@ -1930,7 +1930,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -1974,7 +1974,7 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum by (metric) (node_cifs_op_count{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "{{metric}}",
@@ -2003,7 +2003,7 @@
"yaxes": [
{
"$$hashKey": "object:736",
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
@@ -2076,28 +2076,28 @@
"steppedLine": false,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_connections{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Connections",
"refId": "A"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_established_sessions{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Established Sessions",
"refId": "B"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_signed_sessions{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Signed Sessions",
"refId": "C"
},
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(node_cifs_open_files{datacenter=\"$Datacenter\",cluster=\"$Cluster\",node=~\"$Node\"})",
"interval": "",
"legendFormat": "Open Files",
@@ -2388,7 +2388,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "Bps"
},
"overrides": []
},
@@ -2460,7 +2460,7 @@
},
"yaxes": [
{
- "format": "Âĩs",
+ "format": "Bps",
"label": null,
"logBase": 1,
"max": null,
@@ -2490,7 +2490,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -2562,7 +2562,7 @@
},
"yaxes": [
{
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
@@ -2823,7 +2823,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "Bps"
},
"overrides": []
},
@@ -2895,7 +2895,7 @@
},
"yaxes": [
{
- "format": "Âĩs",
+ "format": "Bps",
"label": null,
"logBase": 1,
"max": null,
@@ -2925,7 +2925,7 @@
"description": "",
"fieldConfig": {
"defaults": {
- "unit": "Âĩs"
+ "unit": "iops"
},
"overrides": []
},
@@ -2997,7 +2997,7 @@
},
"yaxes": [
{
- "format": "Âĩs",
+ "format": "iops",
"label": null,
"logBase": 1,
"max": null,
diff --git a/grafana/prometheus/harvest_dashboard_shelf.json b/grafana/prometheus/harvest_dashboard_shelf.json
index a9bf87901..883eb075d 100644
--- a/grafana/prometheus/harvest_dashboard_shelf.json
+++ b/grafana/prometheus/harvest_dashboard_shelf.json
@@ -213,7 +213,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(shelf_temperature_reading{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "",
@@ -287,7 +287,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(shelf_fan_rpm{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "",
@@ -361,7 +361,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "avg(shelf_sensor_reading{datacenter=\"$Datacenter\",cluster=\"$Cluster\"})",
"interval": "",
"legendFormat": "",
@@ -435,7 +435,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(sum_over_time(shelf_psu_power_drawn{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}[24h]))",
"interval": "",
"legendFormat": "",
@@ -483,6 +483,18 @@
}
]
},
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "shelf"
+ },
+ "properties": [
+ {
+ "id": "custom.displayMode",
+ "value": "json-view"
+ }
+ ]
+ },
{
"matcher": {
"id": "byName",
@@ -732,7 +744,7 @@
"repeatDirection": "h",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "shelf_temperature_reading{datacenter=\"$Datacenter\",cluster=\"$Cluster\",shelf=~\"$Shelf\"}",
"interval": "",
"legendFormat": "{{shelf}} ¡ {{sensor_id}}",
@@ -800,7 +812,7 @@
"repeat": "FanShelf",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "shelf_fan_rpm{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"interval": "",
"legendFormat": "{{shelf}} ¡ {{fan_id}}",
@@ -883,7 +895,7 @@
"repeat": null,
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "shelf_voltage_reading{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"interval": "",
"legendFormat": "{{shelf}} ¡ {{sensor_id}}",
@@ -948,7 +960,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(shelf_psu_power_drawn{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}) by (shelf, psu_id)",
"interval": "",
"legendFormat": "{{shelf}} - PSU {{psu_id}}",
@@ -1013,7 +1025,7 @@
"pluginVersion": "7.5.4",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "sum(shelf_psu_power_rating{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}) by (shelf, psu_id)",
"interval": "",
"legendFormat": "{{shelf}} - PSU {{psu_id}}",
@@ -1094,7 +1106,7 @@
"repeat": "SensShelf",
"targets": [
{
- "exemplar": true,
+ "exemplar": false,
"expr": "shelf_sensor_reading{datacenter=\"$Datacenter\",cluster=\"$Cluster\"}",
"interval": "",
"legendFormat": "S{{sensor_id}}",
diff --git a/grafana/prometheus/harvest_dashboard_volume.json b/grafana/prometheus/harvest_dashboard_volume.json
index 37bb49573..f0898ff28 100644
--- a/grafana/prometheus/harvest_dashboard_volume.json
+++ b/grafana/prometheus/harvest_dashboard_volume.json
@@ -407,7 +407,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Top Average Thoughput",
+ "title": "Top Average Throughput",
"tooltip": {
"shared": true,
"sort": 2,
@@ -1003,7 +1003,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Read Thoughput",
+ "title": "Read Throughput",
"tooltip": {
"shared": true,
"sort": 2,
@@ -1312,7 +1312,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Read Thoughput",
+ "title": "Read Throughput",
"tooltip": {
"shared": true,
"sort": 2,
diff --git a/harvest.example.yml b/harvest.yml
similarity index 100%
rename from harvest.example.yml
rename to harvest.yml
diff --git a/pkg/api/ontapi/zapi/client.go b/pkg/api/ontapi/zapi/client.go
index 11b979167..b1abcccd4 100644
--- a/pkg/api/ontapi/zapi/client.go
+++ b/pkg/api/ontapi/zapi/client.go
@@ -1,9 +1,7 @@
-/*
- * Copyright NetApp Inc, 2021 All rights reserved
+// Copyright NetApp Inc, 2021 All rights reserved
-Package zapi provides type Client for connecting to a C-dot or 7-mode
-Ontap cluster and sending API requests using the ZAPI protocol.
-*/
+// Package zapi provides type Client for connecting to a C-dot or 7-mode
+// ONTAP cluster and sending API requests using the ZAPI protocol.
package zapi
import (
@@ -66,8 +64,11 @@ func New(config *node.Node) (*Client, error) {
return nil, errors.New(errors.MISSING_PARAM, "addr")
}
- url = "https://" + addr + ":443/servlets/netapp.servlets.admin.XMLrequest_filer"
-
+ if config.GetChildContentS("is_kfs") == "true" {
+ url = "https://" + addr + ":8443/servlets/netapp.servlets.admin.XMLrequest_filer"
+ } else {
+ url = "https://" + addr + ":443/servlets/netapp.servlets.admin.XMLrequest_filer"
+ }
// create a request object that will be used for later requests
if request, err = http.NewRequest("POST", url, nil); err != nil {
return nil, err
@@ -76,7 +77,7 @@ func New(config *node.Node) (*Client, error) {
request.Header.Set("Content-type", "text/xml")
request.Header.Set("Charset", "utf-8")
- // by default, encorce secure TLS, if not requested otherwise by user
+ // by default, enforce secure TLS, if not requested otherwise by user
if x := config.GetChildContentS("use_insecure_tls"); x != "" {
if useInsecureTLS, err = strconv.ParseBool(x); err != nil {
client.Logger.Error().Stack().Err(err).Msg("use_insecure_tls")
@@ -106,10 +107,6 @@ func New(config *node.Node) (*Client, error) {
}
} else {
- if !useInsecureTLS {
- return nil, errors.New(errors.INVALID_PARAM, "use_insecure_tls is false, but no certificates")
- }
-
username := config.GetChildContentS("username")
password := config.GetChildContentS("password")
@@ -147,7 +144,7 @@ func New(config *node.Node) (*Client, error) {
return &client, nil
}
-// init connects to the cluster and retrieves system info
+// Init connects to the cluster and retrieves system info
// it will give up after retries
func (c *Client) Init(retries int) error {
var err error
@@ -169,7 +166,7 @@ func (c *Client) IsClustered() bool {
return c.system.clustered
}
-// Version returns version of the ONTAP server (generation, manjor and minor)
+// Version returns version of the ONTAP server (generation, major and minor)
func (c *Client) Version() [3]int {
return c.system.version
}
@@ -203,7 +200,7 @@ func (c *Client) BuildRequestString(request string) error {
}
// BuildRequest builds an API request from the node query
-// root element of the request is usualy the API name (e.g. "volume-get-iter") and
+// root element of the request is usually the API name (e.g. "volume-get-iter") and
// its children are the attributes requested
func (c *Client) BuildRequest(request *node.Node) error {
return c.buildRequest(request, false)
@@ -243,14 +240,14 @@ func (c *Client) buildRequest(query *node.Node, forceCluster bool) error {
}
// Invoke will issue the API request and return server response
-// this method should only be callled after building the request
+// this method should only be called after building the request
func (c *Client) Invoke() (*node.Node, error) {
result, _, _, err := c.invoke(false)
return result, err
}
// InvokeBatchRequest will issue API requests in series, once there
-// are no more instances returned by the server, returned results will be nill
+// are no more instances returned by the server, returned results will be nil
// Use the returned tag for subsequent calls to this method
func (c *Client) InvokeBatchRequest(request *node.Node, tag string) (*node.Node, string, error) {
// wasteful of course, need to rewrite later @TODO
@@ -258,7 +255,7 @@ func (c *Client) InvokeBatchRequest(request *node.Node, tag string) (*node.Node,
return results, tag, err
}
-// InvokeBatchRequestWithTimers does the same as InvokeBatchRequest, but it also
+// InvokeBatchWithTimers does the same as InvokeBatchRequest, but it also
// returns API time and XML parse time
func (c *Client) InvokeBatchWithTimers(request *node.Node, tag string) (*node.Node, string, time.Duration, time.Duration, error) {
@@ -315,13 +312,13 @@ func (c *Client) InvokeRequest(request *node.Node) (*node.Node, error) {
// InvokeWithTimers invokes the request and returns parsed XML response and timers:
// API wait time and XML parse time.
-// This method should only be callled after building the request
+// This method should only be called after building the request
func (c *Client) InvokeWithTimers() (*node.Node, time.Duration, time.Duration, error) {
return c.invoke(true)
}
-// InvokeWithTimers invokes the request and returns the raw server response
-// This method should only be callled after building the request
+// InvokeRaw invokes the request and returns the raw server response
+// This method should only be called after building the request
func (c *Client) InvokeRaw() ([]byte, error) {
var (
response *http.Response
diff --git a/pkg/api/ontapi/zapi/client_test.go b/pkg/api/ontapi/zapi/client_test.go
new file mode 100644
index 000000000..96aaabe70
--- /dev/null
+++ b/pkg/api/ontapi/zapi/client_test.go
@@ -0,0 +1,62 @@
+package zapi
+
+import (
+ "goharvest2/pkg/tree/node"
+ "testing"
+)
+
+func TestNew(t *testing.T) {
+ type args struct {
+ config *node.Node
+ }
+
+ type test struct {
+ name string
+ config *node.Node
+ wantErr bool
+ }
+
+ certificatePollerFail := node.NewS("test")
+ certificatePollerFail.NewChildS("datacenter", "cluster-01")
+ certificatePollerFail.NewChildS("addr", "localhost")
+ certificatePollerFail.NewChildS("auth_style", "certificate_auth")
+ certificatePollerFail.NewChildS("use_insecure_tls", "false")
+
+ certificatePollerPass := node.NewS("test")
+ certificatePollerPass.NewChildS("datacenter", "cluster-01")
+ certificatePollerPass.NewChildS("addr", "localhost")
+ certificatePollerPass.NewChildS("auth_style", "certificate_auth")
+ certificatePollerPass.NewChildS("use_insecure_tls", "false")
+ certificatePollerPass.NewChildS("ssl_cert", "testdata/ubuntu.pem")
+ certificatePollerPass.NewChildS("ssl_key", "testdata/ubuntu.key")
+
+ basicAuthPollerFail := node.NewS("test")
+ basicAuthPollerFail.NewChildS("datacenter", "cluster-01")
+ basicAuthPollerFail.NewChildS("addr", "localhost")
+ basicAuthPollerFail.NewChildS("auth_style", "basic_auth")
+ basicAuthPollerFail.NewChildS("use_insecure_tls", "false")
+
+ basicAuthPollerPass := node.NewS("test")
+ basicAuthPollerPass.NewChildS("datacenter", "cluster-01")
+ basicAuthPollerPass.NewChildS("addr", "localhost")
+ basicAuthPollerPass.NewChildS("auth_style", "basic_auth")
+ basicAuthPollerPass.NewChildS("use_insecure_tls", "false")
+ basicAuthPollerPass.NewChildS("username", "username")
+ basicAuthPollerPass.NewChildS("password", "password")
+
+ tests := []test{
+ test{"missing_certificate_keys", certificatePollerFail, true},
+ test{"correct_certificate_configuration", certificatePollerPass, false},
+ test{"missing_username_password", basicAuthPollerFail, true},
+ test{"correct_basic_auth_configuration", basicAuthPollerPass, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := New(tt.config)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ })
+ }
+}
diff --git a/pkg/api/ontapi/zapi/testdata/ubuntu.key b/pkg/api/ontapi/zapi/testdata/ubuntu.key
new file mode 100644
index 000000000..c25b806ff
--- /dev/null
+++ b/pkg/api/ontapi/zapi/testdata/ubuntu.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDR2DQqbNTD38gU
+2B0btCMfoeUvQYjFgKCKMfU94z5eA2Lgtzq44LHy2jgm7vx5pEQ39vzIb9JMyVa+
+R5lGUSqbsYoZhYdCAC5YMhdc4tULXaUiWdyOjuiUbtkF3MZjJhQww47EW8GlUivp
+928yhpNJ84NPgZtUfnm1Jq2NtDC56j/Q5aULEYZ/A8Ab3V+H7KKiYlpWomOzA6/a
+x8p7dPWjtJFBG4n67fxA2JgINNiZl52/6FKBfPOxwVoFdNkGiDDtLXMS93Ms1fy2
+6jBm4nIdswn7WlfEK1cmcQc6NNLruKzRmZr9j5BtR9brjmH5pLUGGmVy1NfGeKg8
+z8q+BkOTAgMBAAECggEAWpk01IhOSA8q7EzuLFLWw0Z7k66ApXzGU0QEdObSu3ZM
+z2ZsdPuV+JrcmXqfCMmyJlN9ifA1ZK+cwuRYubI7aiL509EBt64qVCu0SIcqohCZ
+Rj5z6hyrL8K38j8CbDY5i2AzGHXhIkZa7VXF7ZOKrefz7nSvoohEdcCwN7jvPKSt
+E6g0NeOZQILK/2lpWEor/DuJnrJ+WDqjHfwo2+qAdY559HrPIzFacMoWw3MgFJVk
+Vw1IbiniNDXiq2jnxL9dpf3dZbwOd/Cxp4Fs6I+iMXdJU0CXC+MsPqkDDAasascH
+tWwJ2m9jFY2SCe7ec/MT+unGBgH7F0MxAVEY+gtH8QKBgQDu9ZzvMcTsPF8Sl3BW
+eo4ABFH9OlZIB84CL+zekmrrZLiNzPwCwB++WeGTpxekn15WCOGN5weMBWozA1KV
+zhweb2mJhVYo19Q00am91hsAjCH4yICT0uts2dI4n8IdzL7OJUMgh3QPZqvsuyfL
+xzr78GnmQmyMdiy1gcq1ipW7mwKBgQDgzxOMWNEhLRnGg4bJbsL/PhCZrDAyMMv0
+bnzNKoWij42lo95/fnoMe7g9hModXsq4TriHkt1pf/0V0wuWfO3Kze5L51Br8ZUG
+Kvr6D8GehmEr4k0cFpPrnyrwGyI5xZm3nrSY7d4TWiXnBuQ3n+tm5aGT+SdwyU4N
+xzAXlpSDaQKBgQCwPsWGAk488vYoTzvS/MdZKdaTEADtVZxTLqflLVbg0UPxilvh
+Z3TCP6bJ8XbVu5mnmRtLPzfoxWGjyHKT/RXuOLw9avO2YiGo527rjkwSJkk0JiX3
+YZy0hx+vFd3lfsrIXuFYz68BYPezPgLHIOXXRmXFJ/7JxquJH/az2UhAuwKBgEyp
+sUPiIecLZ0fQII3erLa4JpcRJhpnk5XWe87HWm0WQTqkSEL4EYziP/ESsI4VflId
+3kW1arYIhHkF0GTQU5MeaM4Potg8uRIyZAYUi3WQ4+BAOwWCGY13aQAacLB81y/L
++lBQhbPZwSUor3jcmz0qv9AjXjTTIUAHljnlCt5JAoGBAL57BTK+aicN3s6s7kuJ
+iXvsW42hKQFEiiPilE9k8atmKh60qQ2TrO4cqG+R+c+rOIF1fC7C2mcowv9IWIFW
+egAqY+xMq7NjSuIAXLtNX7W8J6mAIvSanp/+tY/cWGO32QxP58oSLtff3ya3xf4+
+CO6srFQTno6yi2se3j496iyq
+-----END PRIVATE KEY-----
diff --git a/pkg/api/ontapi/zapi/testdata/ubuntu.pem b/pkg/api/ontapi/zapi/testdata/ubuntu.pem
new file mode 100644
index 000000000..4e8878e40
--- /dev/null
+++ b/pkg/api/ontapi/zapi/testdata/ubuntu.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDBzCCAe+gAwIBAgIURxmOqU2PsYigk9dfISdlVQDJk6EwDQYJKoZIhvcNAQEL
+BQAwEzERMA8GA1UEAwwIaGFydmVzdDIwHhcNMjEwNjAxMTMyMDU5WhcNMjQwNTMx
+MTMyMDU5WjATMREwDwYDVQQDDAhoYXJ2ZXN0MjCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBANHYNCps1MPfyBTYHRu0Ix+h5S9BiMWAoIox9T3jPl4DYuC3
+OrjgsfLaOCbu/HmkRDf2/Mhv0kzJVr5HmUZRKpuxihmFh0IALlgyF1zi1QtdpSJZ
+3I6O6JRu2QXcxmMmFDDDjsRbwaVSK+n3bzKGk0nzg0+Bm1R+ebUmrY20MLnqP9Dl
+pQsRhn8DwBvdX4fsoqJiWlaiY7MDr9rHynt09aO0kUEbifrt/EDYmAg02JmXnb/o
+UoF887HBWgV02QaIMO0tcxL3cyzV/LbqMGbich2zCftaV8QrVyZxBzo00uu4rNGZ
+mv2PkG1H1uuOYfmktQYaZXLU18Z4qDzPyr4GQ5MCAwEAAaNTMFEwHQYDVR0OBBYE
+FJVTbSdz4FZ4JA7Pa8vrMc3osNwcMB8GA1UdIwQYMBaAFJVTbSdz4FZ4JA7Pa8vr
+Mc3osNwcMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAKlALUD
+NtoQpJc64h+TDfsMqTFg0L0mve9y0w2ZSDklVcxB3Rv5fXR01Tbr3NDuh7j7P3Yx
+2t75DlTU9vBc9IX0qptGEVuB2e90m5Cr6lNOLoDSev5FHqul7h1Hv1eIJON+2DTW
+q+L4cpmkV4r5N41dP16F5q0x82YUkHKbnm3+JgPHd1ytNEGQXNWzo7se5Gmn0H8y
+MwS5TTkluUmZI0Xf+JJeU4EV3JNM4pwOAPgf30mZSvdQ+SA3YuavNLJu1bFWh+WZ
+Oo+V/gUXWhdhW2FADMQ2cu/kqPAFfFUeJuSXYWXCTbY/KPW4KF6I0ALg5TnaC8yA
+iLwmeXsKfFvj2fI=
+-----END CERTIFICATE-----
diff --git a/pkg/color/color.go b/pkg/color/color.go
index c44e5324b..4ed12ad31 100644
--- a/pkg/color/color.go
+++ b/pkg/color/color.go
@@ -1,8 +1,15 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
+
package color
+import (
+ "fmt"
+ "golang.org/x/term"
+ "os"
+)
+
var Bold string = "\033[1m"
var End string = "\033[0m"
var Italic string = "\033[3m"
@@ -17,3 +24,25 @@ var BlueBG string = "\033[46m"
var GreenBG string = "\033[42m"
var RedBG string = "\033[41m"
var PinkBG string = "\033[45m"
+
+var withColor = false
+
+func DetectConsole(option string) {
+ switch option {
+ case "never":
+ withColor = false
+ case "always":
+ withColor = true
+ default:
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ withColor = true
+ }
+ }
+}
+
+func Colorize(s interface{}, color string) string {
+ if withColor {
+ return fmt.Sprintf("%s%v\x1b[0m", color, s)
+ }
+ return fmt.Sprintf("%v", s)
+}
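
A small usage sketch for the new console helpers; the `"auto"` value is arbitrary, since anything other than `"always"`/`"never"` falls through to terminal detection on stdout.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/color"
)

func main() {
	// "always" and "never" force color on or off; any other value (here the
	// arbitrary "auto") falls through to terminal detection on stdout.
	color.DetectConsole("auto")
	fmt.Println(color.Colorize("pollers are running", color.Bold))
}
```
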
diff --git a/pkg/conf/conf.go b/pkg/conf/conf.go
index 51718c540..af7791631 100644
--- a/pkg/conf/conf.go
+++ b/pkg/conf/conf.go
@@ -1,6 +1,7 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
*/
+
package conf
import (
@@ -9,18 +10,27 @@ import (
"goharvest2/pkg/errors"
"goharvest2/pkg/tree"
"goharvest2/pkg/tree/node"
+ "goharvest2/pkg/util"
"gopkg.in/yaml.v3"
"io/ioutil"
"os"
"path"
+ "regexp"
+ "strconv"
)
// LoadConfig loads the config info from harvest.yml
func LoadConfig(configPath string) (*node.Node, error) {
configNode, err := tree.Import("yaml", configPath)
if configNode != nil {
- // Load HarvestConfig to rewrite - eventually all the code will be refactored to use HarvestConfig
- _ = LoadHarvestConfig(configPath)
+ // Load HarvestConfig to rewrite passwords - eventually all the code will be refactored to use HarvestConfig.
+ // This is needed because the current yaml parser does not handle passwords with special characters,
+ // e.g. abc#123, because the # is interpreted as the beginning of a comment. The code below overwrites
+ // the incorrect password with the correct one by using a better YAML parser for each Poller and Defaults section.
+ err := LoadHarvestConfig(configPath)
+ if err != nil {
+ return nil, err
+ }
pollers := configNode.GetChildS("Pollers")
if pollers != nil {
for _, poller := range pollers.GetChildren() {
@@ -45,18 +55,31 @@ func LoadConfig(configPath string) (*node.Node, error) {
}
var Config = HarvestConfig{}
+var configRead = false
func LoadHarvestConfig(configPath string) error {
+ if configRead {
+ return nil
+ }
contents, err := ioutil.ReadFile(configPath)
if err != nil {
fmt.Printf("error reading config file=[%s] %+v\n", configPath, err)
return err
}
err = yaml.Unmarshal(contents, &Config)
+ configRead = true
+ if err != nil {
+ fmt.Printf("error unmarshalling config file=[%s] %+v\n", configPath, err)
+ return err
+ }
+ // Until https://github.com/go-yaml/yaml/issues/717 is fixed
+ // read the yaml again to determine poller order
+ orderedConfig := OrderedConfig{}
+ err = yaml.Unmarshal(contents, &orderedConfig)
if err != nil {
- fmt.Printf("error reading config file=[%s] %+v\n", configPath, err)
return err
}
+ Config.PollersOrdered = orderedConfig.Pollers.namesInOrder
return nil
}
@@ -64,11 +87,11 @@ func SafeConfig(n *node.Node, fp string) error {
return tree.Export(n, "yaml", fp)
}
-func GetExporters(config_fp string) (*node.Node, error) {
+func GetExporters(configFp string) (*node.Node, error) {
var err error
var config, exporters *node.Node
- if config, err = LoadConfig(config_fp); err != nil {
+ if config, err = LoadConfig(configFp); err != nil {
return nil, err
}
@@ -80,34 +103,34 @@ func GetExporters(config_fp string) (*node.Node, error) {
return exporters, nil
}
-func GetPollerNames(config_fp string) ([]string, error) {
+func GetPollerNames(configFp string) ([]string, error) {
- var poller_names []string
+ var pollerNames []string
var config, pollers *node.Node
var err error
- if config, err = LoadConfig(config_fp); err != nil {
- return poller_names, err
+ if config, err = LoadConfig(configFp); err != nil {
+ return pollerNames, err
}
if pollers = config.GetChildS("Pollers"); pollers == nil {
- return poller_names, errors.New(errors.ERR_CONFIG, "[Pollers] section not found")
+ return pollerNames, errors.New(errors.ERR_CONFIG, "[Pollers] section not found")
}
- poller_names = make([]string, 0)
+ pollerNames = make([]string, 0)
for _, p := range pollers.GetChildren() {
- poller_names = append(poller_names, p.GetNameS())
+ pollerNames = append(pollerNames, p.GetNameS())
}
- return poller_names, nil
+ return pollerNames, nil
}
-func GetPollers(config_fp string) (*node.Node, error) {
+func GetPollers(configFp string) (*node.Node, error) {
var config, pollers, defaults *node.Node
var err error
- if config, err = LoadConfig(config_fp); err != nil {
+ if config, err = LoadConfig(configFp); err != nil {
return nil, err
}
@@ -124,16 +147,15 @@ func GetPollers(config_fp string) (*node.Node, error) {
return pollers, err
}
-func GetPoller(config_fp, poller_name string) (*node.Node, error) {
+func GetPoller(configFp, pollerName string) (*node.Node, error) {
var err error
var pollers, poller *node.Node
- if pollers, err = GetPollers(config_fp); err == nil {
- if poller = pollers.GetChildS(poller_name); poller == nil {
- err = errors.New(errors.ERR_CONFIG, "poller ["+poller_name+"] not found")
+ if pollers, err = GetPollers(configFp); err == nil {
+ if poller = pollers.GetChildS(pollerName); poller == nil {
+ err = errors.New(errors.ERR_CONFIG, "poller ["+pollerName+"] not found")
}
}
-
return poller, err
}
@@ -167,44 +189,102 @@ func GetHarvestLogPath() string {
return logPath
}
-func GetHarvestPidPath() string {
- var pidPath string
- if pidPath = os.Getenv("HARVEST_PIDS"); pidPath == "" {
- pidPath = "/var/run/harvest/"
- }
- return pidPath
-}
-
/*
-This method returns port configured in prometheus exporter for given poller
+GetPrometheusExporterPorts returns the port configured in the Prometheus exporter for the given poller.
If more than one exporter is configured for a poller, the returned string will have the ports comma separated
*/
-func GetPrometheusExporterPorts(p *node.Node, configFp string) (string, error) {
- var port string
- exporters := p.GetChildS("exporters")
- if exporters != nil {
- exportChildren := exporters.GetAllChildContentS()
- definedExporters, err := GetExporters(configFp)
- if err != nil {
- return "", err
+func GetPrometheusExporterPorts(pollerName string) (int, error) {
+ var port int
+ var isPrometheusExporterConfigured bool
+
+ if len(promPortRangeMapping) == 0 {
+ loadPrometheusExporterPortRangeMapping()
+ }
+ exporters := (*Config.Pollers)[pollerName].Exporters
+
+ if exporters != nil && len(*exporters) > 0 {
+ for _, e := range *exporters {
+ exporter := (*Config.Exporters)[e]
+ if *exporter.Type == "Prometheus" {
+ isPrometheusExporterConfigured = true
+ if exporter.PortRange != nil {
+ ports := promPortRangeMapping[e]
+ for k := range ports.freePorts {
+ port = k
+ delete(ports.freePorts, k)
+ break
+ }
+ } else if *exporter.Port != 0 {
+ port = *exporter.Port
+ break
+ }
+ }
+ continue
}
- for _, ec := range exportChildren {
- promNode := definedExporters.GetChildS(ec)
- if promNode == nil {
- fmt.Printf("poller [%s] specified exporter [%s] that does not exist\n", p.GetNameS(), ec)
- continue
+ }
+ if port == 0 && isPrometheusExporterConfigured {
+ return port, errors.New(errors.ERR_CONFIG, "No free port found for poller "+pollerName)
+ } else {
+ return port, nil
+ }
+}
+
+type PortMap struct {
+ portSet []int
+ freePorts map[int]struct{}
+}
+
+func PortMapFromRange(address string, portRange *IntRange) PortMap {
+ portMap := PortMap{}
+ start := portRange.Min
+ end := portRange.Max
+ for i := start; i <= end; i++ {
+ portMap.portSet = append(portMap.portSet, i)
+ }
+ portMap.freePorts = util.CheckFreePorts(address, portMap.portSet)
+ return portMap
+}
+
+var promPortRangeMapping = make(map[string]PortMap)
+
+func loadPrometheusExporterPortRangeMapping() {
+ exporters := *Config.Exporters
+ for k, v := range exporters {
+ if *v.Type == "Prometheus" {
+ if v.PortRange != nil {
+ promPortRangeMapping[k] = PortMapFromRange(*v.Addr, v.PortRange)
+ }
+ }
+ }
+}
+
+type IntRange struct {
+ Min int
+ Max int
+}
+
+var rangeRegex, _ = regexp.Compile(`(\d+)\s*-\s*(\d+)`)
+
+func (i *IntRange) UnmarshalYAML(node *yaml.Node) error {
+ if node.Kind == yaml.ScalarNode && node.ShortTag() == "!!str" {
+ matches := rangeRegex.FindStringSubmatch(node.Value)
+ if len(matches) == 3 {
+ min, err1 := strconv.Atoi(matches[1])
+ max, err2 := strconv.Atoi(matches[2])
+ if err1 != nil {
+ return err1
}
- exporterType := promNode.GetChildContentS("exporter")
- if exporterType == "Prometheus" {
- currentPort := definedExporters.GetChildS(ec).GetChildContentS("port")
- port = currentPort
+ if err2 != nil {
+ return err2
}
+ i.Min = min
+ i.Max = max
}
}
- return port, nil
+ return nil
}
-// Returns unique type of exporters for the poller
+// GetUniqueExporters returns unique type of exporters for the poller
// For example: If 2 prometheus exporters are configured for a poller then last one defined is returned
func GetUniqueExporters(p *node.Node, configFp string) ([]string, error) {
var resultExporters []string
@@ -257,12 +337,15 @@ type Poller struct {
LogMaxFiles *int `yaml:"log_max_files,omitempty"`
Exporters *[]string `yaml:"exporters,omitempty"`
Collectors *[]string `yaml:"collectors,omitempty"`
+ IsKfs *bool `yaml:"is_kfs,omitempty"`
}
type Exporter struct {
Port *int `yaml:"port,omitempty"`
+ PortRange *IntRange `yaml:"port_range,omitempty"`
Type *string `yaml:"exporter,omitempty"`
Addr *string `yaml:"addr,omitempty"`
+ Url *string `yaml:"url,omitempty"`
LocalHttpAddr *string `yaml:"local_http_addr,omitempty"`
GlobalPrefix *string `yaml:"global_prefix,omitempty"`
AllowedAddrs *[]string `yaml:"allow_addrs,omitempty"`
@@ -279,9 +362,31 @@ type Exporter struct {
ClientTimeout *string `yaml:"client_timeout,omitempty"`
}
+type Pollers struct {
+ namesInOrder []string
+}
+
+func (i *Pollers) UnmarshalYAML(node *yaml.Node) error {
+ if node.Kind == yaml.MappingNode {
+ var namesInOrder []string
+ for _, n := range node.Content {
+ if n.Kind == yaml.ScalarNode && n.ShortTag() == "!!str" {
+ namesInOrder = append(namesInOrder, n.Value)
+ }
+ }
+ i.namesInOrder = namesInOrder
+ }
+ return nil
+}
+
+type OrderedConfig struct {
+ Pollers Pollers `yaml:"Pollers,omitempty"`
+}
+
type HarvestConfig struct {
- Tools *Tools `yaml:"Tools,omitempty"`
- Exporters *map[string]Exporter `yaml:"Exporters,omitempty"`
- Pollers *map[string]Poller `yaml:"Pollers,omitempty"`
- Defaults *Poller `yaml:"Defaults,omitempty"`
+ Tools *Tools `yaml:"Tools,omitempty"`
+ Exporters *map[string]Exporter `yaml:"Exporters,omitempty"`
+ Pollers *map[string]Poller `yaml:"Pollers,omitempty"`
+ Defaults *Poller `yaml:"Defaults,omitempty"`
+ PollersOrdered []string // poller names in same order as yaml config
}
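
To illustrate the `port_range` support added above, a hedged sketch that unmarshals a small YAML snippet into `HarvestConfig`; the exporter name, address, and range values are made up for the example.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/conf"
	"gopkg.in/yaml.v3"
)

// Illustrative config: a Prometheus exporter with a port_range instead of a
// fixed port; the range string is parsed by IntRange.UnmarshalYAML.
const snippet = `
Exporters:
  prometheus:
    exporter: Prometheus
    addr: 0.0.0.0
    port_range: 2020-2030
`

func main() {
	cfg := conf.HarvestConfig{}
	if err := yaml.Unmarshal([]byte(snippet), &cfg); err != nil {
		panic(err)
	}
	r := (*cfg.Exporters)["prometheus"].PortRange
	fmt.Println(r.Min, r.Max) // 2020 2030
}
```
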
diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go
index bff3842fd..749768f06 100644
--- a/pkg/errors/errors.go
+++ b/pkg/errors/errors.go
@@ -4,6 +4,7 @@
package errors
import (
+ "github.com/pkg/errors"
"strings"
)
@@ -23,23 +24,15 @@ const (
API_REQ_REJECTED = "api request rejected"
// @TODO, implement: API response is something like
// Insufficient privileges: user 'harvest2-user' does not have write access to this resource
- API_INSUF_PRIV = "api insufficient priviliges"
- ERR_DLOAD = "dynamic load"
- ERR_IMPLEMENT = "implementation error"
- ERR_SCHEDULE = "schedule error"
+ API_INSUF_PRIV = "api insufficient privileges"
+ ERR_DLOAD = "dynamic load"
+ ERR_IMPLEMENT = "implementation error"
+ ERR_SCHEDULE = "schedule error"
+ GO_ROUTINE_PANIC = "goroutine panic"
)
-type Error struct {
- class string
- msg string
-}
-
-func (e Error) Error() string {
- return e.class + " => " + e.msg
-}
-
-func New(class, msg string) Error {
- return Error{class: class, msg: msg}
+func New(class, msg string) error {
+ return errors.New(class + " => " + msg)
}
func GetClass(err error) string {
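
A tiny usage sketch for the reworked constructor; the message text is made up, and GetClass is assumed to recover the class portion of the "<class> => <msg>" string.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/errors"
)

func main() {
	// New folds the class into the message: "<class> => <msg>".
	err := errors.New(errors.ERR_CONFIG, "[Pollers] section not found")
	fmt.Println(err)
	// GetClass is assumed to return the class portion of the message.
	fmt.Println(errors.GetClass(err))
}
```
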
diff --git a/pkg/logging/logger.go b/pkg/logging/logger.go
index 36a30663e..cfb02b148 100644
--- a/pkg/logging/logger.go
+++ b/pkg/logging/logger.go
@@ -91,8 +91,7 @@ func Configure(config LogConfig) *Logger {
var writers []io.Writer
if config.ConsoleLoggingEnabled {
- // writers = append(writers, zerolog.ConsoleWriter{Out: os.Stderr})
- writers = append(writers, os.Stdout)
+ writers = append(writers, zerolog.ConsoleWriter{Out: os.Stderr})
}
if config.FileLoggingEnabled {
writers = append(writers, newRollingFile(config))
@@ -101,7 +100,7 @@ func Configure(config LogConfig) *Logger {
zerolog.SetGlobalLevel(config.LogLevel)
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
- zeroLogger := zerolog.New(multiWriters).With().Str(config.PrefixKey, config.PrefixValue).Timestamp().Logger()
+ zeroLogger := zerolog.New(multiWriters).With().Caller().Str(config.PrefixKey, config.PrefixValue).Timestamp().Logger()
zeroLogger.Debug().
Bool("consoleLoggingEnabled", config.ConsoleLoggingEnabled).
diff --git a/pkg/matrix/metric.go b/pkg/matrix/metric.go
index e3bd40e4c..4015fd8e6 100644
--- a/pkg/matrix/metric.go
+++ b/pkg/matrix/metric.go
@@ -60,7 +60,7 @@ type Metric interface {
AddValueUint64(*Instance, uint64) error
AddValueFloat32(*Instance, float32) error
AddValueFloat64(*Instance, float64) error
- //AddValueString(*Instance, string) error
+ AddValueString(*Instance, string) error
//SetValueBytes(*Instance, []byte) error
SetValueNAN(*Instance)
@@ -204,3 +204,7 @@ func (me *AbstractMetric) DivideWithThreshold(s Metric, t int) error {
func (me *AbstractMetric) MultiplyByScalar(s int) error {
return errors.New(errors.ERR_IMPLEMENT, me.dtype)
}
+
+func (me *AbstractMetric) AddValueString(i *Instance, s string) error {
+ return errors.New(errors.ERR_IMPLEMENT, me.dtype)
+}
diff --git a/pkg/matrix/metric_float64.go b/pkg/matrix/metric_float64.go
index 57777aa04..d840546c8 100644
--- a/pkg/matrix/metric_float64.go
+++ b/pkg/matrix/metric_float64.go
@@ -154,6 +154,21 @@ func (me *MetricFloat64) AddValueFloat64(i *Instance, n float64) error {
return me.SetValueFloat64(i, m+n)
}
+func (me *MetricFloat64) AddValueString(i *Instance, v string) error {
+ var (
+ x, n float64
+ err error
+ has bool
+ )
+ if x, err = strconv.ParseFloat(v, 64); err != nil {
+ return err
+ }
+ if n, has = me.GetValueFloat64(i); has {
+ return me.SetValueFloat64(i, x+n)
+ }
+ return me.SetValueFloat64(i, x)
+}
+
// Read methods
func (me *MetricFloat64) GetValueInt(i *Instance) (int, bool) {
diff --git a/pkg/tree/node/node.go b/pkg/tree/node/node.go
index 4860f9db4..5c355641a 100644
--- a/pkg/tree/node/node.go
+++ b/pkg/tree/node/node.go
@@ -35,7 +35,7 @@ func NewXml(name []byte) *Node {
func NewXmlS(name string) *Node {
// ugly solution to support xml
- return &Node{XMLName: xml.Name{"", name}}
+ return &Node{XMLName: xml.Name{Local: name}}
}
func (n *Node) GetXmlNameS() string {
@@ -43,7 +43,7 @@ func (n *Node) GetXmlNameS() string {
}
func (n *Node) SetXmlNameS(name string) {
- n.XMLName = xml.Name{"", name}
+ n.XMLName = xml.Name{Local: name}
}
func (n *Node) GetName() []byte {
@@ -94,7 +94,7 @@ func (n *Node) AddAttr(attr xml.Attr) {
}
func (n *Node) NewAttrS(name, value string) {
- n.AddAttr(xml.Attr{Name: xml.Name{"", name}, Value: value})
+ n.AddAttr(xml.Attr{Name: xml.Name{Local: name}, Value: value})
}
func (n *Node) GetChildren() []*Node {
@@ -216,6 +216,14 @@ func (n *Node) GetAllChildContentS() []string {
return content
}
+func (n *Node) GetAllChildNamesS() []string {
+ names := make([]string, 0)
+ for _, ch := range n.Children {
+ names = append(names, ch.GetNameS())
+ }
+ return names
+}
+
func (n *Node) SetContent(content []byte) {
n.Content = content
}
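
A quick sketch of the new GetAllChildNamesS helper next to the existing GetAllChildContentS; the node name, child names, and values are made up.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/tree/node"
)

func main() {
	n := node.NewS("poller")
	n.NewChildS("datacenter", "dc-01")
	n.NewChildS("addr", "localhost")

	fmt.Println(n.GetAllChildNamesS())   // child names: [datacenter addr]
	fmt.Println(n.GetAllChildContentS()) // child values: [dc-01 localhost]
}
```
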
diff --git a/pkg/util/portScanner.go b/pkg/util/portScanner.go
new file mode 100644
index 000000000..2dcd68108
--- /dev/null
+++ b/pkg/util/portScanner.go
@@ -0,0 +1,56 @@
+package util
+
+import (
+ "fmt"
+ "net"
+ "time"
+)
+
+func worker(address string, ports, results chan int) {
+ for p := range ports {
+ address := fmt.Sprintf("%s:%d", address, p)
+ conn, err := net.DialTimeout("tcp", address, 1*time.Second)
+ if err != nil {
+ results <- p
+ continue
+ }
+ conn.Close()
+ results <- 0
+ }
+}
+
+func CheckFreePorts(address string, ports []int) map[int]struct{} {
+ freeports := make(map[int]struct{})
+
+ if len(ports) > 0 {
+ // this channel will receive ports to be scanned
+ portsWorkers := make(chan int, 10)
+ // this channel will receive results of scanning
+ results := make(chan int)
+
+ // create a pool of workers
+ for i := 0; i < cap(portsWorkers); i++ {
+ go worker(address, portsWorkers, results)
+ }
+
+ // send ports to be scanned
+ go func() {
+ for _, p := range ports {
+ portsWorkers <- p
+ }
+ }()
+
+ for i := 0; i < len(ports); i++ {
+ port := <-results
+ if port != 0 {
+ freeports[port] = struct{}{}
+ }
+ }
+
+ // After all the work has been completed, close the channels
+ close(portsWorkers)
+ close(results)
+ }
+ return freeports
+}
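
A hedged usage sketch for the scanner: ports that refuse (or time out on) a TCP dial are treated as free. The address and port numbers are arbitrary.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/util"
)

func main() {
	// Ports that cannot be dialed within the one-second timeout are reported
	// as free and can be handed out to pollers.
	free := util.CheckFreePorts("localhost", []int{12990, 12991, 12992})
	for port := range free {
		fmt.Println("free port:", port)
	}
}
```
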
diff --git a/pkg/util/util.go b/pkg/util/util.go
index afc7ba6ed..d6d8868c8 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -1,11 +1,23 @@
/*
* Copyright NetApp Inc, 2021 All rights reserved
+ */
- Package Description:
- Some helper methods.
-*/
package util
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// HarvestTag is injected into a poller's environment to disambiguate the process
+const HarvestTag = "IS_HARVEST=TRUE"
+
func MinLen(elements [][]string) int {
var min, i int
min = len(elements[0])
@@ -50,3 +62,87 @@ func EqualStringSlice(a, b []string) bool {
}
return true
}
+
+func readProcFile(path string) (string, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+ result := string(bytes.ReplaceAll(data, []byte("\x00"), []byte(" ")))
+ return result, nil
+}
+
+func GetEnviron(pid int) (string, error) {
+ return readProcFile(fmt.Sprintf("/proc/%d/environ", pid))
+}
+
+func GetCmdLine(pid int) (string, error) {
+ return readProcFile(fmt.Sprintf("/proc/%d/cmdline", pid))
+}
+
+func RemoveEmptyStrings(s []string) []string {
+ var r []string
+ for _, str := range s {
+ if str != "" {
+ r = append(r, str)
+ }
+ }
+ return r
+}
+
+func GetPid(pollerName string) ([]int, error) {
+ // ($|\s) is included to match the poller name
+ // followed by a space or end of line - that way unix1 does not match unix11
+ search := fmt.Sprintf(`\-\-poller %s($|\s)`, pollerName)
+ return GetPids(search)
+}
+
+func GetPids(search string) ([]int, error) {
+ var result []int
+ var ee *exec.ExitError
+ var pe *os.PathError
+ data, err := exec.Command("pgrep", "-f", search).Output()
+ if errors.As(err, &ee) {
+ return result, nil // ran, but non-zero exit code
+ } else if errors.As(err, &pe) {
+ return result, err // "no such file ...", "permission denied" etc.
+ } else if err != nil {
+ return result, err // something really bad happened!
+ }
+ sdata := string(data)
+ pids := RemoveEmptyStrings(strings.Split(sdata, "\n"))
+ for _, pid := range pids {
+ p, err := strconv.Atoi(strings.TrimSpace(pid))
+ if err != nil {
+ return result, err
+ }
+
+ // Validate this is a Harvest process
+ environ, err := GetEnviron(p)
+ if err != nil {
+ if errors.As(err, &pe) {
+ // permission denied, no need to log
+ continue
+ }
+ fmt.Printf("err reading environ for search=%s pid=%d err=%+v\n", search, p, err)
+ continue
+ }
+ if strings.Contains(environ, HarvestTag) {
+ result = append(result, p)
+ }
+ }
+ return result, err
+}
+
+func ContainsWholeWord(source string, search string) bool {
+ if len(source) == 0 || len(search) == 0 {
+ return false
+ }
+ fields := strings.Fields(source)
+ for _, w := range fields {
+ if w == search {
+ return true
+ }
+ }
+ return false
+}
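
A small sketch of the pgrep-based lookup; the poller name is made up, and only processes whose environment contains the HarvestTag marker are returned.

```go
package main

import (
	"fmt"

	"goharvest2/pkg/util"
)

func main() {
	// Find the PIDs of the poller named "cluster-01".
	pids, err := util.GetPid("cluster-01")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("pids:", pids)
}
```
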
diff --git a/rpm/build-rpm.sh b/rpm/build-rpm.sh
index 495513e19..7a32d3a26 100755
--- a/rpm/build-rpm.sh
+++ b/rpm/build-rpm.sh
@@ -24,7 +24,7 @@ cp -r "$SRC/docs/" "$BUILD/harvest/"
cp -r "$SRC/conf/" "$BUILD/harvest/"
cp -r "$SRC/rpm/" "$BUILD/harvest/"
cp -r "$SRC/service/" "$BUILD/harvest/"
-cp "$SRC/harvest.example.yml" "$BUILD/harvest/"
+cp "$SRC/harvest.yml" "$BUILD/harvest/"
cp "$SRC/go.mod" "$BUILD/harvest/"
cp "$SRC/go.sum" "$BUILD/harvest/"
if [ -d "$SRC/vendor" ]; then
diff --git a/rpm/spec b/rpm/spec
index b4be45df0..5be160e3e 100644
--- a/rpm/spec
+++ b/rpm/spec
@@ -29,6 +29,16 @@ if [ $1 = 2 ]; then
if systemctl list-units --full -all | grep -Fq 'harvest.service'; then
systemctl stop harvest && echo "stopped harvest ..."
fi
+ # Stop any harvest processes that are not managed by systemctl
+ ps -ef | grep -w -- --poller | grep -w bin/poller | awk '{print $2}' | xargs -r kill
+ if [ $? -ne 0 ]; then
+ echo " --> Error while stopping pollers"
+ fi
+fi
+# Back up the existing harvest.yml so it is not overwritten by the new package
+if [ -e $RPM_BUILD_ROOT/opt/harvest/harvest.yml ]; then
+ cp $RPM_BUILD_ROOT/opt/harvest/harvest.yml $RPM_BUILD_ROOT/opt/harvest/backup_harvest.yml
+ echo " --> Backing up existing config file as [/opt/harvest/backup_harvest.yml]"
fi
%install
@@ -39,19 +49,28 @@ cd $RPM_BUILD_ROOT/opt/harvest
echo " --> installing harvest..."
cd $RPM_BUILD_ROOT/opt/harvest
mkdir -p $RPM_BUILD_ROOT/var/log/harvest
-mkdir -p $RPM_BUILD_ROOT/var/run/harvest
%post
-if [ ! -e $RPM_BUILD_ROOT/opt/harvest/harvest.yml ]; then
- cp $RPM_BUILD_ROOT/opt/harvest/harvest.example.yml $RPM_BUILD_ROOT/opt/harvest/harvest.yml
- echo " --> use default config file [/opt/harvest/harvest.yml]"
+# After installation/upgrade, copy latest harvest.yml as harvest.example.yml
+if [ -e $RPM_BUILD_ROOT/opt/harvest/harvest.yml ]; then
+ rm -rf $RPM_BUILD_ROOT/opt/harvest/harvest.example.yml
+ cp $RPM_BUILD_ROOT/opt/harvest/harvest.yml $RPM_BUILD_ROOT/opt/harvest/harvest.example.yml
+ echo " --> copy new harvest.yml as [/opt/harvest/harvest.example.yml]"
+fi
+# restore old harvest.yml in case of upgrade
+if [ -e $RPM_BUILD_ROOT/opt/harvest/backup_harvest.yml ]; then
+ rm -rf $RPM_BUILD_ROOT/opt/harvest/harvest.yml
+ cp $RPM_BUILD_ROOT/opt/harvest/backup_harvest.yml $RPM_BUILD_ROOT/opt/harvest/harvest.yml
+ rm -rf $RPM_BUILD_ROOT/opt/harvest/backup_harvest.yml
+ echo " --> restoring existing harvest.yml as [/opt/harvest/harvest.yml]"
fi
getent group harvest > /dev/null 2>&1 || groupadd -r harvest && echo " --> create harvest group"
getent passwd harvest > /dev/null 2>&1 || useradd -r -M -g harvest --shell=/sbin/nologin harvest && echo " --> create harvest user"
chown -R harvest:harvest $RPM_BUILD_ROOT/opt/harvest/
chown -R harvest:harvest $RPM_BUILD_ROOT/var/log/harvest/
-chown -R harvest:harvest $RPM_BUILD_ROOT/var/run/harvest/
+chmod -R u+s $RPM_BUILD_ROOT/opt/harvest/bin
+chmod -R g+s $RPM_BUILD_ROOT/opt/harvest/bin
echo " --> copying service"
cp $RPM_BUILD_ROOT/opt/harvest/service/harvest.service /etc/systemd/system/
chmod 664 /etc/systemd/system/harvest.service
@@ -65,9 +84,9 @@ systemctl start harvest.service
systemctl enable harvest.service
echo " --> harvest service started through systemctl"
if [ $1 = 1 ]; then
- echo " --> install complete!"
+ echo " --> install complete! harvest directory /opt/harvest"
else
- echo " --> updated harvest!"
+ echo " --> updated harvest! harvest directory /opt/harvest"
fi
%preun
@@ -75,6 +94,15 @@ fi
if [ $1 = 0 ]; then
echo "stopping harvest ..."
systemctl stop harvest
+ # Stop any harvest processes that are not managed by systemctl
+ ps -ef | grep -w -- --poller | grep -w -- --daemon | awk '{print $2}' | xargs -r kill
+ if [ $? -ne 0 ]; then
+ echo " --> Error while stopping pollers"
+ fi
+ if [ -e /opt/harvest/harvest.yml ]; then
+ cp /opt/harvest/harvest.yml /opt/harvest/backup_harvest.yml
+ echo " --> Backing up existing config file as [/opt/harvest/backup_harvest.yml]"
+ fi
fi
%postun
@@ -85,11 +113,19 @@ if [ $1 = 0 ]; then
unlink /usr/bin/harvest && echo " --> delete link"
fi
rm -rf /var/run/harvest && echo " --> delete pid folder"
+ rm -rf /usr/lib/tmpfiles.d/harvest.conf && echo " --> removing tmpfiles.d harvest configuration"
echo " --> removing systemctl harvest files "
systemctl disable harvest.service
rm /etc/systemd/system/harvest.service
systemctl daemon-reload
systemctl reset-failed
+ # restore harvest.yml
+ if [ -e /opt/harvest/backup_harvest.yml ]; then
+ rm -rf /opt/harvest/harvest.yml
+ cp /opt/harvest/backup_harvest.yml /opt/harvest/harvest.yml
+ rm -rf /opt/harvest/backup_harvest.yml
+ echo " --> restoring backup_harvest.yml as [/opt/harvest/harvest.yml]"
+ fi
# keep logs at var/log/harvest
echo " --> kept log folder"
echo " --> uninstall complete"
@@ -102,5 +138,4 @@ fi
%files
/opt/harvest
/var/log/harvest
-/var/run/harvest
diff --git a/service/contrib/README.md b/service/contrib/README.md
new file mode 100644
index 000000000..22e0f3476
--- /dev/null
+++ b/service/contrib/README.md
@@ -0,0 +1,74 @@
+# Improved SystemD Integration
+
+Systemd instantiated units for each poller and a target to tie them together. Use wildcards to start|stop|restart sets of pollers.
+
+## Poller Service via systemd instantiated services
+
+Create one instantiated service template for all pollers. Adjust paths as needed:
+
+```
+echo '[Unit]
+Description="Poller instance %I"
+PartOf=harvest.target
+
+[Service]
+Type=simple
+Restart=on-failure
+WorkingDirectory=/path/to/harvest
+ExecStart=/path/to/harvest/bin/harvest --config /path/to/harvest/harvest.yml start -f %i' | sudo tee /etc/systemd/system/poller@.service
+```
+
+### Harvest Target
+
+Target files are how systemd groups a set of services together. We'll use one to start|stop all pollers as a single unit, which is handy on reboot or upgrade.
+
+This is more tedious to set up and change. We'll add a `harvest systemd generate` command that creates this target from a template.
+
+For now you'll need to create it yourself, similar to the example below:
+
+```
+echo '[Unit]
+Description="Harvest"
+Wants=poller@unix1.service poller@unix2.service poller@unix3.service poller@unix4.service poller@unix5.service poller@unix6.service poller@unix7.service poller@unix8.service poller@unix9.service
+
+[Install]
+WantedBy=multi-user.target' | sudo tee /etc/systemd/system/harvest.target
+```
+
+### How to use
+
+`systemctl daemon-reload`
+
+Assuming your `harvest.yml` contains pollers like so:
+
+```
+cluster-01:
+ collectors:
+ - ZAPI
+unix2:
+ collectors:
+ - Unix
+...
+```
+
+Example commands to manage pollers:
+
+```
+systemctl start poller@cluster-01 poller@unix2 ....
+
+systemctl list-units --type=service "poller*"
+
+systemctl status "poller*"
+
+systemctl stop "poller*"
+
+systemctl start|stop|restart harvest.target
+
+```
+
+### Systemd: Logs
+
+```
+journalctl -fu poller@cluster-01 # follow (tail-like) logs for the poller named cluster-01
+journalctl -u poller@unix2 # show logs for the poller named unix2
+```
diff --git a/service/contrib/target.tmpl b/service/contrib/target.tmpl
new file mode 100644
index 000000000..fb1e5623c
--- /dev/null
+++ b/service/contrib/target.tmpl
@@ -0,0 +1,6 @@
+[Unit]
+Description="Harvest"
+Wants={{range .PollersOrdered}}poller@{{.}}.service {{end}}
+
+[Install]
+WantedBy=multi-user.target
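
The README above mentions a future `harvest systemd generate` command; one plausible way to render this template with the new PollersOrdered field, sketched with text/template. The poller names are made up.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Render the target template with the ordered poller names; the path and
	// poller names below are illustrative.
	tmpl := template.Must(template.ParseFiles("service/contrib/target.tmpl"))
	data := struct{ PollersOrdered []string }{
		PollersOrdered: []string{"cluster-01", "unix2"},
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```
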
diff --git a/service/harvest.service b/service/harvest.service
index a0f7736f6..873fef7f4 100644
--- a/service/harvest.service
+++ b/service/harvest.service
@@ -10,6 +10,9 @@ WorkingDirectory=/opt/harvest
ExecStart=/opt/harvest/bin/harvest \
restart \
--config /opt/harvest/harvest.yml
+ExecStop=/opt/harvest/bin/harvest \
+ stop \
+ --config /opt/harvest/harvest.yml
IgnoreSIGPIPE=no
KillMode=process
Restart=on-failure
diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
new file mode 100644
index 000000000..dbae7f7be
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
@@ -0,0 +1,25 @@
+# 1.3.0 (March 31, 2021)
+
+Please note that CHANGELOG.md does not exist in the source code prior to this release.
+
+FEATURES:
+ - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85))
+
+# 1.2.1 (June 17, 2020)
+
+BUG FIXES:
+ - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73))
+
+# 1.2.0 (April 23, 2019)
+
+FEATURES:
+ - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53))
+
+# 1.1.0 (Jan 07, 2019)
+
+FEATURES:
+ - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45))
+
+# 1.0.0 (August 24, 2018)
+
+Initial release.
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients' rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients'
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party's negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party's ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 000000000..851a337be
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,66 @@
+# Versioning Library for Go
+[](https://circleci.com/gh/hashicorp/go-version/tree/master)
+[](https://godoc.org/github.com/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and verifying versions against a set of constraints. go-version
+can sort a collection of versions properly, handles prerelease/beta
+versions, can increment versions, etc.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+ fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+ fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+ v, _ := version.NewVersion(raw)
+ versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find an issue with this library, please report an issue. If you'd
+like, we welcome any contributions. Fork this library and submit a pull
+request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 000000000..d05575961
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,204 @@
+package version
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+ f constraintFunc
+ check *Version
+ original string
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.
+type Constraints []*Constraint
+
+type constraintFunc func(v, c *Version) bool
+
+var constraintOperators map[string]constraintFunc
+
+var constraintRegexp *regexp.Regexp
+
+func init() {
+ constraintOperators = map[string]constraintFunc{
+ "": constraintEqual,
+ "=": constraintEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "~>": constraintPessimistic,
+ }
+
+ ops := make([]string, 0, len(constraintOperators))
+ for k := range constraintOperators {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ VersionRegexpRaw))
+}
+
+// NewConstraint will parse one or more constraints from the given
+// constraint string. The string must be a comma-separated list of
+// constraints.
+func NewConstraint(v string) (Constraints, error) {
+ vs := strings.Split(v, ",")
+ result := make([]*Constraint, len(vs))
+ for i, single := range vs {
+ c, err := parseSingle(single)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = c
+ }
+
+ return Constraints(result), nil
+}
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ for _, c := range cs {
+ if !c.Check(v) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the string format of the constraints
+func (cs Constraints) String() string {
+ csStr := make([]string, len(cs))
+ for i, c := range cs {
+ csStr[i] = c.String()
+ }
+
+ return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+ return c.f(v, c.check)
+}
+
+func (c *Constraint) String() string {
+ return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+ matches := constraintRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed constraint: %s", v)
+ }
+
+ check, err := NewVersion(matches[2])
+ if err != nil {
+ return nil, err
+ }
+
+ return &Constraint{
+ f: constraintOperators[matches[1]],
+ check: check,
+ original: v,
+ }, nil
+}
+
+func prereleaseCheck(v, c *Version) bool {
+ switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
+ case cPre && vPre:
+ // A constraint with a pre-release can only match a pre-release version
+ // with the same base segments.
+ return reflect.DeepEqual(c.Segments64(), v.Segments64())
+
+ case !cPre && vPre:
+ // A constraint without a pre-release can only match a version without a
+ // pre-release.
+ return false
+
+ case cPre && !vPre:
+ // OK, except with the pessimistic operator
+ case !cPre && !vPre:
+ // OK
+ }
+ return true
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+func constraintEqual(v, c *Version) bool {
+ return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+ return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) <= 0
+}
+
+func constraintPessimistic(v, c *Version) bool {
+ // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
+ if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
+ return false
+ }
+
+ // If the version being checked is naturally less than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if v.LessThan(c) {
+ return false
+ }
+ // We'll use this more than once, so grab the length now so it's a little cleaner
+ // to write the later checks
+ cs := len(c.segments)
+
+ // If the version being checked has less specificity than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if cs > len(v.segments) {
+ return false
+ }
+
+ // Check the segments in the constraint against those in the version. If the version
+ // being checked, at any point, does not have the same values in each index of the
+ // constraints segments, then it cannot be valid against the constraint.
+ for i := 0; i < c.si-1; i++ {
+ if v.segments[i] != c.segments[i] {
+ return false
+ }
+ }
+
+ // Check the last part of the segment in the constraint. If the version segment at
+ // this index is less than the constraints segment at this index, then it cannot
+ // be valid against the constraint
+ if c.segments[cs-1] > v.segments[cs-1] {
+ return false
+ }
+
+ // If nothing has rejected the version by now, it's valid
+ return true
+}
diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod
new file mode 100644
index 000000000..f5285555f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-version
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 000000000..8068834ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,392 @@
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var (
+ versionRegexp *regexp.Regexp
+ semverRegexp *regexp.Regexp
+)
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const (
+ VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+ `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+ `?`
+
+ // SemverRegexpRaw requires a separator between version and prerelease
+ SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+ `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+ `?`
+)
+
+// Version represents a single version.
+type Version struct {
+ metadata string
+ pre string
+ segments []int64
+ si int
+ original string
+}
+
+func init() {
+ versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+ semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+ return newVersion(v, versionRegexp)
+}
+
+// NewSemver parses the given version and returns a new
+// Version that adheres strictly to SemVer specs
+// https://semver.org/
+func NewSemver(v string) (*Version, error) {
+ return newVersion(v, semverRegexp)
+}
+
+func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
+ matches := pattern.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed version: %s", v)
+ }
+ segmentsStr := strings.Split(matches[1], ".")
+ segments := make([]int64, len(segmentsStr))
+ si := 0
+ for i, str := range segmentsStr {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing version: %s", err)
+ }
+
+ segments[i] = int64(val)
+ si++
+ }
+
+ // Even though we could support more than three segments, if we
+ // got fewer than three, pad it with 0s. This is to cover the basic
+ // default use case of semver, which is MAJOR.MINOR.PATCH at the minimum
+ for i := len(segments); i < 3; i++ {
+ segments = append(segments, 0)
+ }
+
+ pre := matches[7]
+ if pre == "" {
+ pre = matches[4]
+ }
+
+ return &Version{
+ metadata: matches[10],
+ pre: pre,
+ segments: segments,
+ si: si,
+ original: v,
+ }, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+
+ return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
+func (v *Version) Compare(other *Version) int {
+ // A quick, efficient equality check
+ if v.String() == other.String() {
+ return 0
+ }
+
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+
+ // If the segments are the same, we must compare on prerelease info
+ if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+ preSelf := v.Prerelease()
+ preOther := other.Prerelease()
+ if preSelf == "" && preOther == "" {
+ return 0
+ }
+ if preSelf == "" {
+ return 1
+ }
+ if preOther == "" {
+ return -1
+ }
+
+ return comparePrereleases(preSelf, preOther)
+ }
+
+ // Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+ lenSelf := len(segmentsSelf)
+ lenOther := len(segmentsOther)
+ hS := lenSelf
+ if lenSelf < lenOther {
+ hS = lenOther
+ }
+ // Compare the segments
+ // Because a constraint could have more/less specificity than the version it's
+ // checking, we need to account for a lopsided or jagged comparison
+ for i := 0; i < hS; i++ {
+ if i > lenSelf-1 {
+ // This means Self had the lower specificity
+ // Check to see if the remaining segments in Other are all zeros
+ if !allZero(segmentsOther[i:]) {
+ // if not, it means that Other has to be greater than Self
+ return -1
+ }
+ break
+ } else if i > lenOther-1 {
+ // this means Other had the lower specificity
+ // Check to see if the remaining segments in Self are all zeros -
+ if !allZero(segmentsSelf[i:]) {
+ //if not, it means that Self has to be greater than Other
+ return 1
+ }
+ break
+ }
+ lhs := segmentsSelf[i]
+ rhs := segmentsOther[i]
+ if lhs == rhs {
+ continue
+ } else if lhs < rhs {
+ return -1
+ }
+ // Otherwise lhs was greater than rhs; they're not equal
+ return 1
+ }
+
+ // if we got this far, they're equal
+ return 0
+}
+
+func allZero(segs []int64) bool {
+ for _, s := range segs {
+ if s != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+ if preSelf == preOther {
+ return 0
+ }
+
+ var selfInt int64
+ selfNumeric := true
+ selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+ if err != nil {
+ selfNumeric = false
+ }
+
+ var otherInt int64
+ otherNumeric := true
+ otherInt, err = strconv.ParseInt(preOther, 10, 64)
+ if err != nil {
+ otherNumeric = false
+ }
+
+ // if a part is empty, we use the other to decide
+ if preSelf == "" {
+ if otherNumeric {
+ return -1
+ }
+ return 1
+ }
+
+ if preOther == "" {
+ if selfNumeric {
+ return 1
+ }
+ return -1
+ }
+
+ if selfNumeric && !otherNumeric {
+ return -1
+ } else if !selfNumeric && otherNumeric {
+ return 1
+ } else if !selfNumeric && !otherNumeric && preSelf > preOther {
+ return 1
+ } else if selfInt > otherInt {
+ return 1
+ }
+
+ return -1
+}
+
+func comparePrereleases(v string, other string) int {
+ // the same pre release!
+ if v == other {
+ return 0
+ }
+
+ // split both pre-releases to analyse their parts
+ selfPreReleaseMeta := strings.Split(v, ".")
+ otherPreReleaseMeta := strings.Split(other, ".")
+
+ selfPreReleaseLen := len(selfPreReleaseMeta)
+ otherPreReleaseLen := len(otherPreReleaseMeta)
+
+ biggestLen := otherPreReleaseLen
+ if selfPreReleaseLen > otherPreReleaseLen {
+ biggestLen = selfPreReleaseLen
+ }
+
+ // loop over the parts to find the first difference
+ for i := 0; i < biggestLen; i = i + 1 {
+ partSelfPre := ""
+ if i < selfPreReleaseLen {
+ partSelfPre = selfPreReleaseMeta[i]
+ }
+
+ partOtherPre := ""
+ if i < otherPreReleaseLen {
+ partOtherPre = otherPreReleaseMeta[i]
+ }
+
+ compare := comparePart(partSelfPre, partOtherPre)
+ // if the parts are equal, continue the loop
+ if compare != 0 {
+ return compare
+ }
+ }
+
+ return 0
+}
+
+// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
+// segments of the version, without prerelease or metadata.
+func (v *Version) Core() *Version {
+ segments := v.Segments64()
+ segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
+ return Must(NewVersion(segmentsOnly))
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+ if v == nil || o == nil {
+ return v == o
+ }
+
+ return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+ segmentSlice := make([]int, len(v.segments))
+ for i, v := range v.segments {
+ segmentSlice[i] = int(v)
+ }
+ return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+ result := make([]int64, len(v.segments))
+ copy(result, v.segments)
+ return result
+}
+
+// String returns the full version string, including pre-release
+// and metadata information.
+//
+// This value is rebuilt according to the parsed segments and other
+// information. Therefore, ambiguities in the version string such as
+// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
+// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
+// as shown in the parenthesized examples.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+ fmtParts := make([]string, len(v.segments))
+ for i, s := range v.segments {
+ // FormatInt cannot fail here; the segments were already parsed as integers
+ str := strconv.FormatInt(s, 10)
+ fmtParts[i] = str
+ }
+ fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original parsed version as-is, including any
+// potential whitespace, `v` prefix, etc.
+func (v *Version) Original() string {
+ return v.original
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 000000000..cc888d43e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+ return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+ return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f0d1fb6a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
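+
+As a rough usage sketch (illustrative only, not part of the upstream README; it assumes
+the package is imported from `github.com/inconshreveable/mousetrap`), a CLI entry point
+might check this flag at startup and pause before exiting so the hint stays readable:
+
+    package main
+
+    import (
+        "fmt"
+        "time"
+
+        "github.com/inconshreveable/mousetrap"
+    )
+
+    func main() {
+        // Double-clicked from Explorer: print a hint and keep the console window
+        // open long enough to read it instead of exiting immediately.
+        if mousetrap.StartedByExplorer() {
+            fmt.Println("This is a command-line tool; please run it from a terminal.")
+            time.Sleep(5 * time.Second)
+            return
+        }
+        // normal CLI behaviour continues here
+    }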
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..9d2d8a4ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..336142a5e
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 000000000..9a28e57c3
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 000000000..9159de03e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 000000000..835ba3e75
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 000000000..ce9d7cded
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 000000000..54dfdcb12
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [Build Status](https://travis-ci.org/pkg/errors) [AppVeyor](https://ci.appveyor.com/project/davecheney/errors/branch/master) [GoDoc](http://godoc.org/github.com/pkg/errors) [Report card](https://goreportcard.com/report/github.com/pkg/errors) [Sourcegraph](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
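+The stack trace that `Wrap` (and `New`, `Errorf`, `Wrapf`) records can be printed with the `%+v` verb. A small sketch, illustrative only and not part of the upstream README:
+
+```go
+package main
+
+import (
+    "fmt"
+    "io"
+
+    "github.com/pkg/errors"
+)
+
+func main() {
+    err := errors.Wrap(io.EOF, "read failed")
+
+    // %v prints just the messages; %+v also prints the recorded stack trace.
+    fmt.Printf("%v\n", err)
+    fmt.Printf("%+v\n", err)
+}
+```
+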
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 000000000..a932eade0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 000000000..161aea258
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 000000000..be0d10d0c
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 000000000..779a8348f
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (\n\t)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore
new file mode 100644
index 000000000..8ebe58b15
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+tmp
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME
new file mode 100644
index 000000000..9ce57a6eb
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/CNAME
@@ -0,0 +1 @@
+zerolog.io
\ No newline at end of file
diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE
new file mode 100644
index 000000000..677e07f7a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Olivier Poitrey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md
new file mode 100644
index 000000000..36ed93bbc
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/README.md
@@ -0,0 +1,693 @@
+# Zero Allocation JSON Logger
+
+[GoDoc](https://godoc.org/github.com/rs/zerolog) [License](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [Build Status](https://travis-ci.org/rs/zerolog) [Coverage](http://gocover.io/github.com/rs/zerolog)
+
+The zerolog package provides a fast and simple logger dedicated to JSON output.
+
+Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection.
+
+Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance.
+
+To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging).
+
+
+
+## Who uses zerolog
+
+Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list.
+
+## Features
+
+* [Blazing fast](#benchmarks)
+* [Low to zero allocation](#benchmarks)
+* [Leveled logging](#leveled-logging)
+* [Sampling](#log-sampling)
+* [Hooks](#hooks)
+* [Contextual fields](#contextual-logging)
+* `context.Context` integration
+* [Integration with `net/http`](#integration-with-nethttp)
+* [JSON and CBOR encoding formats](#binary-encoding)
+* [Pretty logging for development](#pretty-logging)
+* [Error Logging (with optional Stacktrace)](#error-logging)
+
+## Installation
+
+```bash
+go get -u github.com/rs/zerolog/log
+```
+
+## Getting Started
+
+### Simple Logging Example
+
+For simple logging, import the global logger package **github.com/rs/zerolog/log**
+
+```go
+package main
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ // UNIX Time is faster and smaller than most timestamps
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ log.Print("hello world")
+}
+
+// Output: {"time":1516134303,"level":"debug","message":"hello world"}
+```
+> Note: By default log writes to `os.Stderr`
+> Note: The default log level for `log.Print` is *debug*
+
+### Contextual Logging
+
+**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below:
+
+```go
+package main
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ log.Debug().
+ Str("Scale", "833 cents").
+ Float64("Interval", 833.09).
+ Msg("Fibonacci is everywhere")
+
+ log.Debug().
+ Str("Name", "Tom").
+ Send()
+}
+
+// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"}
+// Output: {"level":"debug","Name":"Tom","time":1562212768}
+```
+
+> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types)
+
+### Leveled Logging
+
+#### Simple Leveled Logging Example
+
+```go
+package main
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ log.Info().Msg("hello world")
+}
+
+// Output: {"time":1516134303,"level":"info","message":"hello world"}
+```
+
+> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world")`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile-time error to alert you of this.
+
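+A minimal sketch of that pitfall (illustrative only, not part of the upstream README):
+
+```go
+package main
+
+import (
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+    // Built but never written: the chain is missing Msg, Msgf or Send.
+    log.Info().Str("foo", "bar")
+
+    // Written: the chain is completed with Msg.
+    log.Info().Str("foo", "bar").Msg("this one is logged")
+}
+
+// Only the second statement produces output, e.g.:
+// {"level":"info","foo":"bar","time":1516134303,"message":"this one is logged"}
+```
+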
+**zerolog** allows for logging at the following levels (from highest to lowest):
+
+* panic (`zerolog.PanicLevel`, 5)
+* fatal (`zerolog.FatalLevel`, 4)
+* error (`zerolog.ErrorLevel`, 3)
+* warn (`zerolog.WarnLevel`, 2)
+* info (`zerolog.InfoLevel`, 1)
+* debug (`zerolog.DebugLevel`, 0)
+* trace (`zerolog.TraceLevel`, -1)
+
+You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant.
+
+#### Setting Global Log Level
+
+This example uses command-line flags to demonstrate various outputs depending on the chosen log level.
+
+```go
+package main
+
+import (
+ "flag"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+ debug := flag.Bool("debug", false, "sets log level to debug")
+
+ flag.Parse()
+
+ // Default level for this example is info, unless debug flag is present
+ zerolog.SetGlobalLevel(zerolog.InfoLevel)
+ if *debug {
+ zerolog.SetGlobalLevel(zerolog.DebugLevel)
+ }
+
+ log.Debug().Msg("This message appears only when log level set to Debug")
+ log.Info().Msg("This message appears when log level set to Debug or Info")
+
+ if e := log.Debug(); e.Enabled() {
+ // Compute log output only if enabled.
+ value := "bar"
+ e.Str("foo", value).Msg("some debug message")
+ }
+}
+```
+
+Info Output (no flag)
+
+```bash
+$ ./logLevelExample
+{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"}
+```
+
+Debug Output (debug flag set)
+
+```bash
+$ ./logLevelExample -debug
+{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"}
+{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"}
+{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"}
+```
+
+#### Logging without Level or Message
+
+You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below.
+
+```go
+package main
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ log.Log().
+ Str("foo", "bar").
+ Msg("")
+}
+
+// Output: {"time":1494567715,"foo":"bar"}
+```
+
+### Error Logging
+
+You can log errors using the `Err` method
+
+```go
+package main
+
+import (
+ "errors"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ err := errors.New("seems we have an error here")
+ log.Error().Err(err).Msg("")
+}
+
+// Output: {"level":"error","error":"seems we have an error here","time":1609085256}
+```
+
+> The default field name for errors is `error`; you can change this by setting `zerolog.ErrorFieldName` to meet your needs.
+
+#### Error Logging with Stacktrace
+
+Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors.
+
+```go
+package main
+
+import (
+ "github.com/pkg/errors"
+ "github.com/rs/zerolog/pkgerrors"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+ zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
+
+ err := outer()
+ log.Error().Stack().Err(err).Msg("")
+}
+
+func inner() error {
+ return errors.New("seems we have an error here")
+}
+
+func middle() error {
+ err := inner()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func outer() error {
+ err := middle()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683}
+```
+
+> zerolog.ErrorStackMarshaler must be set in order for the stack to output anything.
+
+#### Logging Fatal Messages
+
+```go
+package main
+
+import (
+ "errors"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+func main() {
+ err := errors.New("A repo man spends his life getting into tense situations")
+ service := "myservice"
+
+ zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+
+ log.Fatal().
+ Err(err).
+ Str("service", service).
+ Msgf("Cannot start %s", service)
+}
+
+// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"}
+// exit status 1
+```
+
+> NOTE: Using `Msgf` generates one allocation even when the logger is disabled.
+
+
+### Create logger instance to manage different outputs
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+
+logger.Info().Str("foo", "bar").Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"}
+```
+
+### Sub-loggers let you chain loggers with additional context
+
+```go
+sublogger := log.With().
+ Str("component", "foo").
+ Logger()
+sublogger.Info().Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"}
+```
+
+### Pretty logging
+
+To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`:
+
+```go
+log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
+
+log.Info().Str("foo", "bar").Msg("Hello world")
+
+// Output: 3:04PM INF Hello World foo=bar
+```
+
+To customize the configuration and formatting:
+
+```go
+output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}
+output.FormatLevel = func(i interface{}) string {
+ return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
+}
+output.FormatMessage = func(i interface{}) string {
+ return fmt.Sprintf("***%s****", i)
+}
+output.FormatFieldName = func(i interface{}) string {
+ return fmt.Sprintf("%s:", i)
+}
+output.FormatFieldValue = func(i interface{}) string {
+ return strings.ToUpper(fmt.Sprintf("%s", i))
+}
+
+log := zerolog.New(output).With().Timestamp().Logger()
+
+log.Info().Str("foo", "bar").Msg("Hello World")
+
+// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR
+```
+
+### Sub dictionary
+
+```go
+log.Info().
+ Str("foo", "bar").
+ Dict("dict", zerolog.Dict().
+ Str("bar", "baz").
+ Int("n", 1),
+ ).Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
+```
+
+### Customize automatic field names
+
+```go
+zerolog.TimestampFieldName = "t"
+zerolog.LevelFieldName = "l"
+zerolog.MessageFieldName = "m"
+
+log.Info().Msg("hello world")
+
+// Output: {"l":"info","t":1494567715,"m":"hello world"}
+```
+
+### Add contextual fields to the global logger
+
+```go
+log.Logger = log.With().Str("foo", "bar").Logger()
+```
+
+### Add file and line number to log
+
+```go
+log.Logger = log.With().Caller().Logger()
+log.Info().Msg("hello world")
+
+// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
+```
+
+
+### Thread-safe, lock-free, non-blocking writer
+
+If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows:
+
+```go
+wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
+ fmt.Printf("Logger Dropped %d messages", missed)
+ })
+log := zerolog.New(wr)
+log.Print("test")
+```
+
+You will need to install `code.cloudfoundry.org/go-diodes` to use this feature.
+
+### Log Sampling
+
+```go
+sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+sampled.Info().Msg("will be logged every 10 messages")
+
+// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"}
+```
+
+More advanced sampling:
+
+```go
+// Will let through 5 debug messages per 1-second period.
+// Beyond those 5, only 1 in every 100 debug messages is logged.
+// Other levels are not sampled.
+sampled := log.Sample(zerolog.LevelSampler{
+ DebugSampler: &zerolog.BurstSampler{
+ Burst: 5,
+ Period: 1*time.Second,
+ NextSampler: &zerolog.BasicSampler{N: 100},
+ },
+})
+sampled.Debug().Msg("hello world")
+
+// Output: {"time":1494567715,"level":"debug","message":"hello world"}
+```
+
+### Hooks
+
+```go
+type SeverityHook struct{}
+
+func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+ if level != zerolog.NoLevel {
+ e.Str("severity", level.String())
+ }
+}
+
+hooked := log.Hook(SeverityHook{})
+hooked.Warn().Msg("")
+
+// Output: {"level":"warn","severity":"warn"}
+```
+
+### Pass a sub-logger by context
+
+```go
+ctx := log.With().Str("component", "module").Logger().WithContext(ctx)
+
+log.Ctx(ctx).Info().Msg("hello world")
+
+// Output: {"component":"module","level":"info","message":"hello world"}
+```
+
+### Set as standard logger output
+
+```go
+log := zerolog.New(os.Stdout).With().
+ Str("foo", "bar").
+ Logger()
+
+stdlog.SetFlags(0)
+stdlog.SetOutput(log)
+
+stdlog.Print("hello world")
+
+// Output: {"foo":"bar","message":"hello world"}
+```
+
+### Integration with `net/http`
+
+The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
+
+In this example we use [alice](https://github.com/justinas/alice) to install the logger for better readability.
+
+```go
+log := zerolog.New(os.Stdout).With().
+ Timestamp().
+ Str("role", "my-service").
+ Str("host", host).
+ Logger()
+
+c := alice.New()
+
+// Install the logger handler with default output on the console
+c = c.Append(hlog.NewHandler(log))
+
+// Install a provided extra handler to set some of the request's context fields.
+// Thanks to that handler, all our logs will come with some prepopulated fields.
+c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
+ hlog.FromRequest(r).Info().
+ Str("method", r.Method).
+ Stringer("url", r.URL).
+ Int("status", status).
+ Int("size", size).
+ Dur("duration", duration).
+ Msg("")
+}))
+c = c.Append(hlog.RemoteAddrHandler("ip"))
+c = c.Append(hlog.UserAgentHandler("user_agent"))
+c = c.Append(hlog.RefererHandler("referer"))
+c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))
+
+// Here is your final handler
+h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Get the logger from the request's context. You can safely assume it
+ // will be always there: if the handler is removed, hlog.FromRequest
+ // will return a no-op logger.
+ hlog.FromRequest(r).Info().
+ Str("user", "current user").
+ Str("status", "ok").
+ Msg("Something happened")
+
+ // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
+}))
+http.Handle("/", h)
+
+if err := http.ListenAndServe(":8080", nil); err != nil {
+ log.Fatal().Err(err).Msg("Startup failed")
+}
+```
+
+## Multiple Log Output
+`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs.
+In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter.
+```go
+func main() {
+ consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout}
+
+ multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout)
+
+ logger := zerolog.New(multi).With().Timestamp().Logger()
+
+ logger.Info().Msg("Hello World!")
+}
+
+// Output (Line 1: Console; Line 2: Stdout)
+// 12:36PM INF Hello World!
+// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"}
+```
+
+## Global Settings
+
+Some settings can be changed and will be applied to all loggers (see the sketch after this list):
+
+* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
+* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
+* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
+* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
+* `zerolog.LevelFieldName`: Can be set to customize level field name.
+* `zerolog.MessageFieldName`: Can be set to customize message field name.
+* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
+* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamps.
+* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
+* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
+* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed to stderr. This handler must be thread-safe and non-blocking.
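+
+As an illustration (not part of the upstream README), here is a minimal sketch combining a few of these settings; the chosen level, field name and format are arbitrary:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+)
+
+func main() {
+	// Illustrative values only: raise the global minimum level,
+	// rename the timestamp field and format times as UNIX timestamps.
+	zerolog.SetGlobalLevel(zerolog.InfoLevel)
+	zerolog.TimestampFieldName = "ts"
+	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+	zerolog.DurationFieldInteger = true
+
+	// Replace the global logger used by the package-level methods.
+	log.Logger = zerolog.New(os.Stdout).With().Timestamp().Logger()
+
+	log.Debug().Msg("filtered out by the global level")
+	log.Info().Msg("hello world")
+
+	// Output: {"level":"info","ts":...,"message":"hello world"}
+}
+```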
+
+## Field Types
+
+### Standard Types
+
+* `Str`
+* `Bool`
+* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
+* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
+* `Float32`, `Float64`
+
+### Advanced Fields
+
+* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
+* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
+* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`.
+* `Dur`: Adds a field with `time.Duration`.
+* `Dict`: Adds a sub-key/value as a field of the event.
+* `RawJSON`: Adds a field with already-encoded JSON (`[]byte`).
+* `Hex`: Adds a field with the value formatted as a hexadecimal string (`[]byte`).
+* `Interface`: Uses reflection to marshal the type.
+
+Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error`, etc.). A short combined sketch follows.
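+
+As an illustration (not part of the upstream README), a sketch combining several of the field types above; the field names and values are arbitrary, and the usual `zerolog`, `log`, `time` and `errors` imports are assumed:
+
+```go
+log.Info().
+	Str("service", "billing").
+	Int("attempt", 3).
+	Strs("tags", []string{"retry", "grpc"}).
+	Dur("elapsed", 87*time.Millisecond).
+	Dict("request", zerolog.Dict().Str("method", "GET").Int("status", 200)).
+	Err(errors.New("upstream timeout")).
+	Msg("request finished")
+
+// Produces a single JSON line containing all of the fields above
+// (plus the logger's own context fields such as the timestamp).
+```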
+
+## Binary Encoding
+
+In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
+
+```bash
+go build -tags binary_log .
+```
+
+To decode binary-encoded log files, you can use any CBOR decoder. One that has been tested to work
+with the zerolog library is [CSD](https://github.com/toravir/csd/).
+
+## Related Projects
+
+* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
+* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog`
+
+## Benchmarks
+
+See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.
+
+All operations are allocation free (those numbers *include* JSON encoding):
+
+```text
+BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op
+BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op
+BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op
+BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op
+BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op
+```
+
+There are a few Go logging benchmarks and comparisons that include zerolog.
+
+* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
+* [uber-common/zap](https://github.com/uber-go/zap#performance)
+
+Using Uber's zap comparison benchmark:
+
+Log a message and 10 fields:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
+| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
+| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
+| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
+| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
+| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
+| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
+| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |
+
+Log a message with a logger that already has 10 fields of context:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
+| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
+| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
+| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
+| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |
+
+Log a static string, without any context or `printf`-style templating:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
+| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
+| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
+| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
+| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
+| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |
+
+## Caveats
+
+Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in the final JSON:
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+logger.Info().
+ Timestamp().
+ Msg("dup")
+// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+```
+
+In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
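+
+One way to avoid the duplicate shown above (an illustrative sketch, not from the upstream README) is to attach `Timestamp()` in only one place, either on the logger context or on each event, never both:
+
+```go
+// Timestamp only on the logger context...
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+logger.Info().Msg("no dup")
+
+// ...or only on the event.
+plain := zerolog.New(os.Stderr)
+plain.Info().Timestamp().Msg("no dup either")
+```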
diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml
new file mode 100644
index 000000000..a1e896d7b
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/_config.yml
@@ -0,0 +1 @@
+remote_theme: rs/gh-readme
diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go
new file mode 100644
index 000000000..0f7f53eed
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/array.go
@@ -0,0 +1,233 @@
+package zerolog
+
+import (
+ "net"
+ "sync"
+ "time"
+)
+
+var arrayPool = &sync.Pool{
+ New: func() interface{} {
+ return &Array{
+ buf: make([]byte, 0, 500),
+ }
+ },
+}
+
+// Array is used to prepopulate an array of items
+// which can be re-used to add to log messages.
+type Array struct {
+ buf []byte
+}
+
+func putArray(a *Array) {
+ // Proper usage of a sync.Pool requires each entry to have approximately
+ // the same memory cost. To obtain this property when the stored type
+ // contains a variably-sized buffer, we add a hard limit on the maximum buffer
+ // to place back in the pool.
+ //
+ // See https://golang.org/issue/23199
+ const maxSize = 1 << 16 // 64KiB
+ if cap(a.buf) > maxSize {
+ return
+ }
+ arrayPool.Put(a)
+}
+
+// Arr creates an array to be added to an Event or Context.
+func Arr() *Array {
+ a := arrayPool.Get().(*Array)
+ a.buf = a.buf[:0]
+ return a
+}
+
+// MarshalZerologArray method here is a no-op, since the data is
+// already in the needed format.
+func (*Array) MarshalZerologArray(*Array) {
+}
+
+func (a *Array) write(dst []byte) []byte {
+ dst = enc.AppendArrayStart(dst)
+ if len(a.buf) > 0 {
+		dst = append(dst, a.buf...)
+ }
+ dst = enc.AppendArrayEnd(dst)
+ putArray(a)
+ return dst
+}
+
+// Object marshals an object that implements the LogObjectMarshaler
+// interface and appends it to the array.
+func (a *Array) Object(obj LogObjectMarshaler) *Array {
+ e := Dict()
+ obj.MarshalZerologObject(e)
+ e.buf = enc.AppendEndMarker(e.buf)
+ a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+ putEvent(e)
+ return a
+}
+
+// Str appends the val as a string to the array.
+func (a *Array) Str(val string) *Array {
+ a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
+ return a
+}
+
+// Bytes appends the val as a string to the array.
+func (a *Array) Bytes(val []byte) *Array {
+ a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
+ return a
+}
+
+// Hex appends the val as a hex string to the array.
+func (a *Array) Hex(val []byte) *Array {
+ a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
+ return a
+}
+
+// RawJSON adds already encoded JSON to the array.
+func (a *Array) RawJSON(val []byte) *Array {
+ a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val)
+ return a
+}
+
+// Err serializes and appends the err to the array.
+func (a *Array) Err(err error) *Array {
+ switch m := ErrorMarshalFunc(err).(type) {
+ case LogObjectMarshaler:
+ e := newEvent(nil, 0)
+ e.buf = e.buf[:0]
+ e.appendObject(m)
+ a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+ putEvent(e)
+ case error:
+ if m == nil || isNilValue(m) {
+ a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf))
+ } else {
+ a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
+ }
+ case string:
+ a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m)
+ default:
+ a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m)
+ }
+
+ return a
+}
+
+// Bool appends the val as a bool to the array.
+func (a *Array) Bool(b bool) *Array {
+ a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
+ return a
+}
+
+// Int appends i as an int to the array.
+func (a *Array) Int(i int) *Array {
+ a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Int8 appends i as an int8 to the array.
+func (a *Array) Int8(i int8) *Array {
+ a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Int16 appends i as an int16 to the array.
+func (a *Array) Int16(i int16) *Array {
+ a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Int32 appends i as an int32 to the array.
+func (a *Array) Int32(i int32) *Array {
+ a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Int64 appends i as an int64 to the array.
+func (a *Array) Int64(i int64) *Array {
+ a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Uint appends i as a uint to the array.
+func (a *Array) Uint(i uint) *Array {
+ a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Uint8 appends i as a uint8 to the array.
+func (a *Array) Uint8(i uint8) *Array {
+ a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Uint16 appends i as a uint16 to the array.
+func (a *Array) Uint16(i uint16) *Array {
+ a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Uint32 appends i as a uint32 to the array.
+func (a *Array) Uint32(i uint32) *Array {
+ a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Uint64 appends i as a uint64 to the array.
+func (a *Array) Uint64(i uint64) *Array {
+ a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// Float32 appends f as a float32 to the array.
+func (a *Array) Float32(f float32) *Array {
+ a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
+ return a
+}
+
+// Float64 appends f as a float64 to the array.
+func (a *Array) Float64(f float64) *Array {
+ a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
+ return a
+}
+
+// Time appends t, formatted as a string using zerolog.TimeFieldFormat, to the array.
+func (a *Array) Time(t time.Time) *Array {
+ a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
+ return a
+}
+
+// Dur appends d to the array.
+func (a *Array) Dur(d time.Duration) *Array {
+ a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
+ return a
+}
+
+// Interface appends i, marshaled using reflection, to the array.
+func (a *Array) Interface(i interface{}) *Array {
+ if obj, ok := i.(LogObjectMarshaler); ok {
+ return a.Object(obj)
+ }
+ a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i)
+ return a
+}
+
+// IPAddr adds IPv4 or IPv6 address to the array
+func (a *Array) IPAddr(ip net.IP) *Array {
+ a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip)
+ return a
+}
+
+// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array
+func (a *Array) IPPrefix(pfx net.IPNet) *Array {
+ a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx)
+ return a
+}
+
+// MACAddr adds a MAC (Ethernet) address to the array
+func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
+ a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
+ return a
+}
diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go
new file mode 100644
index 000000000..fd0cad440
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/console.go
@@ -0,0 +1,409 @@
+package zerolog
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ colorBlack = iota + 30
+ colorRed
+ colorGreen
+ colorYellow
+ colorBlue
+ colorMagenta
+ colorCyan
+ colorWhite
+
+ colorBold = 1
+ colorDarkGray = 90
+)
+
+var (
+ consoleBufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, 100))
+ },
+ }
+)
+
+const (
+ consoleDefaultTimeFormat = time.Kitchen
+)
+
+// Formatter transforms the input into a formatted string.
+type Formatter func(interface{}) string
+
+// ConsoleWriter parses the JSON input and writes it in an
+// (optionally) colorized, human-friendly format to Out.
+type ConsoleWriter struct {
+ // Out is the output destination.
+ Out io.Writer
+
+ // NoColor disables the colorized output.
+ NoColor bool
+
+ // TimeFormat specifies the format for timestamp in output.
+ TimeFormat string
+
+ // PartsOrder defines the order of parts in output.
+ PartsOrder []string
+
+ // PartsExclude defines parts to not display in output.
+ PartsExclude []string
+
+ FormatTimestamp Formatter
+ FormatLevel Formatter
+ FormatCaller Formatter
+ FormatMessage Formatter
+ FormatFieldName Formatter
+ FormatFieldValue Formatter
+ FormatErrFieldName Formatter
+ FormatErrFieldValue Formatter
+}
+
+// NewConsoleWriter creates and initializes a new ConsoleWriter.
+func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
+ w := ConsoleWriter{
+ Out: os.Stdout,
+ TimeFormat: consoleDefaultTimeFormat,
+ PartsOrder: consoleDefaultPartsOrder(),
+ }
+
+ for _, opt := range options {
+ opt(&w)
+ }
+
+ return w
+}
+
+// Write transforms the JSON input with formatters and appends to w.Out.
+func (w ConsoleWriter) Write(p []byte) (n int, err error) {
+ if w.PartsOrder == nil {
+ w.PartsOrder = consoleDefaultPartsOrder()
+ }
+
+ var buf = consoleBufPool.Get().(*bytes.Buffer)
+ defer func() {
+ buf.Reset()
+ consoleBufPool.Put(buf)
+ }()
+
+ var evt map[string]interface{}
+ p = decodeIfBinaryToBytes(p)
+ d := json.NewDecoder(bytes.NewReader(p))
+ d.UseNumber()
+ err = d.Decode(&evt)
+ if err != nil {
+ return n, fmt.Errorf("cannot decode event: %s", err)
+ }
+
+ for _, p := range w.PartsOrder {
+ w.writePart(buf, evt, p)
+ }
+
+ w.writeFields(evt, buf)
+
+ err = buf.WriteByte('\n')
+ if err != nil {
+ return n, err
+ }
+ _, err = buf.WriteTo(w.Out)
+ return len(p), err
+}
+
+// writeFields appends formatted key-value pairs to buf.
+func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
+ var fields = make([]string, 0, len(evt))
+ for field := range evt {
+ switch field {
+ case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
+ continue
+ }
+ fields = append(fields, field)
+ }
+ sort.Strings(fields)
+
+ if len(fields) > 0 {
+ buf.WriteByte(' ')
+ }
+
+ // Move the "error" field to the front
+ ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName })
+ if ei < len(fields) && fields[ei] == ErrorFieldName {
+ fields[ei] = ""
+ fields = append([]string{ErrorFieldName}, fields...)
+ var xfields = make([]string, 0, len(fields))
+ for _, field := range fields {
+ if field == "" { // Skip empty fields
+ continue
+ }
+ xfields = append(xfields, field)
+ }
+ fields = xfields
+ }
+
+ for i, field := range fields {
+ var fn Formatter
+ var fv Formatter
+
+ if field == ErrorFieldName {
+ if w.FormatErrFieldName == nil {
+ fn = consoleDefaultFormatErrFieldName(w.NoColor)
+ } else {
+ fn = w.FormatErrFieldName
+ }
+
+ if w.FormatErrFieldValue == nil {
+ fv = consoleDefaultFormatErrFieldValue(w.NoColor)
+ } else {
+ fv = w.FormatErrFieldValue
+ }
+ } else {
+ if w.FormatFieldName == nil {
+ fn = consoleDefaultFormatFieldName(w.NoColor)
+ } else {
+ fn = w.FormatFieldName
+ }
+
+ if w.FormatFieldValue == nil {
+ fv = consoleDefaultFormatFieldValue
+ } else {
+ fv = w.FormatFieldValue
+ }
+ }
+
+ buf.WriteString(fn(field))
+
+ switch fValue := evt[field].(type) {
+ case string:
+ if needsQuote(fValue) {
+ buf.WriteString(fv(strconv.Quote(fValue)))
+ } else {
+ buf.WriteString(fv(fValue))
+ }
+ case json.Number:
+ buf.WriteString(fv(fValue))
+ default:
+ b, err := json.Marshal(fValue)
+ if err != nil {
+ fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
+ } else {
+ fmt.Fprint(buf, fv(b))
+ }
+ }
+
+ if i < len(fields)-1 { // Skip space for last field
+ buf.WriteByte(' ')
+ }
+ }
+}
+
+// writePart appends a formatted part to buf.
+func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) {
+ var f Formatter
+
+	if len(w.PartsExclude) > 0 {
+ for _, exclude := range w.PartsExclude {
+ if exclude == p {
+ return
+ }
+ }
+ }
+
+ switch p {
+ case LevelFieldName:
+ if w.FormatLevel == nil {
+ f = consoleDefaultFormatLevel(w.NoColor)
+ } else {
+ f = w.FormatLevel
+ }
+ case TimestampFieldName:
+ if w.FormatTimestamp == nil {
+ f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
+ } else {
+ f = w.FormatTimestamp
+ }
+ case MessageFieldName:
+ if w.FormatMessage == nil {
+ f = consoleDefaultFormatMessage
+ } else {
+ f = w.FormatMessage
+ }
+ case CallerFieldName:
+ if w.FormatCaller == nil {
+ f = consoleDefaultFormatCaller(w.NoColor)
+ } else {
+ f = w.FormatCaller
+ }
+ default:
+ if w.FormatFieldValue == nil {
+ f = consoleDefaultFormatFieldValue
+ } else {
+ f = w.FormatFieldValue
+ }
+ }
+
+ var s = f(evt[p])
+
+ if len(s) > 0 {
+ buf.WriteString(s)
+ if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part
+ buf.WriteByte(' ')
+ }
+ }
+}
+
+// needsQuote returns true when the string s should be quoted in output.
+func needsQuote(s string) bool {
+ for i := range s {
+ if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
+func colorize(s interface{}, c int, disabled bool) string {
+ if disabled {
+ return fmt.Sprintf("%s", s)
+ }
+ return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
+}
+
+// ----- DEFAULT FORMATTERS ---------------------------------------------------
+
+func consoleDefaultPartsOrder() []string {
+ return []string{
+ TimestampFieldName,
+ LevelFieldName,
+ CallerFieldName,
+ MessageFieldName,
+ }
+}
+
+func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
+ if timeFormat == "" {
+ timeFormat = consoleDefaultTimeFormat
+ }
+ return func(i interface{}) string {
+ t := ""
+ switch tt := i.(type) {
+ case string:
+ ts, err := time.Parse(TimeFieldFormat, tt)
+ if err != nil {
+ t = tt
+ } else {
+ t = ts.Format(timeFormat)
+ }
+ case json.Number:
+ i, err := tt.Int64()
+ if err != nil {
+ t = tt.String()
+ } else {
+ var sec, nsec int64 = i, 0
+ switch TimeFieldFormat {
+ case TimeFormatUnixMs:
+ nsec = int64(time.Duration(i) * time.Millisecond)
+ sec = 0
+ case TimeFormatUnixMicro:
+ nsec = int64(time.Duration(i) * time.Microsecond)
+ sec = 0
+ }
+ ts := time.Unix(sec, nsec).UTC()
+ t = ts.Format(timeFormat)
+ }
+ }
+ return colorize(t, colorDarkGray, noColor)
+ }
+}
+
+func consoleDefaultFormatLevel(noColor bool) Formatter {
+ return func(i interface{}) string {
+ var l string
+ if ll, ok := i.(string); ok {
+ switch ll {
+ case "trace":
+ l = colorize("TRC", colorMagenta, noColor)
+ case "debug":
+ l = colorize("DBG", colorYellow, noColor)
+ case "info":
+ l = colorize("INF", colorGreen, noColor)
+ case "warn":
+ l = colorize("WRN", colorRed, noColor)
+ case "error":
+ l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
+ case "fatal":
+ l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
+ case "panic":
+ l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
+ default:
+ l = colorize("???", colorBold, noColor)
+ }
+ } else {
+ if i == nil {
+ l = colorize("???", colorBold, noColor)
+ } else {
+ l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
+ }
+ }
+ return l
+ }
+}
+
+func consoleDefaultFormatCaller(noColor bool) Formatter {
+ return func(i interface{}) string {
+ var c string
+ if cc, ok := i.(string); ok {
+ c = cc
+ }
+ if len(c) > 0 {
+ if cwd, err := os.Getwd(); err == nil {
+ if rel, err := filepath.Rel(cwd, c); err == nil {
+ c = rel
+ }
+ }
+ c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor)
+ }
+ return c
+ }
+}
+
+func consoleDefaultFormatMessage(i interface{}) string {
+ if i == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s", i)
+}
+
+func consoleDefaultFormatFieldName(noColor bool) Formatter {
+ return func(i interface{}) string {
+ return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
+ }
+}
+
+func consoleDefaultFormatFieldValue(i interface{}) string {
+ return fmt.Sprintf("%s", i)
+}
+
+func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
+ return func(i interface{}) string {
+ return colorize(fmt.Sprintf("%s=", i), colorRed, noColor)
+ }
+}
+
+func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
+ return func(i interface{}) string {
+ return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
+ }
+}
diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go
new file mode 100644
index 000000000..7cdd8cc2a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/context.go
@@ -0,0 +1,431 @@
+package zerolog
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math"
+ "net"
+ "time"
+)
+
+// Context configures a new sub-logger with contextual fields.
+type Context struct {
+ l Logger
+}
+
+// Logger returns the logger with the context previously set.
+func (c Context) Logger() Logger {
+ return c.l
+}
+
+// Fields is a helper function to use a map to set fields using type assertion.
+func (c Context) Fields(fields map[string]interface{}) Context {
+ c.l.context = appendFields(c.l.context, fields)
+ return c
+}
+
+// Dict adds the field key with the dict to the logger context.
+func (c Context) Dict(key string, dict *Event) Context {
+ dict.buf = enc.AppendEndMarker(dict.buf)
+ c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
+ putEvent(dict)
+ return c
+}
+
+// Array adds the field key with an array to the event context.
+// Use zerolog.Arr() to create the array or pass a type that
+// implements the LogArrayMarshaler interface.
+func (c Context) Array(key string, arr LogArrayMarshaler) Context {
+	c.l.context = enc.AppendKey(c.l.context, key)
+	if arr, ok := arr.(*Array); ok {
+		c.l.context = arr.write(c.l.context)
+		return c
+	}
+	a := Arr()
+	arr.MarshalZerologArray(a)
+	c.l.context = a.write(c.l.context)
+	return c
+}
+
+// Object marshals an object that implements the LogObjectMarshaler interface.
+func (c Context) Object(key string, obj LogObjectMarshaler) Context {
+ e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+ e.Object(key, obj)
+ c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+ putEvent(e)
+ return c
+}
+
+// EmbedObject marshals and embeds an object that implements the LogObjectMarshaler interface.
+func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
+ e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+ e.EmbedObject(obj)
+ c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+ putEvent(e)
+ return c
+}
+
+// Str adds the field key with val as a string to the logger context.
+func (c Context) Str(key, val string) Context {
+ c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val)
+ return c
+}
+
+// Strs adds the field key with vals as a []string to the logger context.
+func (c Context) Strs(key string, vals []string) Context {
+ c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals)
+ return c
+}
+
+// Stringer adds the field key with val.String() (or null if val is nil) to the logger context.
+func (c Context) Stringer(key string, val fmt.Stringer) Context {
+ if val != nil {
+ c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String())
+ return c
+ }
+
+ c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil)
+ return c
+}
+
+// Bytes adds the field key with val as a []byte to the logger context.
+func (c Context) Bytes(key string, val []byte) Context {
+ c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val)
+ return c
+}
+
+// Hex adds the field key with val as a hex string to the logger context.
+func (c Context) Hex(key string, val []byte) Context {
+ c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val)
+ return c
+}
+
+// RawJSON adds already encoded JSON to context.
+//
+// No sanity check is performed on b; it must not contain carriage returns and
+// be valid JSON.
+func (c Context) RawJSON(key string, b []byte) Context {
+ c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b)
+ return c
+}
+
+// AnErr adds the field key with serialized err to the logger context.
+func (c Context) AnErr(key string, err error) Context {
+ switch m := ErrorMarshalFunc(err).(type) {
+ case nil:
+ return c
+ case LogObjectMarshaler:
+ return c.Object(key, m)
+ case error:
+ if m == nil || isNilValue(m) {
+ return c
+ } else {
+ return c.Str(key, m.Error())
+ }
+ case string:
+ return c.Str(key, m)
+ default:
+ return c.Interface(key, m)
+ }
+}
+
+// Errs adds the field key with errs as an array of serialized errors to the
+// logger context.
+func (c Context) Errs(key string, errs []error) Context {
+ arr := Arr()
+ for _, err := range errs {
+ switch m := ErrorMarshalFunc(err).(type) {
+ case LogObjectMarshaler:
+ arr = arr.Object(m)
+ case error:
+ if m == nil || isNilValue(m) {
+ arr = arr.Interface(nil)
+ } else {
+ arr = arr.Str(m.Error())
+ }
+ case string:
+ arr = arr.Str(m)
+ default:
+ arr = arr.Interface(m)
+ }
+ }
+
+ return c.Array(key, arr)
+}
+
+// Err adds the field "error" with serialized err to the logger context.
+func (c Context) Err(err error) Context {
+ return c.AnErr(ErrorFieldName, err)
+}
+
+// Bool adds the field key with val as a bool to the logger context.
+func (c Context) Bool(key string, b bool) Context {
+ c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
+ return c
+}
+
+// Bools adds the field key with val as a []bool to the logger context.
+func (c Context) Bools(key string, b []bool) Context {
+ c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b)
+ return c
+}
+
+// Int adds the field key with i as a int to the logger context.
+func (c Context) Int(key string, i int) Context {
+ c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Ints adds the field key with i as a []int to the logger context.
+func (c Context) Ints(key string, i []int) Context {
+ c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Int8 adds the field key with i as a int8 to the logger context.
+func (c Context) Int8(key string, i int8) Context {
+ c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Ints8 adds the field key with i as a []int8 to the logger context.
+func (c Context) Ints8(key string, i []int8) Context {
+ c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Int16 adds the field key with i as a int16 to the logger context.
+func (c Context) Int16(key string, i int16) Context {
+ c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Ints16 adds the field key with i as a []int16 to the logger context.
+func (c Context) Ints16(key string, i []int16) Context {
+ c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Int32 adds the field key with i as a int32 to the logger context.
+func (c Context) Int32(key string, i int32) Context {
+ c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Ints32 adds the field key with i as a []int32 to the logger context.
+func (c Context) Ints32(key string, i []int32) Context {
+ c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Int64 adds the field key with i as a int64 to the logger context.
+func (c Context) Int64(key string, i int64) Context {
+ c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Ints64 adds the field key with i as a []int64 to the logger context.
+func (c Context) Ints64(key string, i []int64) Context {
+ c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uint adds the field key with i as a uint to the logger context.
+func (c Context) Uint(key string, i uint) Context {
+ c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uints adds the field key with i as a []uint to the logger context.
+func (c Context) Uints(key string, i []uint) Context {
+ c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uint8 adds the field key with i as a uint8 to the logger context.
+func (c Context) Uint8(key string, i uint8) Context {
+ c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uints8 adds the field key with i as a []uint8 to the logger context.
+func (c Context) Uints8(key string, i []uint8) Context {
+ c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uint16 adds the field key with i as a uint16 to the logger context.
+func (c Context) Uint16(key string, i uint16) Context {
+ c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uints16 adds the field key with i as a []uint16 to the logger context.
+func (c Context) Uints16(key string, i []uint16) Context {
+ c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uint32 adds the field key with i as a uint32 to the logger context.
+func (c Context) Uint32(key string, i uint32) Context {
+ c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uints32 adds the field key with i as a []uint32 to the logger context.
+func (c Context) Uints32(key string, i []uint32) Context {
+ c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uint64 adds the field key with i as a uint64 to the logger context.
+func (c Context) Uint64(key string, i uint64) Context {
+ c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Uints64 adds the field key with i as a []uint64 to the logger context.
+func (c Context) Uints64(key string, i []uint64) Context {
+ c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+// Float32 adds the field key with f as a float32 to the logger context.
+func (c Context) Float32(key string, f float32) Context {
+ c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
+ return c
+}
+
+// Floats32 adds the field key with f as a []float32 to the logger context.
+func (c Context) Floats32(key string, f []float32) Context {
+ c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
+ return c
+}
+
+// Float64 adds the field key with f as a float64 to the logger context.
+func (c Context) Float64(key string, f float64) Context {
+ c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
+ return c
+}
+
+// Floats64 adds the field key with f as a []float64 to the logger context.
+func (c Context) Floats64(key string, f []float64) Context {
+ c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
+ return c
+}
+
+type timestampHook struct{}
+
+func (ts timestampHook) Run(e *Event, level Level, msg string) {
+ e.Timestamp()
+}
+
+var th = timestampHook{}
+
+// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
+// To customize the key name, change zerolog.TimestampFieldName.
+//
+// NOTE: It won't dedupe the "time" key if the *Context has one already.
+func (c Context) Timestamp() Context {
+ c.l = c.l.Hook(th)
+ return c
+}
+
+// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
+func (c Context) Time(key string, t time.Time) Context {
+ c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
+ return c
+}
+
+// Times adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
+func (c Context) Times(key string, t []time.Time) Context {
+ c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
+ return c
+}
+
+// Dur adds the field key with d divided by unit and stored as a float.
+func (c Context) Dur(key string, d time.Duration) Context {
+ c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
+ return c
+}
+
+// Durs adds the field key with d divided by unit and stored as a float.
+func (c Context) Durs(key string, d []time.Duration) Context {
+ c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
+ return c
+}
+
+// Interface adds the field key with i marshaled using reflection.
+func (c Context) Interface(key string, i interface{}) Context {
+ c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
+ return c
+}
+
+type callerHook struct {
+ callerSkipFrameCount int
+}
+
+func newCallerHook(skipFrameCount int) callerHook {
+ return callerHook{callerSkipFrameCount: skipFrameCount}
+}
+
+func (ch callerHook) Run(e *Event, level Level, msg string) {
+ switch ch.callerSkipFrameCount {
+ case useGlobalSkipFrameCount:
+ // Extra frames to skip (added by hook infra).
+ e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount)
+ default:
+ // Extra frames to skip (added by hook infra).
+ e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount)
+ }
+}
+
+// useGlobalSkipFrameCount acts as a flag to inform callerHook.Run
+// to use the global CallerSkipFrameCount.
+const useGlobalSkipFrameCount = math.MinInt32
+
+// ch is the default caller hook using the global CallerSkipFrameCount.
+var ch = newCallerHook(useGlobalSkipFrameCount)
+
+// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
+func (c Context) Caller() Context {
+ c.l = c.l.Hook(ch)
+ return c
+}
+
+// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key.
+// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger.
+// If set to -1 the global CallerSkipFrameCount will be used.
+func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context {
+ c.l = c.l.Hook(newCallerHook(skipFrameCount))
+ return c
+}
+
+// Stack enables stack trace printing for the error passed to Err().
+func (c Context) Stack() Context {
+ c.l.stack = true
+ return c
+}
+
+// IPAddr adds IPv4 or IPv6 Address to the context
+func (c Context) IPAddr(key string, ip net.IP) Context {
+ c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip)
+ return c
+}
+
+// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context
+func (c Context) IPPrefix(key string, pfx net.IPNet) Context {
+ c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx)
+ return c
+}
+
+// MACAddr adds MAC address to the context
+func (c Context) MACAddr(key string, ha net.HardwareAddr) Context {
+ c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha)
+ return c
+}
diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go
new file mode 100644
index 000000000..ce18a32cb
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/ctx.go
@@ -0,0 +1,48 @@
+package zerolog
+
+import (
+ "context"
+)
+
+var disabledLogger *Logger
+
+func init() {
+ SetGlobalLevel(TraceLevel)
+ l := Nop()
+ disabledLogger = &l
+}
+
+type ctxKey struct{}
+
+// WithContext returns a copy of ctx with l associated. If an instance of Logger
+// is already in the context, the context is not updated.
+//
+// For instance, to add a field to an existing logger in the context, use this
+// notation:
+//
+// ctx := r.Context()
+// l := zerolog.Ctx(ctx)
+// l.UpdateContext(func(c Context) Context {
+// return c.Str("bar", "baz")
+// })
+func (l *Logger) WithContext(ctx context.Context) context.Context {
+ if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
+ if lp == l {
+ // Do not store same logger.
+ return ctx
+ }
+ } else if l.level == Disabled {
+ // Do not store disabled logger.
+ return ctx
+ }
+ return context.WithValue(ctx, ctxKey{}, l)
+}
+
+// Ctx returns the Logger associated with the ctx. If no logger
+// is associated, a disabled logger is returned.
+func Ctx(ctx context.Context) *Logger {
+ if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
+ return l
+ }
+ return disabledLogger
+}
diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go
new file mode 100644
index 000000000..09b24e80c
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder.go
@@ -0,0 +1,56 @@
+package zerolog
+
+import (
+ "net"
+ "time"
+)
+
+type encoder interface {
+ AppendArrayDelim(dst []byte) []byte
+ AppendArrayEnd(dst []byte) []byte
+ AppendArrayStart(dst []byte) []byte
+ AppendBeginMarker(dst []byte) []byte
+ AppendBool(dst []byte, val bool) []byte
+ AppendBools(dst []byte, vals []bool) []byte
+ AppendBytes(dst, s []byte) []byte
+ AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
+ AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
+ AppendEndMarker(dst []byte) []byte
+ AppendFloat32(dst []byte, val float32) []byte
+ AppendFloat64(dst []byte, val float64) []byte
+ AppendFloats32(dst []byte, vals []float32) []byte
+ AppendFloats64(dst []byte, vals []float64) []byte
+ AppendHex(dst, s []byte) []byte
+ AppendIPAddr(dst []byte, ip net.IP) []byte
+ AppendIPPrefix(dst []byte, pfx net.IPNet) []byte
+ AppendInt(dst []byte, val int) []byte
+ AppendInt16(dst []byte, val int16) []byte
+ AppendInt32(dst []byte, val int32) []byte
+ AppendInt64(dst []byte, val int64) []byte
+ AppendInt8(dst []byte, val int8) []byte
+ AppendInterface(dst []byte, i interface{}) []byte
+ AppendInts(dst []byte, vals []int) []byte
+ AppendInts16(dst []byte, vals []int16) []byte
+ AppendInts32(dst []byte, vals []int32) []byte
+ AppendInts64(dst []byte, vals []int64) []byte
+ AppendInts8(dst []byte, vals []int8) []byte
+ AppendKey(dst []byte, key string) []byte
+ AppendLineBreak(dst []byte) []byte
+ AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte
+ AppendNil(dst []byte) []byte
+ AppendObjectData(dst []byte, o []byte) []byte
+ AppendString(dst []byte, s string) []byte
+ AppendStrings(dst []byte, vals []string) []byte
+ AppendTime(dst []byte, t time.Time, format string) []byte
+ AppendTimes(dst []byte, vals []time.Time, format string) []byte
+ AppendUint(dst []byte, val uint) []byte
+ AppendUint16(dst []byte, val uint16) []byte
+ AppendUint32(dst []byte, val uint32) []byte
+ AppendUint64(dst []byte, val uint64) []byte
+ AppendUint8(dst []byte, val uint8) []byte
+ AppendUints(dst []byte, vals []uint) []byte
+ AppendUints16(dst []byte, vals []uint16) []byte
+ AppendUints32(dst []byte, vals []uint32) []byte
+ AppendUints64(dst []byte, vals []uint64) []byte
+ AppendUints8(dst []byte, vals []uint8) []byte
+}
diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go b/vendor/github.com/rs/zerolog/encoder_cbor.go
new file mode 100644
index 000000000..f8d3fe9e7
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder_cbor.go
@@ -0,0 +1,35 @@
+// +build binary_log
+
+package zerolog
+
+// This file contains bindings to do binary encoding.
+
+import (
+ "github.com/rs/zerolog/internal/cbor"
+)
+
+var (
+ _ encoder = (*cbor.Encoder)(nil)
+
+ enc = cbor.Encoder{}
+)
+
+func appendJSON(dst []byte, j []byte) []byte {
+ return cbor.AppendEmbeddedJSON(dst, j)
+}
+
+// decodeIfBinaryToString - converts a binary formatted log msg to a
+// JSON formatted String Log message.
+func decodeIfBinaryToString(in []byte) string {
+ return cbor.DecodeIfBinaryToString(in)
+}
+
+func decodeObjectToStr(in []byte) string {
+ return cbor.DecodeObjectToStr(in)
+}
+
+// decodeIfBinaryToBytes - converts a binary formatted log msg to a
+// JSON formatted Bytes Log message.
+func decodeIfBinaryToBytes(in []byte) []byte {
+ return cbor.DecodeIfBinaryToBytes(in)
+}
diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go
new file mode 100644
index 000000000..fe580f5f6
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder_json.go
@@ -0,0 +1,32 @@
+// +build !binary_log
+
+package zerolog
+
+// encoder_json.go file contains bindings to generate
+// JSON encoded byte stream.
+
+import (
+ "github.com/rs/zerolog/internal/json"
+)
+
+var (
+ _ encoder = (*json.Encoder)(nil)
+
+ enc = json.Encoder{}
+)
+
+func appendJSON(dst []byte, j []byte) []byte {
+ return append(dst, j...)
+}
+
+func decodeIfBinaryToString(in []byte) string {
+ return string(in)
+}
+
+func decodeObjectToStr(in []byte) string {
+ return string(in)
+}
+
+func decodeIfBinaryToBytes(in []byte) []byte {
+ return in
+}
diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go
new file mode 100644
index 000000000..ff6ff24f2
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/event.go
@@ -0,0 +1,745 @@
+package zerolog
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var eventPool = &sync.Pool{
+ New: func() interface{} {
+ return &Event{
+ buf: make([]byte, 0, 500),
+ }
+ },
+}
+
+// Event represents a log event. It is instantiated by one of the level methods of
+// Logger and finalized by the Msg or Msgf method.
+type Event struct {
+ buf []byte
+ w LevelWriter
+ level Level
+ done func(msg string)
+ stack bool // enable error stack trace
+ ch []Hook // hooks from context
+ skipFrame int // The number of additional frames to skip when printing the caller.
+}
+
+func putEvent(e *Event) {
+ // Proper usage of a sync.Pool requires each entry to have approximately
+ // the same memory cost. To obtain this property when the stored type
+ // contains a variably-sized buffer, we add a hard limit on the maximum buffer
+ // to place back in the pool.
+ //
+ // See https://golang.org/issue/23199
+ const maxSize = 1 << 16 // 64KiB
+ if cap(e.buf) > maxSize {
+ return
+ }
+ eventPool.Put(e)
+}
+
+// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
+// to be implemented by types used with Event/Context's Object methods.
+type LogObjectMarshaler interface {
+ MarshalZerologObject(e *Event)
+}
+
+// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface
+// to be implemented by types used with Event/Context's Array methods.
+type LogArrayMarshaler interface {
+ MarshalZerologArray(a *Array)
+}
+
+func newEvent(w LevelWriter, level Level) *Event {
+ e := eventPool.Get().(*Event)
+ e.buf = e.buf[:0]
+ e.ch = nil
+ e.buf = enc.AppendBeginMarker(e.buf)
+ e.w = w
+ e.level = level
+ e.stack = false
+ e.skipFrame = 0
+ return e
+}
+
+func (e *Event) write() (err error) {
+ if e == nil {
+ return nil
+ }
+ if e.level != Disabled {
+ e.buf = enc.AppendEndMarker(e.buf)
+ e.buf = enc.AppendLineBreak(e.buf)
+ if e.w != nil {
+ _, err = e.w.WriteLevel(e.level, e.buf)
+ }
+ }
+ putEvent(e)
+ return
+}
+
+// Enabled returns false if the *Event is going to be filtered out by
+// log level or sampling.
+func (e *Event) Enabled() bool {
+ return e != nil && e.level != Disabled
+}
+
+// Discard disables the event so Msg(f) won't print it.
+func (e *Event) Discard() *Event {
+ if e == nil {
+ return e
+ }
+ e.level = Disabled
+ return nil
+}
+
+// Msg sends the *Event with msg added as the message field if not empty.
+//
+// NOTICE: once this method is called, the *Event should be disposed.
+// Calling Msg twice can have unexpected results.
+func (e *Event) Msg(msg string) {
+ if e == nil {
+ return
+ }
+ e.msg(msg)
+}
+
+// Send is equivalent to calling Msg("").
+//
+// NOTICE: once this method is called, the *Event should be disposed.
+func (e *Event) Send() {
+ if e == nil {
+ return
+ }
+ e.msg("")
+}
+
+// Msgf sends the event with formatted msg added as the message field if not empty.
+//
+// NOTICE: once this method is called, the *Event should be disposed.
+// Calling Msgf twice can have unexpected results.
+func (e *Event) Msgf(format string, v ...interface{}) {
+ if e == nil {
+ return
+ }
+ e.msg(fmt.Sprintf(format, v...))
+}
+
+func (e *Event) msg(msg string) {
+ for _, hook := range e.ch {
+ hook.Run(e, e.level, msg)
+ }
+ if msg != "" {
+ e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg)
+ }
+ if e.done != nil {
+ defer e.done(msg)
+ }
+ if err := e.write(); err != nil {
+ if ErrorHandler != nil {
+ ErrorHandler(err)
+ } else {
+ fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err)
+ }
+ }
+}
+
+// Fields is a helper function to use a map to set fields using type assertion.
+func (e *Event) Fields(fields map[string]interface{}) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = appendFields(e.buf, fields)
+ return e
+}
+
+// Dict adds the field key with a dict to the event context.
+// Use zerolog.Dict() to create the dictionary.
+func (e *Event) Dict(key string, dict *Event) *Event {
+ if e == nil {
+ return e
+ }
+ dict.buf = enc.AppendEndMarker(dict.buf)
+ e.buf = append(enc.AppendKey(e.buf, key), dict.buf...)
+ putEvent(dict)
+ return e
+}
+
+// Dict creates an Event to be used with the *Event.Dict method.
+// Call usual field methods like Str, Int etc to add fields to this
+// event and give it as argument the *Event.Dict method.
+func Dict() *Event {
+ return newEvent(nil, 0)
+}
+
+// Array adds the field key with an array to the event context.
+// Use zerolog.Arr() to create the array or pass a type that
+// implements the LogArrayMarshaler interface.
+func (e *Event) Array(key string, arr LogArrayMarshaler) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendKey(e.buf, key)
+ var a *Array
+ if aa, ok := arr.(*Array); ok {
+ a = aa
+ } else {
+ a = Arr()
+ arr.MarshalZerologArray(a)
+ }
+ e.buf = a.write(e.buf)
+ return e
+}
+
+func (e *Event) appendObject(obj LogObjectMarshaler) {
+ e.buf = enc.AppendBeginMarker(e.buf)
+ obj.MarshalZerologObject(e)
+ e.buf = enc.AppendEndMarker(e.buf)
+}
+
+// Object marshals an object that implements the LogObjectMarshaler interface.
+func (e *Event) Object(key string, obj LogObjectMarshaler) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendKey(e.buf, key)
+ e.appendObject(obj)
+ return e
+}
+
+// EmbedObject marshals an object that implements the LogObjectMarshaler interface.
+func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event {
+ if e == nil {
+ return e
+ }
+ obj.MarshalZerologObject(e)
+ return e
+}
+
+// Str adds the field key with val as a string to the *Event context.
+func (e *Event) Str(key, val string) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val)
+ return e
+}
+
+// Strs adds the field key with vals as a []string to the *Event context.
+func (e *Event) Strs(key string, vals []string) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals)
+ return e
+}
+
+// Stringer adds the field key with val.String() (or null if val is nil) to the *Event context.
+func (e *Event) Stringer(key string, val fmt.Stringer) *Event {
+ if e == nil {
+ return e
+ }
+
+ if val != nil {
+ e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val.String())
+ return e
+ }
+
+ e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), nil)
+ return e
+}
+
+// Bytes adds the field key with val as a string to the *Event context.
+//
+// Runes outside of normal ASCII ranges will be hex-encoded in the resulting
+// JSON.
+func (e *Event) Bytes(key string, val []byte) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val)
+ return e
+}
+
+// Hex adds the field key with val as a hex string to the *Event context.
+func (e *Event) Hex(key string, val []byte) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val)
+ return e
+}
+
+// RawJSON adds already encoded JSON to the log line under key.
+//
+// No sanity check is performed on b; it must not contain carriage returns and
+// be valid JSON.
+func (e *Event) RawJSON(key string, b []byte) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = appendJSON(enc.AppendKey(e.buf, key), b)
+ return e
+}
+
+// AnErr adds the field key with serialized err to the *Event context.
+// If err is nil, no field is added.
+func (e *Event) AnErr(key string, err error) *Event {
+ if e == nil {
+ return e
+ }
+ switch m := ErrorMarshalFunc(err).(type) {
+ case nil:
+ return e
+ case LogObjectMarshaler:
+ return e.Object(key, m)
+ case error:
+ if m == nil || isNilValue(m) {
+ return e
+ } else {
+ return e.Str(key, m.Error())
+ }
+ case string:
+ return e.Str(key, m)
+ default:
+ return e.Interface(key, m)
+ }
+}
+
+// Errs adds the field key with errs as an array of serialized errors to the
+// *Event context.
+func (e *Event) Errs(key string, errs []error) *Event {
+ if e == nil {
+ return e
+ }
+ arr := Arr()
+ for _, err := range errs {
+ switch m := ErrorMarshalFunc(err).(type) {
+ case LogObjectMarshaler:
+ arr = arr.Object(m)
+ case error:
+ arr = arr.Err(m)
+ case string:
+ arr = arr.Str(m)
+ default:
+ arr = arr.Interface(m)
+ }
+ }
+
+ return e.Array(key, arr)
+}
+
+// Err adds the field "error" with serialized err to the *Event context.
+// If err is nil, no field is added.
+//
+// To customize the key name, change zerolog.ErrorFieldName.
+//
+// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined,
+// the err is passed to ErrorStackMarshaler and the result is appended to the
+// zerolog.ErrorStackFieldName.
+func (e *Event) Err(err error) *Event {
+ if e == nil {
+ return e
+ }
+ if e.stack && ErrorStackMarshaler != nil {
+ switch m := ErrorStackMarshaler(err).(type) {
+ case nil:
+ case LogObjectMarshaler:
+ e.Object(ErrorStackFieldName, m)
+ case error:
+ if m != nil && !isNilValue(m) {
+ e.Str(ErrorStackFieldName, m.Error())
+ }
+ case string:
+ e.Str(ErrorStackFieldName, m)
+ default:
+ e.Interface(ErrorStackFieldName, m)
+ }
+ }
+ return e.AnErr(ErrorFieldName, err)
+}
+
+// Stack enables stack trace printing for the error passed to Err().
+//
+// ErrorStackMarshaler must be set for this method to do something.
+func (e *Event) Stack() *Event {
+ if e != nil {
+ e.stack = true
+ }
+ return e
+}
+
+// Bool adds the field key with val as a bool to the *Event context.
+func (e *Event) Bool(key string, b bool) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b)
+ return e
+}
+
+// Bools adds the field key with val as a []bool to the *Event context.
+func (e *Event) Bools(key string, b []bool) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b)
+ return e
+}
+
+// Int adds the field key with i as a int to the *Event context.
+func (e *Event) Int(key string, i int) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Ints adds the field key with i as a []int to the *Event context.
+func (e *Event) Ints(key string, i []int) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Int8 adds the field key with i as a int8 to the *Event context.
+func (e *Event) Int8(key string, i int8) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Ints8 adds the field key with i as a []int8 to the *Event context.
+func (e *Event) Ints8(key string, i []int8) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Int16 adds the field key with i as a int16 to the *Event context.
+func (e *Event) Int16(key string, i int16) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Ints16 adds the field key with i as a []int16 to the *Event context.
+func (e *Event) Ints16(key string, i []int16) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Int32 adds the field key with i as a int32 to the *Event context.
+func (e *Event) Int32(key string, i int32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Ints32 adds the field key with i as a []int32 to the *Event context.
+func (e *Event) Ints32(key string, i []int32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Int64 adds the field key with i as a int64 to the *Event context.
+func (e *Event) Int64(key string, i int64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Ints64 adds the field key with i as a []int64 to the *Event context.
+func (e *Event) Ints64(key string, i []int64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uint adds the field key with i as a uint to the *Event context.
+func (e *Event) Uint(key string, i uint) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uints adds the field key with i as a []uint to the *Event context.
+func (e *Event) Uints(key string, i []uint) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uint8 adds the field key with i as a uint8 to the *Event context.
+func (e *Event) Uint8(key string, i uint8) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uints8 adds the field key with i as a []uint8 to the *Event context.
+func (e *Event) Uints8(key string, i []uint8) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uint16 adds the field key with i as a uint16 to the *Event context.
+func (e *Event) Uint16(key string, i uint16) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uints16 adds the field key with i as a []uint16 to the *Event context.
+func (e *Event) Uints16(key string, i []uint16) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uint32 adds the field key with i as a uint32 to the *Event context.
+func (e *Event) Uint32(key string, i uint32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uints32 adds the field key with i as a []uint32 to the *Event context.
+func (e *Event) Uints32(key string, i []uint32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uint64 adds the field key with i as a uint64 to the *Event context.
+func (e *Event) Uint64(key string, i uint64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Uints64 adds the field key with i as a []uint64 to the *Event context.
+func (e *Event) Uints64(key string, i []uint64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// Float32 adds the field key with f as a float32 to the *Event context.
+func (e *Event) Float32(key string, f float32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
+ return e
+}
+
+// Floats32 adds the field key with f as a []float32 to the *Event context.
+func (e *Event) Floats32(key string, f []float32) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
+ return e
+}
+
+// Float64 adds the field key with f as a float64 to the *Event context.
+func (e *Event) Float64(key string, f float64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
+ return e
+}
+
+// Floats64 adds the field key with f as a []float64 to the *Event context.
+func (e *Event) Floats64(key string, f []float64) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
+ return e
+}
+
+// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key.
+// To customize the key name, change zerolog.TimestampFieldName.
+//
+// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one
+// already.
+func (e *Event) Timestamp() *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
+ return e
+}
+
+// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
+func (e *Event) Time(key string, t time.Time) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
+ return e
+}
+
+// Times adds the field key with t, a []time.Time, formatted as strings using zerolog.TimeFieldFormat.
+func (e *Event) Times(key string, t []time.Time) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
+ return e
+}
+
+// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit.
+// If zerolog.DurationFieldInteger is true, durations are rendered as integer
+// instead of float.
+func (e *Event) Dur(key string, d time.Duration) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+ return e
+}
+
+// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit.
+// If zerolog.DurationFieldInteger is true, durations are rendered as integer
+// instead of float.
+func (e *Event) Durs(key string, d []time.Duration) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+ return e
+}
+
+// TimeDiff adds the field key with positive duration between time t and start.
+// If time t is not greater than start, duration will be 0.
+// Duration format follows the same principle as Dur().
+func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
+ if e == nil {
+ return e
+ }
+ var d time.Duration
+ if t.After(start) {
+ d = t.Sub(start)
+ }
+ e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+ return e
+}
+
+// Interface adds the field key with i marshaled using reflection.
+func (e *Event) Interface(key string, i interface{}) *Event {
+ if e == nil {
+ return e
+ }
+ if obj, ok := i.(LogObjectMarshaler); ok {
+ return e.Object(key, obj)
+ }
+ e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i)
+ return e
+}
+
+// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames.
+// This includes those added via hooks from the context.
+func (e *Event) CallerSkipFrame(skip int) *Event {
+ e.skipFrame += skip
+ return e
+}
+
+// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
+// The argument skip is the number of stack frames to ascend.
+// If skip is not passed, the global variable CallerSkipFrameCount is used.
+func (e *Event) Caller(skip ...int) *Event {
+ sk := CallerSkipFrameCount
+ if len(skip) > 0 {
+ sk = skip[0] + CallerSkipFrameCount
+ }
+ return e.caller(sk)
+}
+
+func (e *Event) caller(skip int) *Event {
+ if e == nil {
+ return e
+ }
+ _, file, line, ok := runtime.Caller(skip + e.skipFrame)
+ if !ok {
+ return e
+ }
+ e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line))
+ return e
+}
+
+// IPAddr adds an IPv4 or IPv6 address to the event.
+func (e *Event) IPAddr(key string, ip net.IP) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip)
+ return e
+}
+
+// IPPrefix adds an IPv4 or IPv6 prefix (address and mask) to the event.
+func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx)
+ return e
+}
+
+// MACAddr adds a MAC address to the event.
+func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event {
+ if e == nil {
+ return e
+ }
+ e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha)
+ return e
+}
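+
+// Illustrative usage sketch (assumes the global logger from
+// github.com/rs/zerolog/log and the Msg terminator defined earlier in this
+// file; field names and values are arbitrary): the typed helpers above are
+// designed to be chained onto an Event.
+//
+//	log.Info().
+//		Int64("bytes", 1024).
+//		Floats64("latencies_ms", []float64{1.2, 3.4}).
+//		Dur("elapsed", 1500*time.Millisecond).
+//		Timestamp().
+//		Msg("transfer complete")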
diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go
new file mode 100644
index 000000000..cf3c3e918
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/fields.go
@@ -0,0 +1,253 @@
+package zerolog
+
+import (
+ "net"
+ "sort"
+ "time"
+ "unsafe"
+)
+
+func isNilValue(i interface{}) bool {
+ return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0
+}
+
+func appendFields(dst []byte, fields map[string]interface{}) []byte {
+ keys := make([]string, 0, len(fields))
+ for key := range fields {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ dst = enc.AppendKey(dst, key)
+ val := fields[key]
+ if val, ok := val.(LogObjectMarshaler); ok {
+ e := newEvent(nil, 0)
+ e.buf = e.buf[:0]
+ e.appendObject(val)
+ dst = append(dst, e.buf...)
+ putEvent(e)
+ continue
+ }
+ switch val := val.(type) {
+ case string:
+ dst = enc.AppendString(dst, val)
+ case []byte:
+ dst = enc.AppendBytes(dst, val)
+ case error:
+ switch m := ErrorMarshalFunc(val).(type) {
+ case LogObjectMarshaler:
+ e := newEvent(nil, 0)
+ e.buf = e.buf[:0]
+ e.appendObject(m)
+ dst = append(dst, e.buf...)
+ putEvent(e)
+ case error:
+ if m == nil || isNilValue(m) {
+ dst = enc.AppendNil(dst)
+ } else {
+ dst = enc.AppendString(dst, m.Error())
+ }
+ case string:
+ dst = enc.AppendString(dst, m)
+ default:
+ dst = enc.AppendInterface(dst, m)
+ }
+ case []error:
+ dst = enc.AppendArrayStart(dst)
+ for i, err := range val {
+ switch m := ErrorMarshalFunc(err).(type) {
+ case LogObjectMarshaler:
+ e := newEvent(nil, 0)
+ e.buf = e.buf[:0]
+ e.appendObject(m)
+ dst = append(dst, e.buf...)
+ putEvent(e)
+ case error:
+ if m == nil || isNilValue(m) {
+ dst = enc.AppendNil(dst)
+ } else {
+ dst = enc.AppendString(dst, m.Error())
+ }
+ case string:
+ dst = enc.AppendString(dst, m)
+ default:
+ dst = enc.AppendInterface(dst, m)
+ }
+
+ if i < (len(val) - 1) {
+ enc.AppendArrayDelim(dst)
+ }
+ }
+ dst = enc.AppendArrayEnd(dst)
+ case bool:
+ dst = enc.AppendBool(dst, val)
+ case int:
+ dst = enc.AppendInt(dst, val)
+ case int8:
+ dst = enc.AppendInt8(dst, val)
+ case int16:
+ dst = enc.AppendInt16(dst, val)
+ case int32:
+ dst = enc.AppendInt32(dst, val)
+ case int64:
+ dst = enc.AppendInt64(dst, val)
+ case uint:
+ dst = enc.AppendUint(dst, val)
+ case uint8:
+ dst = enc.AppendUint8(dst, val)
+ case uint16:
+ dst = enc.AppendUint16(dst, val)
+ case uint32:
+ dst = enc.AppendUint32(dst, val)
+ case uint64:
+ dst = enc.AppendUint64(dst, val)
+ case float32:
+ dst = enc.AppendFloat32(dst, val)
+ case float64:
+ dst = enc.AppendFloat64(dst, val)
+ case time.Time:
+ dst = enc.AppendTime(dst, val, TimeFieldFormat)
+ case time.Duration:
+ dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
+ case *string:
+ if val != nil {
+ dst = enc.AppendString(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *bool:
+ if val != nil {
+ dst = enc.AppendBool(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *int:
+ if val != nil {
+ dst = enc.AppendInt(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *int8:
+ if val != nil {
+ dst = enc.AppendInt8(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *int16:
+ if val != nil {
+ dst = enc.AppendInt16(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *int32:
+ if val != nil {
+ dst = enc.AppendInt32(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *int64:
+ if val != nil {
+ dst = enc.AppendInt64(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *uint:
+ if val != nil {
+ dst = enc.AppendUint(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *uint8:
+ if val != nil {
+ dst = enc.AppendUint8(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *uint16:
+ if val != nil {
+ dst = enc.AppendUint16(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *uint32:
+ if val != nil {
+ dst = enc.AppendUint32(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *uint64:
+ if val != nil {
+ dst = enc.AppendUint64(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *float32:
+ if val != nil {
+ dst = enc.AppendFloat32(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *float64:
+ if val != nil {
+ dst = enc.AppendFloat64(dst, *val)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *time.Time:
+ if val != nil {
+ dst = enc.AppendTime(dst, *val, TimeFieldFormat)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case *time.Duration:
+ if val != nil {
+ dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
+ } else {
+ dst = enc.AppendNil(dst)
+ }
+ case []string:
+ dst = enc.AppendStrings(dst, val)
+ case []bool:
+ dst = enc.AppendBools(dst, val)
+ case []int:
+ dst = enc.AppendInts(dst, val)
+ case []int8:
+ dst = enc.AppendInts8(dst, val)
+ case []int16:
+ dst = enc.AppendInts16(dst, val)
+ case []int32:
+ dst = enc.AppendInts32(dst, val)
+ case []int64:
+ dst = enc.AppendInts64(dst, val)
+ case []uint:
+ dst = enc.AppendUints(dst, val)
+ // case []uint8:
+ // dst = enc.AppendUints8(dst, val)
+ case []uint16:
+ dst = enc.AppendUints16(dst, val)
+ case []uint32:
+ dst = enc.AppendUints32(dst, val)
+ case []uint64:
+ dst = enc.AppendUints64(dst, val)
+ case []float32:
+ dst = enc.AppendFloats32(dst, val)
+ case []float64:
+ dst = enc.AppendFloats64(dst, val)
+ case []time.Time:
+ dst = enc.AppendTimes(dst, val, TimeFieldFormat)
+ case []time.Duration:
+ dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
+ case nil:
+ dst = enc.AppendNil(dst)
+ case net.IP:
+ dst = enc.AppendIPAddr(dst, val)
+ case net.IPNet:
+ dst = enc.AppendIPPrefix(dst, val)
+ case net.HardwareAddr:
+ dst = enc.AppendMACAddr(dst, val)
+ default:
+ dst = enc.AppendInterface(dst, val)
+ }
+ }
+ return dst
+}
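+
+// Illustrative sketch of how appendFields is typically reached (assumes the
+// Fields helpers on Event/Context defined elsewhere in this package and the
+// github.com/rs/zerolog/log global logger; keys and values are arbitrary):
+//
+//	log.Info().
+//		Fields(map[string]interface{}{
+//			"listen":  net.ParseIP("10.0.0.1"),
+//			"retries": 3,
+//			"timeout": 5 * time.Second,
+//		}).
+//		Msg("poller started")
+//
+// Note that the keys are sorted above before being appended, so the rendered
+// field order is deterministic regardless of Go's map iteration order.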
diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go
new file mode 100644
index 000000000..2c60382d2
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/globals.go
@@ -0,0 +1,129 @@
+package zerolog
+
+import (
+ "strconv"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ // TimeFormatUnix defines a time format that causes time fields to be
+ // serialized as Unix timestamp integers.
+ TimeFormatUnix = ""
+
+ // TimeFormatUnixMs defines a time format that causes time fields to be
+ // serialized as Unix timestamp integers in milliseconds.
+ TimeFormatUnixMs = "UNIXMS"
+
+ // TimeFormatUnixMicro defines a time format that causes time fields to be
+ // serialized as Unix timestamp integers in microseconds.
+ TimeFormatUnixMicro = "UNIXMICRO"
+)
+
+var (
+ // TimestampFieldName is the field name used for the timestamp field.
+ TimestampFieldName = "time"
+
+ // LevelFieldName is the field name used for the level field.
+ LevelFieldName = "level"
+
+ // LevelTraceValue is the value used for the trace level field.
+ LevelTraceValue = "trace"
+ // LevelDebugValue is the value used for the debug level field.
+ LevelDebugValue = "debug"
+ // LevelInfoValue is the value used for the info level field.
+ LevelInfoValue = "info"
+ // LevelWarnValue is the value used for the warn level field.
+ LevelWarnValue = "warn"
+ // LevelErrorValue is the value used for the error level field.
+ LevelErrorValue = "error"
+ // LevelFatalValue is the value used for the fatal level field.
+ LevelFatalValue = "fatal"
+ // LevelPanicValue is the value used for the panic level field.
+ LevelPanicValue = "panic"
+
+ // LevelFieldMarshalFunc allows customization of global level field marshaling.
+ LevelFieldMarshalFunc = func(l Level) string {
+ return l.String()
+ }
+
+ // MessageFieldName is the field name used for the message field.
+ MessageFieldName = "message"
+
+ // ErrorFieldName is the field name used for error fields.
+ ErrorFieldName = "error"
+
+ // CallerFieldName is the field name used for the caller field.
+ CallerFieldName = "caller"
+
+ // CallerSkipFrameCount is the number of stack frames to skip to find the caller.
+ CallerSkipFrameCount = 2
+
+ // CallerMarshalFunc allows customization of global caller marshaling
+ CallerMarshalFunc = func(file string, line int) string {
+ return file + ":" + strconv.Itoa(line)
+ }
+
+ // ErrorStackFieldName is the field name used for error stacks.
+ ErrorStackFieldName = "stack"
+
+ // ErrorStackMarshaler extracts the stack from err if any.
+ ErrorStackMarshaler func(err error) interface{}
+
+ // ErrorMarshalFunc allows customization of global error marshaling
+ ErrorMarshalFunc = func(err error) interface{} {
+ return err
+ }
+
+ // TimeFieldFormat defines the time format of the Time field type. If set to
+ // TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is formatted as a
+ // Unix timestamp integer.
+ TimeFieldFormat = time.RFC3339
+
+ // TimestampFunc defines the function called to generate a timestamp.
+ TimestampFunc = time.Now
+
+ // DurationFieldUnit defines the unit for time.Duration type fields added
+ // using the Dur method.
+ DurationFieldUnit = time.Millisecond
+
+ // DurationFieldInteger renders Dur fields as integer instead of float if
+ // set to true.
+ DurationFieldInteger = false
+
+ // ErrorHandler is called whenever zerolog fails to write an event on its
+ // output. If not set, an error is printed to stderr. This handler must
+ // be thread safe and non-blocking.
+ ErrorHandler func(err error)
+)
+
+var (
+ gLevel = new(int32)
+ disableSampling = new(int32)
+)
+
+// SetGlobalLevel sets the global override for log level. If this
+// value is raised, all Loggers will use at least this value.
+//
+// To globally disable logs, set GlobalLevel to Disabled.
+func SetGlobalLevel(l Level) {
+ atomic.StoreInt32(gLevel, int32(l))
+}
+
+// GlobalLevel returns the current global log level
+func GlobalLevel() Level {
+ return Level(atomic.LoadInt32(gLevel))
+}
+
+// DisableSampling will disable sampling in all Loggers if true.
+func DisableSampling(v bool) {
+ var i int32
+ if v {
+ i = 1
+ }
+ atomic.StoreInt32(disableSampling, i)
+}
+
+func samplingDisabled() bool {
+ return atomic.LoadInt32(disableSampling) == 1
+}
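+
+// Illustrative configuration sketch (values are examples only; InfoLevel is
+// the Level constant defined elsewhere in this package). All of the knobs
+// used below are the package-level variables and functions in this file:
+//
+//	func init() {
+//		zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs
+//		zerolog.DurationFieldUnit = time.Second
+//		zerolog.DurationFieldInteger = true
+//		zerolog.SetGlobalLevel(zerolog.InfoLevel)
+//	}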
diff --git a/vendor/github.com/rs/zerolog/go.mod b/vendor/github.com/rs/zerolog/go.mod
new file mode 100644
index 000000000..ab04df985
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/go.mod
@@ -0,0 +1,8 @@
+module github.com/rs/zerolog
+
+require (
+ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
+ github.com/pkg/errors v0.9.1
+ github.com/rs/xid v1.2.1
+ golang.org/x/tools v0.1.0
+)
diff --git a/vendor/github.com/rs/zerolog/go.sum b/vendor/github.com/rs/zerolog/go.sum
new file mode 100644
index 000000000..37b2ffcb3
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/go.sum
@@ -0,0 +1,32 @@
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/rs/zerolog/go112.go b/vendor/github.com/rs/zerolog/go112.go
new file mode 100644
index 000000000..e7b5a1bdc
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/go112.go
@@ -0,0 +1,7 @@
+// +build go1.12
+
+package zerolog
+
+// Since Go 1.12, some auto-generated init functions are hidden from
+// runtime.Caller.
+const contextCallerSkipFrameCount = 2
diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go
new file mode 100644
index 000000000..ec6effc1a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/hook.go
@@ -0,0 +1,64 @@
+package zerolog
+
+// Hook defines an interface to a log hook.
+type Hook interface {
+ // Run runs the hook with the event.
+ Run(e *Event, level Level, message string)
+}
+
+// HookFunc is an adaptor to allow the use of an ordinary function
+// as a Hook.
+type HookFunc func(e *Event, level Level, message string)
+
+// Run implements the Hook interface.
+func (h HookFunc) Run(e *Event, level Level, message string) {
+ h(e, level, message)
+}
+
+// LevelHook applies a different hook for each level.
+type LevelHook struct {
+ NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook
+}
+
+// Run implements the Hook interface.
+func (h LevelHook) Run(e *Event, level Level, message string) {
+ switch level {
+ case TraceLevel:
+ if h.TraceHook != nil {
+ h.TraceHook.Run(e, level, message)
+ }
+ case DebugLevel:
+ if h.DebugHook != nil {
+ h.DebugHook.Run(e, level, message)
+ }
+ case InfoLevel:
+ if h.InfoHook != nil {
+ h.InfoHook.Run(e, level, message)
+ }
+ case WarnLevel:
+ if h.WarnHook != nil {
+ h.WarnHook.Run(e, level, message)
+ }
+ case ErrorLevel:
+ if h.ErrorHook != nil {
+ h.ErrorHook.Run(e, level, message)
+ }
+ case FatalLevel:
+ if h.FatalHook != nil {
+ h.FatalHook.Run(e, level, message)
+ }
+ case PanicLevel:
+ if h.PanicHook != nil {
+ h.PanicHook.Run(e, level, message)
+ }
+ case NoLevel:
+ if h.NoLevelHook != nil {
+ h.NoLevelHook.Run(e, level, message)
+ }
+ }
+}
+
+// NewLevelHook returns a new LevelHook.
+func NewLevelHook() LevelHook {
+ return LevelHook{}
+}
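+
+// Illustrative sketch (assumes the Logger type, its Hook method, and the Bool
+// field helper defined elsewhere in this package; the hook body is
+// arbitrary): an ordinary function becomes a Hook via HookFunc.
+//
+//	alert := HookFunc(func(e *Event, level Level, message string) {
+//		if level == ErrorLevel {
+//			e.Bool("alert", true)
+//		}
+//	})
+//	logger := New(os.Stderr).Hook(alert)
+//	logger.Error().Msg("disk full")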
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md
new file mode 100644
index 000000000..92c2e8c7f
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/README.md
@@ -0,0 +1,56 @@
+## Reference
+
+CBOR encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049).
+
+## Comparison of JSON vs CBOR
+
+Two main areas of reduction are:
+
+1. CPU usage to write a log message.
+2. Size (in bytes) of log messages.
+
+
+CPU usage savings are shown below:
+```
+name JSON time/op CBOR time/op delta
+Info-32 15.3ns Âą 1% 11.7ns Âą 3% -23.78% (p=0.000 n=9+10)
+ContextFields-32 16.2ns Âą 2% 12.3ns Âą 3% -23.97% (p=0.000 n=9+9)
+ContextAppend-32 6.70ns Âą 0% 6.20ns Âą 0% -7.44% (p=0.000 n=9+9)
+LogFields-32 66.4ns Âą 0% 24.6ns Âą 2% -62.89% (p=0.000 n=10+9)
+LogArrayObject-32 911ns Âą11% 768ns Âą 6% -15.64% (p=0.000 n=10+10)
+LogFieldType/Floats-32 70.3ns Âą 2% 29.5ns Âą 1% -57.98% (p=0.000 n=10+10)
+LogFieldType/Err-32 14.0ns Âą 3% 12.1ns Âą 8% -13.20% (p=0.000 n=8+10)
+LogFieldType/Dur-32 17.2ns Âą 2% 13.1ns Âą 1% -24.27% (p=0.000 n=10+9)
+LogFieldType/Object-32 54.3ns Âą11% 52.3ns Âą 7% ~ (p=0.239 n=10+10)
+LogFieldType/Ints-32 20.3ns Âą 2% 15.1ns Âą 2% -25.50% (p=0.000 n=9+10)
+LogFieldType/Interfaces-32 642ns Âą11% 621ns Âą 9% ~ (p=0.118 n=10+10)
+LogFieldType/Interface(Objects)-32 635ns Âą13% 632ns Âą 9% ~ (p=0.592 n=10+10)
+LogFieldType/Times-32 294ns Âą 0% 27ns Âą 1% -90.71% (p=0.000 n=10+9)
+LogFieldType/Durs-32 121ns Âą 0% 33ns Âą 2% -72.44% (p=0.000 n=9+9)
+LogFieldType/Interface(Object)-32 56.6ns Âą 8% 52.3ns Âą 8% -7.54% (p=0.007 n=10+10)
+LogFieldType/Errs-32 17.8ns Âą 3% 16.1ns Âą 2% -9.71% (p=0.000 n=10+9)
+LogFieldType/Time-32 40.5ns Âą 1% 12.7ns Âą 6% -68.66% (p=0.000 n=8+9)
+LogFieldType/Bool-32 12.0ns Âą 5% 10.2ns Âą 2% -15.18% (p=0.000 n=10+8)
+LogFieldType/Bools-32 17.2ns Âą 2% 12.6ns Âą 4% -26.63% (p=0.000 n=10+10)
+LogFieldType/Int-32 12.3ns Âą 2% 11.2ns Âą 4% -9.27% (p=0.000 n=9+10)
+LogFieldType/Float-32 16.7ns Âą 1% 12.6ns Âą 2% -24.42% (p=0.000 n=7+9)
+LogFieldType/Str-32 12.7ns Âą 7% 11.3ns Âą 7% -10.88% (p=0.000 n=10+9)
+LogFieldType/Strs-32 20.3ns Âą 3% 18.2ns Âą 3% -10.25% (p=0.000 n=9+10)
+LogFieldType/Interface-32 183ns Âą12% 175ns Âą 9% ~ (p=0.078 n=10+10)
+```
+
+Log message size savings are highly dependent on the number and type of fields in the log message.
+The measurements below assume this log message (an integer, a timestamp, and a string, in addition to the level):
+
+`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`
+
+Two measurements were taken of the log file sizes: one without any compression, and a second
+using [compress/zlib](https://golang.org/pkg/compress/zlib/).
+
+Results for 10,000 log messages:
+
+| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) |
+| :--- | :---: | :---: |
+| JSON | 920 | 28 |
+| CBOR | 550 | 28 |
+
+The example used to calculate the above data is available in [Examples](examples).
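+
+A minimal sketch of how the compressed sizes above can be reproduced with
+[compress/zlib](https://golang.org/pkg/compress/zlib/) (the file names are
+hypothetical; any 10,000-message log file produced by the examples will do):
+
+```go
+package main
+
+import (
+	"compress/zlib"
+	"fmt"
+	"io"
+	"os"
+)
+
+func main() {
+	in, err := os.Open("cbor.log") // hypothetical CBOR log produced by the examples
+	if err != nil {
+		panic(err)
+	}
+	defer in.Close()
+
+	out, err := os.Create("cbor.log.z")
+	if err != nil {
+		panic(err)
+	}
+	defer out.Close()
+
+	zw := zlib.NewWriter(out)
+	if _, err := io.Copy(zw, in); err != nil {
+		panic(err)
+	}
+	zw.Close() // flush compressed data before measuring
+
+	fi, err := out.Stat()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("compressed size: %d bytes\n", fi.Size())
+}
+```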
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go
new file mode 100644
index 000000000..58cd0822b
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/base.go
@@ -0,0 +1,11 @@
+package cbor
+
+type Encoder struct{}
+
+// AppendKey adds a key (string) to the binary encoded log message
+func (e Encoder) AppendKey(dst []byte, key string) []byte {
+ if len(dst) < 1 {
+ dst = e.AppendBeginMarker(dst)
+ }
+ return e.AppendString(dst, key)
+}
\ No newline at end of file
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
new file mode 100644
index 000000000..969f59159
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
@@ -0,0 +1,100 @@
+// Package cbor provides primitives for storing different data
+// in the CBOR (binary) format. CBOR is defined in RFC7049.
+package cbor
+
+import "time"
+
+const (
+ majorOffset = 5
+ additionalMax = 23
+
+ // Non Values.
+ additionalTypeBoolFalse byte = 20
+ additionalTypeBoolTrue byte = 21
+ additionalTypeNull byte = 22
+
+ // Integer (+ve and -ve) Sub-types.
+ additionalTypeIntUint8 byte = 24
+ additionalTypeIntUint16 byte = 25
+ additionalTypeIntUint32 byte = 26
+ additionalTypeIntUint64 byte = 27
+
+ // Float Sub-types.
+ additionalTypeFloat16 byte = 25
+ additionalTypeFloat32 byte = 26
+ additionalTypeFloat64 byte = 27
+ additionalTypeBreak byte = 31
+
+ // Tag Sub-types.
+ additionalTypeTimestamp byte = 01
+
+ // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
+ additionalTypeTagNetworkAddr uint16 = 260
+ additionalTypeTagNetworkPrefix uint16 = 261
+ additionalTypeEmbeddedJSON uint16 = 262
+ additionalTypeTagHexString uint16 = 263
+
+ // Unspecified number of elements.
+ additionalTypeInfiniteCount byte = 31
+)
+const (
+ majorTypeUnsignedInt byte = iota << majorOffset // Major type 0
+ majorTypeNegativeInt // Major type 1
+ majorTypeByteString // Major type 2
+ majorTypeUtf8String // Major type 3
+ majorTypeArray // Major type 4
+ majorTypeMap // Major type 5
+ majorTypeTags // Major type 6
+ majorTypeSimpleAndFloat // Major type 7
+)
+
+const (
+ maskOutAdditionalType byte = (7 << majorOffset)
+ maskOutMajorType byte = 31
+)
+
+const (
+ float32Nan = "\xfa\x7f\xc0\x00\x00"
+ float32PosInfinity = "\xfa\x7f\x80\x00\x00"
+ float32NegInfinity = "\xfa\xff\x80\x00\x00"
+ float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"
+ float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"
+ float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"
+)
+
+// IntegerTimeFieldFormat indicates the format of timestamp decoded
+// from an integer (time in seconds).
+var IntegerTimeFieldFormat = time.RFC3339
+
+// NanoTimeFieldFormat indicates the format of timestamp decoded
+// from a float value (time in seconds and nano seconds).
+var NanoTimeFieldFormat = time.RFC3339Nano
+
+func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
+ byteCount := 8
+ var minor byte
+ switch {
+ case number < 256:
+ byteCount = 1
+ minor = additionalTypeIntUint8
+
+ case number < 65536:
+ byteCount = 2
+ minor = additionalTypeIntUint16
+
+ case number < 4294967296:
+ byteCount = 4
+ minor = additionalTypeIntUint32
+
+ default:
+ byteCount = 8
+ minor = additionalTypeIntUint64
+
+ }
+ dst = append(dst, byte(major|minor))
+ byteCount--
+ for ; byteCount >= 0; byteCount-- {
+ dst = append(dst, byte(number>>(uint(byteCount)*8)))
+ }
+ return dst
+}
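+
+// Worked example (illustrative): the header for a 300-byte byte string. 300
+// does not fit in the 5 additional-information bits, but fits in two bytes,
+// so the minor bits select additionalTypeIntUint16 and the length follows in
+// big-endian order:
+//
+//	hdr := appendCborTypePrefix(nil, majorTypeByteString, 300)
+//	// hdr == []byte{0x59, 0x01, 0x2c}  // 0x40|0x19, then 0x012c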
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
new file mode 100644
index 000000000..e3cf3b7db
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
@@ -0,0 +1,614 @@
+package cbor
+
+// This file contains code to decode a stream of CBOR Data into JSON.
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+var decodeTimeZone *time.Location
+
+const hexTable = "0123456789abcdef"
+
+const isFloat32 = 4
+const isFloat64 = 8
+
+func readNBytes(src *bufio.Reader, n int) []byte {
+ ret := make([]byte, n)
+ for i := 0; i < n; i++ {
+ ch, e := src.ReadByte()
+ if e != nil {
+ panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n))
+ }
+ ret[i] = ch
+ }
+ return ret
+}
+
+func readByte(src *bufio.Reader) byte {
+ b, e := src.ReadByte()
+ if e != nil {
+ panic(fmt.Errorf("Tried to Read 1 Byte.. But hit end of file"))
+ }
+ return b
+}
+
+func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
+ val := int64(0)
+ if minor <= 23 {
+ val = int64(minor)
+ } else {
+ bytesToRead := 0
+ switch minor {
+ case additionalTypeIntUint8:
+ bytesToRead = 1
+ case additionalTypeIntUint16:
+ bytesToRead = 2
+ case additionalTypeIntUint32:
+ bytesToRead = 4
+ case additionalTypeIntUint64:
+ bytesToRead = 8
+ default:
+ panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor))
+ }
+ pb := readNBytes(src, bytesToRead)
+ for i := 0; i < bytesToRead; i++ {
+ val = val * 256
+ val += int64(pb[i])
+ }
+ }
+ return val
+}
+
+func decodeInteger(src *bufio.Reader) int64 {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
+ panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major))
+ }
+ val := decodeIntAdditonalType(src, minor)
+ if major == 0 {
+ return val
+ }
+ return (-1 - val)
+}
+
+func decodeFloat(src *bufio.Reader) (float64, int) {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeSimpleAndFloat {
+ panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major))
+ }
+
+ switch minor {
+ case additionalTypeFloat16:
+ panic(fmt.Errorf("float16 is not supported in decodeFloat"))
+
+ case additionalTypeFloat32:
+ pb := readNBytes(src, 4)
+ switch string(pb) {
+ case float32Nan:
+ return math.NaN(), isFloat32
+ case float32PosInfinity:
+ return math.Inf(0), isFloat32
+ case float32NegInfinity:
+ return math.Inf(-1), isFloat32
+ }
+ n := uint32(0)
+ for i := 0; i < 4; i++ {
+ n = n * 256
+ n += uint32(pb[i])
+ }
+ val := math.Float32frombits(n)
+ return float64(val), isFloat32
+ case additionalTypeFloat64:
+ pb := readNBytes(src, 8)
+ switch string(pb) {
+ case float64Nan:
+ return math.NaN(), isFloat64
+ case float64PosInfinity:
+ return math.Inf(0), isFloat64
+ case float64NegInfinity:
+ return math.Inf(-1), isFloat64
+ }
+ n := uint64(0)
+ for i := 0; i < 8; i++ {
+ n = n * 256
+ n += uint64(pb[i])
+ }
+ val := math.Float64frombits(n)
+ return val, isFloat64
+ }
+ panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor))
+}
+
+func decodeStringComplex(dst []byte, s string, pos uint) []byte {
+ i := int(pos)
+ start := 0
+
+ for i < len(s) {
+ b := s[i]
+ if b >= utf8.RuneSelf {
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if r == utf8.RuneError && size == 1 {
+ // In case of error, first append previous simple characters to
+ // the byte slice if any and append a replacement character code
+ // in place of the invalid sequence.
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ dst = append(dst, `\ufffd`...)
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ continue
+ }
+ if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
+ i++
+ continue
+ }
+ // We encountered a character that needs to be encoded.
+ // Let's append the previous simple characters to the byte slice
+ // and switch our operation to read and encode the remainder
+ // characters byte-by-byte.
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ switch b {
+ case '"', '\\':
+ dst = append(dst, '\\', b)
+ case '\b':
+ dst = append(dst, '\\', 'b')
+ case '\f':
+ dst = append(dst, '\\', 'f')
+ case '\n':
+ dst = append(dst, '\\', 'n')
+ case '\r':
+ dst = append(dst, '\\', 'r')
+ case '\t':
+ dst = append(dst, '\\', 't')
+ default:
+ dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF])
+ }
+ i++
+ start = i
+ }
+ if start < len(s) {
+ dst = append(dst, s[start:]...)
+ }
+ return dst
+}
+
+func decodeString(src *bufio.Reader, noQuotes bool) []byte {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeByteString {
+ panic(fmt.Errorf("Major type is: %d in decodeString", major))
+ }
+ result := []byte{}
+ if !noQuotes {
+ result = append(result, '"')
+ }
+ length := decodeIntAdditonalType(src, minor)
+ len := int(length)
+ pbs := readNBytes(src, len)
+ result = append(result, pbs...)
+ if noQuotes {
+ return result
+ }
+ return append(result, '"')
+}
+
+func decodeUTF8String(src *bufio.Reader) []byte {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeUtf8String {
+ panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
+ }
+ result := []byte{'"'}
+ length := decodeIntAdditonalType(src, minor)
+ len := int(length)
+ pbs := readNBytes(src, len)
+
+ for i := 0; i < len; i++ {
+ // Check if the character needs encoding. Control characters, backslashes,
+ // and the double quote need JSON escaping. Bytes above the ASCII
+ // boundary need UTF-8 handling.
+ if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' {
+ // We encountered a character that needs to be encoded. Switch
+ // to complex version of the algorithm.
+ dst := []byte{'"'}
+ dst = decodeStringComplex(dst, string(pbs), uint(i))
+ return append(dst, '"')
+ }
+ }
+ // The string has no need for encoding and therefore is directly
+ // appended to the byte slice.
+ result = append(result, pbs...)
+ return append(result, '"')
+}
+
+func array2Json(src *bufio.Reader, dst io.Writer) {
+ dst.Write([]byte{'['})
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeArray {
+ panic(fmt.Errorf("Major type is: %d in array2Json", major))
+ }
+ len := 0
+ unSpecifiedCount := false
+ if minor == additionalTypeInfiniteCount {
+ unSpecifiedCount = true
+ } else {
+ length := decodeIntAdditonalType(src, minor)
+ len = int(length)
+ }
+ for i := 0; unSpecifiedCount || i < len; i++ {
+ if unSpecifiedCount {
+ pb, e := src.Peek(1)
+ if e != nil {
+ panic(e)
+ }
+ if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+ readByte(src)
+ break
+ }
+ }
+ cbor2JsonOneObject(src, dst)
+ if unSpecifiedCount {
+ pb, e := src.Peek(1)
+ if e != nil {
+ panic(e)
+ }
+ if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+ readByte(src)
+ break
+ }
+ dst.Write([]byte{','})
+ } else if i+1 < len {
+ dst.Write([]byte{','})
+ }
+ }
+ dst.Write([]byte{']'})
+}
+
+func map2Json(src *bufio.Reader, dst io.Writer) {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeMap {
+ panic(fmt.Errorf("Major type is: %d in map2Json", major))
+ }
+ len := 0
+ unSpecifiedCount := false
+ if minor == additionalTypeInfiniteCount {
+ unSpecifiedCount = true
+ } else {
+ length := decodeIntAdditonalType(src, minor)
+ len = int(length)
+ }
+ dst.Write([]byte{'{'})
+ for i := 0; unSpecifiedCount || i < len; i++ {
+ if unSpecifiedCount {
+ pb, e := src.Peek(1)
+ if e != nil {
+ panic(e)
+ }
+ if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+ readByte(src)
+ break
+ }
+ }
+ cbor2JsonOneObject(src, dst)
+ if i%2 == 0 {
+ // Even position values are keys.
+ dst.Write([]byte{':'})
+ } else {
+ if unSpecifiedCount {
+ pb, e := src.Peek(1)
+ if e != nil {
+ panic(e)
+ }
+ if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+ readByte(src)
+ break
+ }
+ dst.Write([]byte{','})
+ } else if i+1 < len {
+ dst.Write([]byte{','})
+ }
+ }
+ }
+ dst.Write([]byte{'}'})
+}
+
+func decodeTagData(src *bufio.Reader) []byte {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeTags {
+ panic(fmt.Errorf("Major type is: %d in decodeTagData", major))
+ }
+ switch minor {
+ case additionalTypeTimestamp:
+ return decodeTimeStamp(src)
+
+ // Tag value is larger than 256 (so uint16).
+ case additionalTypeIntUint16:
+ val := decodeIntAdditonalType(src, minor)
+
+ switch uint16(val) {
+ case additionalTypeEmbeddedJSON:
+ pb := readByte(src)
+ dataMajor := pb & maskOutAdditionalType
+ if dataMajor != majorTypeByteString {
+ panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor))
+ }
+ src.UnreadByte()
+ return decodeString(src, true)
+
+ case additionalTypeTagNetworkAddr:
+ octets := decodeString(src, true)
+ ss := []byte{'"'}
+ switch len(octets) {
+ case 6: // MAC address.
+ ha := net.HardwareAddr(octets)
+ ss = append(append(ss, ha.String()...), '"')
+ case 4: // IPv4 address.
+ fallthrough
+ case 16: // IPv6 address.
+ ip := net.IP(octets)
+ ss = append(append(ss, ip.String()...), '"')
+ default:
+ panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets)))
+ }
+ return ss
+
+ case additionalTypeTagNetworkPrefix:
+ pb := readByte(src)
+ if pb != byte(majorTypeMap|0x1) {
+ panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected"))
+ }
+ octets := decodeString(src, true)
+ val := decodeInteger(src)
+ ip := net.IP(octets)
+ var mask net.IPMask
+ pfxLen := int(val)
+ if len(octets) == 4 {
+ mask = net.CIDRMask(pfxLen, 32)
+ } else {
+ mask = net.CIDRMask(pfxLen, 128)
+ }
+ ipPfx := net.IPNet{IP: ip, Mask: mask}
+ ss := []byte{'"'}
+ ss = append(append(ss, ipPfx.String()...), '"')
+ return ss
+
+ case additionalTypeTagHexString:
+ octets := decodeString(src, true)
+ ss := []byte{'"'}
+ for _, v := range octets {
+ ss = append(ss, hexTable[v>>4], hexTable[v&0x0f])
+ }
+ return append(ss, '"')
+
+ default:
+ panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
+ }
+ }
+ panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor))
+}
+
+func decodeTimeStamp(src *bufio.Reader) []byte {
+ pb := readByte(src)
+ src.UnreadByte()
+ tsMajor := pb & maskOutAdditionalType
+ if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt {
+ n := decodeInteger(src)
+ t := time.Unix(n, 0)
+ if decodeTimeZone != nil {
+ t = t.In(decodeTimeZone)
+ } else {
+ t = t.In(time.UTC)
+ }
+ tsb := []byte{}
+ tsb = append(tsb, '"')
+ tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat)
+ tsb = append(tsb, '"')
+ return tsb
+ } else if tsMajor == majorTypeSimpleAndFloat {
+ n, _ := decodeFloat(src)
+ secs := int64(n)
+ n -= float64(secs)
+ n *= float64(1e9)
+ t := time.Unix(secs, int64(n))
+ if decodeTimeZone != nil {
+ t = t.In(decodeTimeZone)
+ } else {
+ t = t.In(time.UTC)
+ }
+ tsb := []byte{}
+ tsb = append(tsb, '"')
+ tsb = t.AppendFormat(tsb, NanoTimeFieldFormat)
+ tsb = append(tsb, '"')
+ return tsb
+ }
+ panic(fmt.Errorf("TS format is neither int nor float: %d", tsMajor))
+}
+
+func decodeSimpleFloat(src *bufio.Reader) []byte {
+ pb := readByte(src)
+ major := pb & maskOutAdditionalType
+ minor := pb & maskOutMajorType
+ if major != majorTypeSimpleAndFloat {
+ panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major))
+ }
+ switch minor {
+ case additionalTypeBoolTrue:
+ return []byte("true")
+ case additionalTypeBoolFalse:
+ return []byte("false")
+ case additionalTypeNull:
+ return []byte("null")
+ case additionalTypeFloat16:
+ fallthrough
+ case additionalTypeFloat32:
+ fallthrough
+ case additionalTypeFloat64:
+ src.UnreadByte()
+ v, bc := decodeFloat(src)
+ ba := []byte{}
+ switch {
+ case math.IsNaN(v):
+ return []byte("\"NaN\"")
+ case math.IsInf(v, 1):
+ return []byte("\"+Inf\"")
+ case math.IsInf(v, -1):
+ return []byte("\"-Inf\"")
+ }
+ if bc == isFloat32 {
+ ba = strconv.AppendFloat(ba, v, 'f', -1, 32)
+ } else if bc == isFloat64 {
+ ba = strconv.AppendFloat(ba, v, 'f', -1, 64)
+ } else {
+ panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc))
+ }
+ return ba
+ default:
+ panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor))
+ }
+}
+
+func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) {
+ pb, e := src.Peek(1)
+ if e != nil {
+ panic(e)
+ }
+ major := (pb[0] & maskOutAdditionalType)
+
+ switch major {
+ case majorTypeUnsignedInt:
+ fallthrough
+ case majorTypeNegativeInt:
+ n := decodeInteger(src)
+ dst.Write([]byte(strconv.Itoa(int(n))))
+
+ case majorTypeByteString:
+ s := decodeString(src, false)
+ dst.Write(s)
+
+ case majorTypeUtf8String:
+ s := decodeUTF8String(src)
+ dst.Write(s)
+
+ case majorTypeArray:
+ array2Json(src, dst)
+
+ case majorTypeMap:
+ map2Json(src, dst)
+
+ case majorTypeTags:
+ s := decodeTagData(src)
+ dst.Write(s)
+
+ case majorTypeSimpleAndFloat:
+ s := decodeSimpleFloat(src)
+ dst.Write(s)
+ }
+}
+
+func moreBytesToRead(src *bufio.Reader) bool {
+ _, e := src.ReadByte()
+ if e == nil {
+ src.UnreadByte()
+ return true
+ }
+ return false
+}
+
+// Cbor2JsonManyObjects decodes all the CBOR objects read from the src
+// reader. It keeps decoding until the reader returns EOF.
+// The decoded output is written to dst, and a newline is written after
+// every CBOR object.
+//
+// Returns any error that was encountered during decoding.
+// The child functions panic when an error is encountered; this function
+// recovers non-runtime panics and returns the reason as an error.
+func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+ bufRdr := bufio.NewReader(src)
+ for moreBytesToRead(bufRdr) {
+ cbor2JsonOneObject(bufRdr, dst)
+ dst.Write([]byte("\n"))
+ }
+ return nil
+}
+
+// binaryFmt detects whether the bytes to be printed are in binary (CBOR) format.
+func binaryFmt(p []byte) bool {
+ if len(p) > 0 && p[0] > 0x7F {
+ return true
+ }
+ return false
+}
+
+func getReader(str string) *bufio.Reader {
+ return bufio.NewReader(strings.NewReader(str))
+}
+
+// DecodeIfBinaryToString converts a binary-formatted log message to a
+// JSON-formatted string log message, suitable for printing to the console or syslog.
+func DecodeIfBinaryToString(in []byte) string {
+ if binaryFmt(in) {
+ var b bytes.Buffer
+ Cbor2JsonManyObjects(strings.NewReader(string(in)), &b)
+ return b.String()
+ }
+ return string(in)
+}
+
+// DecodeObjectToStr checks if the input is in binary format; if so,
+// it decodes a single object and returns the decoded string.
+func DecodeObjectToStr(in []byte) string {
+ if binaryFmt(in) {
+ var b bytes.Buffer
+ cbor2JsonOneObject(getReader(string(in)), &b)
+ return b.String()
+ }
+ return string(in)
+}
+
+// DecodeIfBinaryToBytes checks if the input is in binary format; if so,
+// it decodes all objects and returns the decoded output as a byte slice.
+func DecodeIfBinaryToBytes(in []byte) []byte {
+ if binaryFmt(in) {
+ var b bytes.Buffer
+ Cbor2JsonManyObjects(bytes.NewReader(in), &b)
+ return b.Bytes()
+ }
+ return in
+}
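+
+// Illustrative in-package round trip (the key and value are arbitrary):
+// encode a one-entry map with the Encoder methods defined in the sibling
+// files of this package, then decode it back to JSON with the helpers above.
+//
+//	var enc Encoder
+//	buf := enc.AppendKey(nil, "level")       // implicit begin marker + key
+//	buf = enc.AppendString(buf, "info")      // value
+//	buf = enc.AppendEndMarker(buf)           // close the map
+//	fmt.Println(DecodeIfBinaryToString(buf)) // {"level":"info"}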
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go
new file mode 100644
index 000000000..ff42afab4
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/string.go
@@ -0,0 +1,68 @@
+package cbor
+
+// AppendStrings encodes and adds an array of strings to the dst byte array.
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendString(dst, v)
+ }
+ return dst
+}
+
+// AppendString encodes and adds a string to the dst byte array.
+func (Encoder) AppendString(dst []byte, s string) []byte {
+ major := majorTypeUtf8String
+
+ l := len(s)
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l))
+ }
+ return append(dst, s...)
+}
+
+// AppendBytes encodes and adds an array of bytes to the dst byte array.
+func (Encoder) AppendBytes(dst, s []byte) []byte {
+ major := majorTypeByteString
+
+ l := len(s)
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ return append(dst, s...)
+}
+
+// AppendEmbeddedJSON adds an embedded-JSON tag and appends the input JSON as a byte string.
+func AppendEmbeddedJSON(dst, s []byte) []byte {
+ major := majorTypeTags
+ minor := additionalTypeEmbeddedJSON
+
+ // Append the TAG to indicate this is Embedded JSON.
+ dst = append(dst, byte(major|additionalTypeIntUint16))
+ dst = append(dst, byte(minor>>8))
+ dst = append(dst, byte(minor&0xff))
+
+ // Append the JSON Object as Byte String.
+ major = majorTypeByteString
+
+ l := len(s)
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ return append(dst, s...)
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go
new file mode 100644
index 000000000..12f6a1ddd
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/time.go
@@ -0,0 +1,93 @@
+package cbor
+
+import (
+ "time"
+)
+
+func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
+ major := majorTypeTags
+ minor := additionalTypeTimestamp
+ dst = append(dst, byte(major|minor))
+ secs := t.Unix()
+ var val uint64
+ if secs < 0 {
+ major = majorTypeNegativeInt
+ val = uint64(-secs - 1)
+ } else {
+ major = majorTypeUnsignedInt
+ val = uint64(secs)
+ }
+ dst = appendCborTypePrefix(dst, major, uint64(val))
+ return dst
+}
+
+func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
+ major := majorTypeTags
+ minor := additionalTypeTimestamp
+ dst = append(dst, byte(major|minor))
+ secs := t.Unix()
+ nanos := t.Nanosecond()
+ val := float64(secs) + float64(nanos)*1e-9
+ return e.AppendFloat64(dst, val)
+}
+
+// AppendTime encodes and adds a timestamp to the dst byte array.
+func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte {
+ utc := t.UTC()
+ if utc.Nanosecond() == 0 {
+ return appendIntegerTimestamp(dst, utc)
+ }
+ return e.appendFloatTimestamp(dst, utc)
+}
+
+// AppendTimes encodes and adds an array of timestamps to the dst byte array.
+func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+
+ for _, t := range vals {
+ dst = e.AppendTime(dst, t, unused)
+ }
+ return dst
+}
+
+// AppendDuration encodes and adds a duration to the dst byte array.
+// useInt indicates whether the duration is stored as an integer number of
+// units or as a floating point number of units.
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
+ if useInt {
+ return e.AppendInt64(dst, int64(d/unit))
+ }
+ return e.AppendFloat64(dst, float64(d)/float64(unit))
+}
+
+// AppendDurations encodes and adds an array of durations to the dst byte array.
+// useInt indicates whether each duration is stored as an integer number of
+// units or as a floating point number of units.
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, d := range vals {
+ dst = e.AppendDuration(dst, d, unit, useInt)
+ }
+ return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go
new file mode 100644
index 000000000..3d76ea08e
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/types.go
@@ -0,0 +1,478 @@
+package cbor
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "net"
+)
+
+// AppendNil inserts a 'Nil' object into the dst byte array.
+func (Encoder) AppendNil(dst []byte) []byte {
+ return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
+}
+
+// AppendBeginMarker inserts a map start into the dst byte array.
+func (Encoder) AppendBeginMarker(dst []byte) []byte {
+ return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
+}
+
+// AppendEndMarker inserts a map end into the dst byte array.
+func (Encoder) AppendEndMarker(dst []byte) []byte {
+ return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
+}
+
+// AppendObjectData takes an object in the form of a byte array and appends it to dst.
+func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
+ // BeginMarker is present in the dst, which
+ // should not be copied when appending to existing data.
+ return append(dst, o[1:]...)
+}
+
+// AppendArrayStart adds markers to indicate the start of an array.
+func (Encoder) AppendArrayStart(dst []byte) []byte {
+ return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
+}
+
+// AppendArrayEnd adds markers to indicate the end of an array.
+func (Encoder) AppendArrayEnd(dst []byte) []byte {
+ return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
+}
+
+// AppendArrayDelim adds markers to indicate end of a particular array element.
+func (Encoder) AppendArrayDelim(dst []byte) []byte {
+ // No delimiters are needed in CBOR.
+ return dst
+}
+
+// AppendLineBreak is a no-op that keeps API compatibility with the JSON encoder.
+func (Encoder) AppendLineBreak(dst []byte) []byte {
+ // No line breaks needed in binary format.
+ return dst
+}
+
+// AppendBool encodes and inserts a boolean value into the dst byte array.
+func (Encoder) AppendBool(dst []byte, val bool) []byte {
+ b := additionalTypeBoolFalse
+ if val {
+ b = additionalTypeBoolTrue
+ }
+ return append(dst, byte(majorTypeSimpleAndFloat|b))
+}
+
+// AppendBools encodes and inserts an array of boolean values into the dst byte array.
+func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendBool(dst, v)
+ }
+ return dst
+}
+
+// AppendInt encodes and inserts an integer value into the dst byte array.
+func (Encoder) AppendInt(dst []byte, val int) []byte {
+ major := majorTypeUnsignedInt
+ contentVal := val
+ if val < 0 {
+ major = majorTypeNegativeInt
+ contentVal = -val - 1
+ }
+ if contentVal <= additionalMax {
+ lb := byte(contentVal)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(contentVal))
+ }
+ return dst
+}
+
+// AppendInts encodes and inserts an array of integer values into the dst byte array.
+func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendInt(dst, v)
+ }
+ return dst
+}
+
+// AppendInt8 encodes and inserts an int8 value into the dst byte array.
+func (e Encoder) AppendInt8(dst []byte, val int8) []byte {
+ return e.AppendInt(dst, int(val))
+}
+
+// AppendInts8 encodes and inserts an array of integer values into the dst byte array.
+func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendInt(dst, int(v))
+ }
+ return dst
+}
+
+// AppendInt16 encodes and inserts an int16 value into the dst byte array.
+func (e Encoder) AppendInt16(dst []byte, val int16) []byte {
+ return e.AppendInt(dst, int(val))
+}
+
+// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
+func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendInt(dst, int(v))
+ }
+ return dst
+}
+
+// AppendInt32 encodes and inserts an int32 value into the dst byte array.
+func (e Encoder) AppendInt32(dst []byte, val int32) []byte {
+ return e.AppendInt(dst, int(val))
+}
+
+// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
+func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendInt(dst, int(v))
+ }
+ return dst
+}
+
+// AppendInt64 encodes and inserts an int64 value into the dst byte array.
+func (Encoder) AppendInt64(dst []byte, val int64) []byte {
+ major := majorTypeUnsignedInt
+ contentVal := val
+ if val < 0 {
+ major = majorTypeNegativeInt
+ contentVal = -val - 1
+ }
+ if contentVal <= additionalMax {
+ lb := byte(contentVal)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(contentVal))
+ }
+ return dst
+}
+
+// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
+func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendInt64(dst, v)
+ }
+ return dst
+}
+
+// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
+func (e Encoder) AppendUint(dst []byte, val uint) []byte {
+ return e.AppendInt64(dst, int64(val))
+}
+
+// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
+func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendUint(dst, v)
+ }
+ return dst
+}
+
+// AppendUint8 encodes and inserts an unsigned int8 value into the dst byte array.
+func (e Encoder) AppendUint8(dst []byte, val uint8) []byte {
+ return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
+func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendUint8(dst, v)
+ }
+ return dst
+}
+
+// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
+func (e Encoder) AppendUint16(dst []byte, val uint16) []byte {
+ return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
+func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendUint16(dst, v)
+ }
+ return dst
+}
+
+// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
+func (e Encoder) AppendUint32(dst []byte, val uint32) []byte {
+ return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
+func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendUint32(dst, v)
+ }
+ return dst
+}
+
+// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
+ major := majorTypeUnsignedInt
+ contentVal := val
+ if contentVal <= additionalMax {
+ lb := byte(contentVal)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(contentVal))
+ }
+ return dst
+}
+
+// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
+func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendUint64(dst, v)
+ }
+ return dst
+}
+
+// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
+func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
+ switch {
+ case math.IsNaN(float64(val)):
+ return append(dst, "\xfa\x7f\xc0\x00\x00"...)
+ case math.IsInf(float64(val), 1):
+ return append(dst, "\xfa\x7f\x80\x00\x00"...)
+ case math.IsInf(float64(val), -1):
+ return append(dst, "\xfa\xff\x80\x00\x00"...)
+ }
+ major := majorTypeSimpleAndFloat
+ subType := additionalTypeFloat32
+ n := math.Float32bits(val)
+ var buf [4]byte
+ for i := uint(0); i < 4; i++ {
+ buf[i] = byte(n >> ((3 - i) * 8))
+ }
+ return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
+}
+
+// AppendFloats32 encodes and inserts an array of single precision float values into the dst byte array.
+func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendFloat32(dst, v)
+ }
+ return dst
+}
+
+// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
+func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
+ switch {
+ case math.IsNaN(val):
+ return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
+ case math.IsInf(val, 1):
+ return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...)
+ case math.IsInf(val, -1):
+ return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...)
+ }
+ major := majorTypeSimpleAndFloat
+ subType := additionalTypeFloat64
+ n := math.Float64bits(val)
+ dst = append(dst, byte(major|subType))
+ for i := uint(1); i <= 8; i++ {
+ b := byte(n >> ((8 - i) * 8))
+ dst = append(dst, b)
+ }
+ return dst
+}
+
+// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
+func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
+ major := majorTypeArray
+ l := len(vals)
+ if l == 0 {
+ return e.AppendArrayEnd(e.AppendArrayStart(dst))
+ }
+ if l <= additionalMax {
+ lb := byte(l)
+ dst = append(dst, byte(major|lb))
+ } else {
+ dst = appendCborTypePrefix(dst, major, uint64(l))
+ }
+ for _, v := range vals {
+ dst = e.AppendFloat64(dst, v)
+ }
+ return dst
+}
+
+// AppendInterface takes an arbitrary object, converts it to JSON, and embeds it in dst.
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
+ marshaled, err := json.Marshal(i)
+ if err != nil {
+ return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
+ }
+ return AppendEmbeddedJSON(dst, marshaled)
+}
+
+// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
+func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
+ dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+ dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
+ dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
+ return e.AppendBytes(dst, ip)
+}
+
+// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
+func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
+ dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+ dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
+ dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))
+
+ // Prefix is a tuple (aka MAP of 1 pair of elements) -
+ // first element is prefix, second is mask length.
+ dst = append(dst, byte(majorTypeMap|0x1))
+ dst = e.AppendBytes(dst, pfx.IP)
+ maskLen, _ := pfx.Mask.Size()
+ return e.AppendUint8(dst, uint8(maskLen))
+}
+
+// AppendMACAddr encodes and inserts a hardware (MAC) address.
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
+ dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+ dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
+ dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
+ return e.AppendBytes(dst, ha)
+}
+
+// AppendHex adds a TAG and inserts hex bytes as a string.
+func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
+ dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+ dst = append(dst, byte(additionalTypeTagHexString>>8))
+ dst = append(dst, byte(additionalTypeTagHexString&0xff))
+ return e.AppendBytes(dst, val)
+}
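
For orientation, here is a minimal self-contained sketch of the size rule the encoders above rely on: values up to 23 are packed into the low bits of the initial byte, while larger values follow a 1/2/4/8-byte big-endian prefix (RFC 7049). The constant and function names below are illustrative only and are not the vendored package's identifiers; `appendCborTypePrefix` in the vendored file is assumed to handle the prefixed branches equivalently.

```go
package main

import "fmt"

// Illustrative constants following RFC 7049 (CBOR); not the vendored names.
const (
	majorUnsignedInt = 0x00 // major type 0 lives in the high three bits
	additionalMax    = 23   // largest value that fits in the initial byte
)

// appendCborUint mirrors the small-vs-prefixed split used above: values <= 23
// ride in the initial byte, larger values get a big-endian prefix whose width
// is selected by the additional-info value (24, 25, 26, or 27).
func appendCborUint(dst []byte, val uint64) []byte {
	if val <= additionalMax {
		return append(dst, majorUnsignedInt|byte(val))
	}
	switch {
	case val <= 0xff:
		return append(dst, majorUnsignedInt|24, byte(val))
	case val <= 0xffff:
		return append(dst, majorUnsignedInt|25, byte(val>>8), byte(val))
	case val <= 0xffffffff:
		return append(dst, majorUnsignedInt|26,
			byte(val>>24), byte(val>>16), byte(val>>8), byte(val))
	default:
		dst = append(dst, majorUnsignedInt|27)
		for shift := 56; shift >= 0; shift -= 8 {
			dst = append(dst, byte(val>>uint(shift)))
		}
		return dst
	}
}

func main() {
	fmt.Printf("% x\n", appendCborUint(nil, 10))  // 0a
	fmt.Printf("% x\n", appendCborUint(nil, 100)) // 18 64
	fmt.Printf("% x\n", appendCborUint(nil, 500)) // 19 01 f4
}
```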
diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go
new file mode 100644
index 000000000..62248e713
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/base.go
@@ -0,0 +1,11 @@
+package json
+
+type Encoder struct{}
+
+// AppendKey appends a new key to the output JSON.
+func (e Encoder) AppendKey(dst []byte, key string) []byte {
+ if dst[len(dst)-1] != '{' {
+ dst = append(dst, ',')
+ }
+ return append(e.AppendString(dst, key), ':')
+}
diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go
new file mode 100644
index 000000000..de64120d1
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/bytes.go
@@ -0,0 +1,85 @@
+package json
+
+import "unicode/utf8"
+
+// AppendBytes is a mirror of appendString with []byte arg
+func (Encoder) AppendBytes(dst, s []byte) []byte {
+ dst = append(dst, '"')
+ for i := 0; i < len(s); i++ {
+ if !noEscapeTable[s[i]] {
+ dst = appendBytesComplex(dst, s, i)
+ return append(dst, '"')
+ }
+ }
+ dst = append(dst, s...)
+ return append(dst, '"')
+}
+
+// AppendHex encodes the input bytes to a hex string and appends
+// the encoded string to the input byte slice.
+//
+// The operation loops through each byte and encodes it as hex using
+// the hex lookup table.
+func (Encoder) AppendHex(dst, s []byte) []byte {
+ dst = append(dst, '"')
+ for _, v := range s {
+ dst = append(dst, hex[v>>4], hex[v&0x0f])
+ }
+ return append(dst, '"')
+}
+
+// appendBytesComplex is a mirror of the appendStringComplex
+// with []byte arg
+func appendBytesComplex(dst, s []byte, i int) []byte {
+ start := 0
+ for i < len(s) {
+ b := s[i]
+ if b >= utf8.RuneSelf {
+ r, size := utf8.DecodeRune(s[i:])
+ if r == utf8.RuneError && size == 1 {
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ dst = append(dst, `\ufffd`...)
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ continue
+ }
+ if noEscapeTable[b] {
+ i++
+ continue
+ }
+ // We encountered a character that needs to be encoded.
+ // Let's append the previous simple characters to the byte slice
+ // and switch our operation to read and encode the remainder
+ // characters byte-by-byte.
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ switch b {
+ case '"', '\\':
+ dst = append(dst, '\\', b)
+ case '\b':
+ dst = append(dst, '\\', 'b')
+ case '\f':
+ dst = append(dst, '\\', 'f')
+ case '\n':
+ dst = append(dst, '\\', 'n')
+ case '\r':
+ dst = append(dst, '\\', 'r')
+ case '\t':
+ dst = append(dst, '\\', 't')
+ default:
+ dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ }
+ if start < len(s) {
+ dst = append(dst, s[start:]...)
+ }
+ return dst
+}
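
As a rough illustration of the observable behavior, a test of this shape could sit next to the file above in the same internal package (the file and test names are mine); it only restates what AppendBytes and AppendHex already do.

```go
package json

import "testing"

// Sketch test assumed to live alongside the file above (same internal package).
func TestAppendBytesAndHexSketch(t *testing.T) {
	var e Encoder

	// Plain ASCII is copied through and wrapped in quotes.
	if got := string(e.AppendBytes(nil, []byte("abc"))); got != `"abc"` {
		t.Fatalf("AppendBytes = %s", got)
	}

	// AppendHex emits two lowercase hex digits per input byte.
	if got := string(e.AppendHex(nil, []byte{0xde, 0xad, 0xbe, 0xef})); got != `"deadbeef"` {
		t.Fatalf("AppendHex = %s", got)
	}
}
```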
diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go
new file mode 100644
index 000000000..815906ff7
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/string.go
@@ -0,0 +1,121 @@
+package json
+
+import "unicode/utf8"
+
+const hex = "0123456789abcdef"
+
+var noEscapeTable = [256]bool{}
+
+func init() {
+ for i := 0; i <= 0x7e; i++ {
+ noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"'
+ }
+}
+
+// AppendStrings encodes the input strings to json and
+// appends the encoded string list to the input byte slice.
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = e.AppendString(dst, vals[0])
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = e.AppendString(append(dst, ','), val)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendString encodes the input string to json and appends
+// the encoded string to the input byte slice.
+//
+// The operation loops through each byte in the string looking
+// for characters that need json or utf8 encoding. If the string
+// does not need encoding, it is appended to the byte slice in its
+// entirety.
+// If a byte does need encoding, the operation switches to a
+// byte-by-byte read-encode-append.
+func (Encoder) AppendString(dst []byte, s string) []byte {
+ // Start with a double quote.
+ dst = append(dst, '"')
+ // Loop through each character in the string.
+ for i := 0; i < len(s); i++ {
+ // Check if the character needs encoding. Control characters, slashes,
+ // and the double quote need json encoding. Bytes above the ascii
+ // boundary need utf8 encoding.
+ if !noEscapeTable[s[i]] {
+ // We encountered a character that needs to be encoded. Switch
+ // to complex version of the algorithm.
+ dst = appendStringComplex(dst, s, i)
+ return append(dst, '"')
+ }
+ }
+ // The string has no need for encoding and therefore is directly
+ // appended to the byte slice.
+ dst = append(dst, s...)
+ // End with a double quote
+ return append(dst, '"')
+}
+
+// appendStringComplex is used by appendString to take over an in
+// progress JSON string encoding that encountered a character that needs
+// to be encoded.
+func appendStringComplex(dst []byte, s string, i int) []byte {
+ start := 0
+ for i < len(s) {
+ b := s[i]
+ if b >= utf8.RuneSelf {
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if r == utf8.RuneError && size == 1 {
+ // In case of error, first append previous simple characters to
+ // the byte slice if any and append a replacement character code
+ // in place of the invalid sequence.
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ dst = append(dst, `\ufffd`...)
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ continue
+ }
+ if noEscapeTable[b] {
+ i++
+ continue
+ }
+ // We encountered a character that needs to be encoded.
+ // Let's append the previous simple characters to the byte slice
+ // and switch our operation to read and encode the remainder
+ // characters byte-by-byte.
+ if start < i {
+ dst = append(dst, s[start:i]...)
+ }
+ switch b {
+ case '"', '\\':
+ dst = append(dst, '\\', b)
+ case '\b':
+ dst = append(dst, '\\', 'b')
+ case '\f':
+ dst = append(dst, '\\', 'f')
+ case '\n':
+ dst = append(dst, '\\', 'n')
+ case '\r':
+ dst = append(dst, '\\', 'r')
+ case '\t':
+ dst = append(dst, '\\', 't')
+ default:
+ dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ }
+ if start < len(s) {
+ dst = append(dst, s[start:]...)
+ }
+ return dst
+}
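
A sketch of the escaping rules in action, again written as an assumed test alongside the file in the same internal package; the expected outputs follow directly from the fast path and appendStringComplex above.

```go
package json

import "testing"

// Sketch test (same internal package as the file above) showing the
// escaping rules AppendString applies.
func TestAppendStringSketch(t *testing.T) {
	var e Encoder

	cases := map[string]string{
		"plain":        `"plain"`,          // fast path: copied verbatim
		"say \"hi\"":   `"say \"hi\""`,     // quotes escaped
		"line\nbreak":  `"line\nbreak"`,    // newline escaped as \n
		"tab\there":    `"tab\there"`,      // tab escaped as \t
		"\x01":         `"\u0001"`,         // other control chars use \u00XX
		"bad\xff utf8": `"bad\ufffd utf8"`, // invalid UTF-8 replaced
	}
	for in, want := range cases {
		if got := string(e.AppendString(nil, in)); got != want {
			t.Errorf("AppendString(%q) = %s, want %s", in, got, want)
		}
	}
}
```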
diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go
new file mode 100644
index 000000000..5aff6be33
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/time.go
@@ -0,0 +1,106 @@
+package json
+
+import (
+ "strconv"
+ "time"
+)
+
+const (
+ // Imported from zerolog/global.go
+ timeFormatUnix = ""
+ timeFormatUnixMs = "UNIXMS"
+ timeFormatUnixMicro = "UNIXMICRO"
+)
+
+// AppendTime formats the input time with the given format
+// and appends the encoded string to the input byte slice.
+func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
+ switch format {
+ case timeFormatUnix:
+ return e.AppendInt64(dst, t.Unix())
+ case timeFormatUnixMs:
+ return e.AppendInt64(dst, t.UnixNano()/1000000)
+ case timeFormatUnixMicro:
+ return e.AppendInt64(dst, t.UnixNano()/1000)
+ }
+ return append(t.AppendFormat(append(dst, '"'), format), '"')
+}
+
+// AppendTimes converts the input times with the given format
+// and appends the encoded string list to the input byte slice.
+func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
+ switch format {
+ case timeFormatUnix:
+ return appendUnixTimes(dst, vals)
+ case timeFormatUnixMs:
+ return appendUnixMsTimes(dst, vals)
+ }
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"')
+ if len(vals) > 1 {
+ for _, t := range vals[1:] {
+ dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"')
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+func appendUnixTimes(dst []byte, vals []time.Time) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, vals[0].Unix(), 10)
+ if len(vals) > 1 {
+ for _, t := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10)
+ if len(vals) > 1 {
+ for _, t := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendDuration formats the input duration with the given unit & format
+// and appends the encoded string to the input byte slice.
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
+ if useInt {
+ return strconv.AppendInt(dst, int64(d/unit), 10)
+ }
+ return e.AppendFloat64(dst, float64(d)/float64(unit))
+}
+
+// AppendDurations formats the input durations with the given unit & format
+// and appends the encoded string list to the input byte slice.
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = e.AppendDuration(dst, vals[0], unit, useInt)
+ if len(vals) > 1 {
+ for _, d := range vals[1:] {
+ dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
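
For the time and duration helpers, a small assumed test in the same internal package would look roughly like this (the empty format string corresponds to timeFormatUnix above).

```go
package json

import (
	"testing"
	"time"
)

// Assumed sketch test for the time and duration helpers above.
func TestTimeAndDurationSketch(t *testing.T) {
	var e Encoder

	// The empty format selects the plain unix-seconds representation.
	if got := string(e.AppendTime(nil, time.Unix(1494567715, 0), "")); got != "1494567715" {
		t.Fatalf("AppendTime = %s", got)
	}

	d := 1500 * time.Millisecond
	// useInt=true truncates to whole units...
	if got := string(e.AppendDuration(nil, d, time.Second, true)); got != "1" {
		t.Fatalf("integer form = %s", got)
	}
	// ...while useInt=false keeps the fractional part as a float.
	if got := string(e.AppendDuration(nil, d, time.Second, false)); got != "1.5" {
		t.Fatalf("float form = %s", got)
	}
}
```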
diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go
new file mode 100644
index 000000000..924416c24
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/types.go
@@ -0,0 +1,406 @@
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "net"
+ "strconv"
+)
+
+// AppendNil inserts a 'Nil' object into the dst byte array.
+func (Encoder) AppendNil(dst []byte) []byte {
+ return append(dst, "null"...)
+}
+
+// AppendBeginMarker inserts a map start into the dst byte array.
+func (Encoder) AppendBeginMarker(dst []byte) []byte {
+ return append(dst, '{')
+}
+
+// AppendEndMarker inserts a map end into the dst byte array.
+func (Encoder) AppendEndMarker(dst []byte) []byte {
+ return append(dst, '}')
+}
+
+// AppendLineBreak appends a line break.
+func (Encoder) AppendLineBreak(dst []byte) []byte {
+ return append(dst, '\n')
+}
+
+// AppendArrayStart adds markers to indicate the start of an array.
+func (Encoder) AppendArrayStart(dst []byte) []byte {
+ return append(dst, '[')
+}
+
+// AppendArrayEnd adds markers to indicate the end of an array.
+func (Encoder) AppendArrayEnd(dst []byte) []byte {
+ return append(dst, ']')
+}
+
+// AppendArrayDelim adds markers to indicate end of a particular array element.
+func (Encoder) AppendArrayDelim(dst []byte) []byte {
+ if len(dst) > 0 {
+ return append(dst, ',')
+ }
+ return dst
+}
+
+// AppendBool converts the input bool to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendBool(dst []byte, val bool) []byte {
+ return strconv.AppendBool(dst, val)
+}
+
+// AppendBools encodes the input bools to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendBools(dst []byte, vals []bool) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendBool(dst, vals[0])
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendBool(append(dst, ','), val)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInt converts the input int to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt(dst []byte, val int) []byte {
+ return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts encodes the input ints to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts(dst []byte, vals []int) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInt8 converts the input int8 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt8(dst []byte, val int8) []byte {
+ return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts8 encodes the input int8s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts8(dst []byte, vals []int8) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInt16 converts the input int16 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt16(dst []byte, val int16) []byte {
+ return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts16 encodes the input int16s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts16(dst []byte, vals []int16) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInt32 converts the input int32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt32(dst []byte, val int32) []byte {
+ return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts32 encodes the input int32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts32(dst []byte, vals []int32) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInt64 converts the input int64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt64(dst []byte, val int64) []byte {
+ return strconv.AppendInt(dst, val, 10)
+}
+
+// AppendInts64 encodes the input int64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts64(dst []byte, vals []int64) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendInt(dst, vals[0], 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendInt(append(dst, ','), val, 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendUint converts the input uint to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint(dst []byte, val uint) []byte {
+ return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints encodes the input uints to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints(dst []byte, vals []uint) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendUint8 converts the input uint8 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint8(dst []byte, val uint8) []byte {
+ return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints8 encodes the input uint8s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendUint16 converts the input uint16 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint16(dst []byte, val uint16) []byte {
+ return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints16 encodes the input uint16s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendUint32 converts the input uint32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint32(dst []byte, val uint32) []byte {
+ return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints32 encodes the input uint32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendUint64 converts the input uint64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
+ return strconv.AppendUint(dst, val, 10)
+}
+
+// AppendUints64 encodes the input uint64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = strconv.AppendUint(dst, vals[0], 10)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = strconv.AppendUint(append(dst, ','), val, 10)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+func appendFloat(dst []byte, val float64, bitSize int) []byte {
+ // JSON does not permit NaN or Infinity. A typical JSON encoder would fail
+ // with an error, but a logging library wants the data to get through, so we
+ // make a tradeoff and store those values as strings.
+ switch {
+ case math.IsNaN(val):
+ return append(dst, `"NaN"`...)
+ case math.IsInf(val, 1):
+ return append(dst, `"+Inf"`...)
+ case math.IsInf(val, -1):
+ return append(dst, `"-Inf"`...)
+ }
+ return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
+}
+
+// AppendFloat32 converts the input float32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
+ return appendFloat(dst, float64(val), 32)
+}
+
+// AppendFloats32 encodes the input float32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = appendFloat(dst, float64(vals[0]), 32)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = appendFloat(append(dst, ','), float64(val), 32)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendFloat64 converts the input float64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
+ return appendFloat(dst, val, 64)
+}
+
+// AppendFloats64 encodes the input float64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
+ if len(vals) == 0 {
+ return append(dst, '[', ']')
+ }
+ dst = append(dst, '[')
+ dst = appendFloat(dst, vals[0], 64)
+ if len(vals) > 1 {
+ for _, val := range vals[1:] {
+ dst = appendFloat(append(dst, ','), val, 64)
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+}
+
+// AppendInterface marshals the input interface to a string and
+// appends the encoded string to the input byte slice.
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
+ marshaled, err := json.Marshal(i)
+ if err != nil {
+ return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
+ }
+ return append(dst, marshaled...)
+}
+
+// AppendObjectData takes in an object that is already in a byte array
+// and adds it to the dst.
+func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
+ // Three conditions apply here:
+ // 1. new content starts with '{' and dst has no fields yet - the '{' is
+ //    simply dropped OR
+ // 2. new content starts with '{' and dst already has fields - the '{' is
+ //    replaced with ',' to separate it from the existing content OR
+ // 3. new content does not start with '{' - a ',' is appended if dst
+ //    already has other fields
+ if o[0] == '{' {
+ if len(dst) > 1 {
+ dst = append(dst, ',')
+ }
+ o = o[1:]
+ } else if len(dst) > 1 {
+ dst = append(dst, ',')
+ }
+ return append(dst, o...)
+}
+
+// AppendIPAddr adds IPv4 or IPv6 address to dst.
+func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
+ return e.AppendString(dst, ip.String())
+}
+
+// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst.
+func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
+ return e.AppendString(dst, pfx.String())
+
+}
+
+// AppendMACAddr adds MAC address to dst.
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
+ return e.AppendString(dst, ha.String())
+}
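
The non-finite handling described in appendFloat can be pinned down with an assumed test like the following, placed in the same internal package.

```go
package json

import (
	"math"
	"testing"
)

// Sketch test for the non-finite float handling described in appendFloat above.
func TestAppendFloatNonFiniteSketch(t *testing.T) {
	var e Encoder

	// NaN and infinities are stored as quoted strings instead of failing.
	if got := string(e.AppendFloat64(nil, math.NaN())); got != `"NaN"` {
		t.Fatalf("NaN = %s", got)
	}
	if got := string(e.AppendFloat64(nil, math.Inf(1))); got != `"+Inf"` {
		t.Fatalf("+Inf = %s", got)
	}
	// Finite values use the shortest 'f' representation.
	if got := string(e.AppendFloat64(nil, 3.25)); got != "3.25" {
		t.Fatalf("finite = %s", got)
	}
}
```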
diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go
new file mode 100644
index 000000000..6bc6a802f
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/log.go
@@ -0,0 +1,448 @@
+// Package zerolog provides a lightweight logging library dedicated to JSON logging.
+//
+// A global Logger can be used for simple logging:
+//
+// import "github.com/rs/zerolog/log"
+//
+// log.Info().Msg("hello world")
+// // Output: {"time":1494567715,"level":"info","message":"hello world"}
+//
+// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log".
+//
+// Fields can be added to log messages:
+//
+// log.Info().Str("foo", "bar").Msg("hello world")
+// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
+//
+// Create a logger instance to manage different outputs:
+//
+// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+// logger.Info().
+// Str("foo", "bar").
+// Msg("hello world")
+// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
+//
+// Sub-loggers let you chain loggers with additional context:
+//
+// sublogger := log.With().Str("component", "foo").Logger()
+// sublogger.Info().Msg("hello world")
+// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
+//
+// Level logging
+//
+// zerolog.SetGlobalLevel(zerolog.InfoLevel)
+//
+// log.Debug().Msg("filtered out message")
+// log.Info().Msg("routed message")
+//
+// if e := log.Debug(); e.Enabled() {
+// // Compute log output only if enabled.
+// value := compute()
+// e.Str("foo": value).Msg("some debug message")
+// }
+// // Output: {"level":"info","time":1494567715,"routed message"}
+//
+// Customize automatic field names:
+//
+// log.TimestampFieldName = "t"
+// log.LevelFieldName = "p"
+// log.MessageFieldName = "m"
+//
+// log.Info().Msg("hello world")
+// // Output: {"t":1494567715,"p":"info","m":"hello world"}
+//
+// Log with no level and no message:
+//
+// log.Log().Str("foo","bar").Msg("")
+// // Output: {"time":1494567715,"foo":"bar"}
+//
+// Add contextual fields to global Logger:
+//
+// log.Logger = log.With().Str("foo", "bar").Logger()
+//
+// Sample logs:
+//
+// sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+// sampled.Info().Msg("will be logged every 10 messages")
+//
+// Log with contextual hooks:
+//
+// // Create the hook:
+// type SeverityHook struct{}
+//
+// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+// if level != zerolog.NoLevel {
+// e.Str("severity", level.String())
+// }
+// }
+//
+// // And use it:
+// var h SeverityHook
+// log := zerolog.New(os.Stdout).Hook(h)
+// log.Warn().Msg("")
+// // Output: {"level":"warn","severity":"warn"}
+//
+//
+// Caveats
+//
+// There is no field deduplication out of the box.
+// Using the same key multiple times creates a new key in the final JSON each time.
+//
+// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+// logger.Info().
+// Timestamp().
+// Msg("dup")
+// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+//
+// In this case, many consumers will take the last value,
+// but this is not guaranteed; check yours if in doubt.
+package zerolog
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+)
+
+// Level defines log levels.
+type Level int8
+
+const (
+ // DebugLevel defines debug log level.
+ DebugLevel Level = iota
+ // InfoLevel defines info log level.
+ InfoLevel
+ // WarnLevel defines warn log level.
+ WarnLevel
+ // ErrorLevel defines error log level.
+ ErrorLevel
+ // FatalLevel defines fatal log level.
+ FatalLevel
+ // PanicLevel defines panic log level.
+ PanicLevel
+ // NoLevel defines an absent log level.
+ NoLevel
+ // Disabled disables the logger.
+ Disabled
+
+ // TraceLevel defines trace log level.
+ TraceLevel Level = -1
+)
+
+func (l Level) String() string {
+ switch l {
+ case TraceLevel:
+ return LevelTraceValue
+ case DebugLevel:
+ return LevelDebugValue
+ case InfoLevel:
+ return LevelInfoValue
+ case WarnLevel:
+ return LevelWarnValue
+ case ErrorLevel:
+ return LevelErrorValue
+ case FatalLevel:
+ return LevelFatalValue
+ case PanicLevel:
+ return LevelPanicValue
+ case Disabled:
+ return "disabled"
+ case NoLevel:
+ return ""
+ }
+ return ""
+}
+
+// ParseLevel converts a level string into a zerolog Level value.
+// It returns an error if the input string does not match known values.
+func ParseLevel(levelStr string) (Level, error) {
+ switch levelStr {
+ case LevelFieldMarshalFunc(TraceLevel):
+ return TraceLevel, nil
+ case LevelFieldMarshalFunc(DebugLevel):
+ return DebugLevel, nil
+ case LevelFieldMarshalFunc(InfoLevel):
+ return InfoLevel, nil
+ case LevelFieldMarshalFunc(WarnLevel):
+ return WarnLevel, nil
+ case LevelFieldMarshalFunc(ErrorLevel):
+ return ErrorLevel, nil
+ case LevelFieldMarshalFunc(FatalLevel):
+ return FatalLevel, nil
+ case LevelFieldMarshalFunc(PanicLevel):
+ return PanicLevel, nil
+ case LevelFieldMarshalFunc(Disabled):
+ return Disabled, nil
+ case LevelFieldMarshalFunc(NoLevel):
+ return NoLevel, nil
+ }
+ return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr)
+}
+
+// A Logger represents an active logging object that generates lines
+// of JSON output to an io.Writer. Each logging operation makes a single
+// call to the Writer's Write method. There is no guarantee on access
+// serialization to the Writer. If your Writer is not thread safe,
+// you may consider a sync wrapper.
+type Logger struct {
+ w LevelWriter
+ level Level
+ sampler Sampler
+ context []byte
+ hooks []Hook
+ stack bool
+}
+
+// New creates a root logger with given output writer. If the output writer implements
+// the LevelWriter interface, the WriteLevel method will be called instead of the Write
+// one.
+//
+// Each logging operation makes a single call to the Writer's Write method. There is no
+// guarantee on access serialization to the Writer. If your Writer is not thread safe,
+// you may consider using a sync wrapper.
+func New(w io.Writer) Logger {
+ if w == nil {
+ w = ioutil.Discard
+ }
+ lw, ok := w.(LevelWriter)
+ if !ok {
+ lw = levelWriterAdapter{w}
+ }
+ return Logger{w: lw, level: TraceLevel}
+}
+
+// Nop returns a disabled logger for which all operations are no-op.
+func Nop() Logger {
+ return New(nil).Level(Disabled)
+}
+
+// Output duplicates the current logger and sets w as its output.
+func (l Logger) Output(w io.Writer) Logger {
+ l2 := New(w)
+ l2.level = l.level
+ l2.sampler = l.sampler
+ if len(l.hooks) > 0 {
+ l2.hooks = append(l2.hooks, l.hooks...)
+ }
+ if l.context != nil {
+ l2.context = make([]byte, len(l.context), cap(l.context))
+ copy(l2.context, l.context)
+ }
+ return l2
+}
+
+// With creates a child logger with the field added to its context.
+func (l Logger) With() Context {
+ context := l.context
+ l.context = make([]byte, 0, 500)
+ if context != nil {
+ l.context = append(l.context, context...)
+ } else {
+ // This is needed so AppendKey does not have to check the length of
+ // its input, which keeps it inlinable.
+ l.context = enc.AppendBeginMarker(l.context)
+ }
+ return Context{l}
+}
+
+// UpdateContext updates the internal logger's context.
+//
+// Use this method with caution. If unsure, prefer the With method.
+func (l *Logger) UpdateContext(update func(c Context) Context) {
+ if l == disabledLogger {
+ return
+ }
+ if cap(l.context) == 0 {
+ l.context = make([]byte, 0, 500)
+ }
+ if len(l.context) == 0 {
+ l.context = enc.AppendBeginMarker(l.context)
+ }
+ c := update(Context{*l})
+ l.context = c.l.context
+}
+
+// Level creates a child logger with the minimum accepted level set to level.
+func (l Logger) Level(lvl Level) Logger {
+ l.level = lvl
+ return l
+}
+
+// GetLevel returns the current Level of l.
+func (l Logger) GetLevel() Level {
+ return l.level
+}
+
+// Sample returns a logger with the s sampler.
+func (l Logger) Sample(s Sampler) Logger {
+ l.sampler = s
+ return l
+}
+
+// Hook returns a logger with the h Hook.
+func (l Logger) Hook(h Hook) Logger {
+ l.hooks = append(l.hooks, h)
+ return l
+}
+
+// Trace starts a new message with trace level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Trace() *Event {
+ return l.newEvent(TraceLevel, nil)
+}
+
+// Debug starts a new message with debug level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Debug() *Event {
+ return l.newEvent(DebugLevel, nil)
+}
+
+// Info starts a new message with info level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Info() *Event {
+ return l.newEvent(InfoLevel, nil)
+}
+
+// Warn starts a new message with warn level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Warn() *Event {
+ return l.newEvent(WarnLevel, nil)
+}
+
+// Error starts a new message with error level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Error() *Event {
+ return l.newEvent(ErrorLevel, nil)
+}
+
+// Err starts a new message with error level with err as a field if not nil or
+// with info level if err is nil.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Err(err error) *Event {
+ if err != nil {
+ return l.Error().Err(err)
+ }
+
+ return l.Info()
+}
+
+// Fatal starts a new message with fatal level. The os.Exit(1) function
+// is called by the Msg method, which terminates the program immediately.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Fatal() *Event {
+ return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
+}
+
+// Panic starts a new message with panic level. The panic() function
+// is called by the Msg method, which stops the ordinary flow of a goroutine.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Panic() *Event {
+ return l.newEvent(PanicLevel, func(msg string) { panic(msg) })
+}
+
+// WithLevel starts a new message with level. Unlike Fatal and Panic
+// methods, WithLevel does not terminate the program or stop the ordinary
+// flow of a goroutine when used with their respective levels.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) WithLevel(level Level) *Event {
+ switch level {
+ case TraceLevel:
+ return l.Trace()
+ case DebugLevel:
+ return l.Debug()
+ case InfoLevel:
+ return l.Info()
+ case WarnLevel:
+ return l.Warn()
+ case ErrorLevel:
+ return l.Error()
+ case FatalLevel:
+ return l.newEvent(FatalLevel, nil)
+ case PanicLevel:
+ return l.newEvent(PanicLevel, nil)
+ case NoLevel:
+ return l.Log()
+ case Disabled:
+ return nil
+ default:
+ panic("zerolog: WithLevel(): invalid level: " + strconv.Itoa(int(level)))
+ }
+}
+
+// Log starts a new message with no level. Setting GlobalLevel to Disabled
+// will still disable events produced by this method.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Log() *Event {
+ return l.newEvent(NoLevel, nil)
+}
+
+// Print sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Print.
+func (l *Logger) Print(v ...interface{}) {
+ if e := l.Debug(); e.Enabled() {
+ e.CallerSkipFrame(1).Msg(fmt.Sprint(v...))
+ }
+}
+
+// Printf sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Printf.
+func (l *Logger) Printf(format string, v ...interface{}) {
+ if e := l.Debug(); e.Enabled() {
+ e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...))
+ }
+}
+
+// Write implements the io.Writer interface. This is useful to set as a writer
+// for the standard library log.
+func (l Logger) Write(p []byte) (n int, err error) {
+ n = len(p)
+ if n > 0 && p[n-1] == '\n' {
+ // Trim the trailing newline added by stdlog.
+ p = p[0 : n-1]
+ }
+ l.Log().CallerSkipFrame(1).Msg(string(p))
+ return
+}
+
+func (l *Logger) newEvent(level Level, done func(string)) *Event {
+ enabled := l.should(level)
+ if !enabled {
+ return nil
+ }
+ e := newEvent(l.w, level)
+ e.done = done
+ e.ch = l.hooks
+ if level != NoLevel && LevelFieldName != "" {
+ e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
+ }
+ if l.context != nil && len(l.context) > 1 {
+ e.buf = enc.AppendObjectData(e.buf, l.context)
+ }
+ if l.stack {
+ e.Stack()
+ }
+ return e
+}
+
+// should returns true if the log event should be logged.
+func (l *Logger) should(lvl Level) bool {
+ if lvl < l.level || lvl < GlobalLevel() {
+ return false
+ }
+ if l.sampler != nil && !samplingDisabled() {
+ return l.sampler.Sample(lvl)
+ }
+ return true
+}
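
Putting the level plumbing together, a hedged sketch using zerolog's public API might look like this; should() drops events below both the per-logger level and the global level.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Per-logger level: events below WarnLevel are dropped by should().
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger().Level(zerolog.WarnLevel)

	logger.Info().Msg("filtered out")           // below the logger level, no output
	logger.Warn().Str("foo", "bar").Msg("kept") // emitted as one JSON line

	// The global level is applied on top of the per-logger level.
	zerolog.SetGlobalLevel(zerolog.ErrorLevel)
	logger.Warn().Msg("now filtered by the global level")
}
```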
diff --git a/vendor/github.com/rs/zerolog/not_go112.go b/vendor/github.com/rs/zerolog/not_go112.go
new file mode 100644
index 000000000..4c43c9e76
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/not_go112.go
@@ -0,0 +1,5 @@
+// +build !go1.12
+
+package zerolog
+
+const contextCallerSkipFrameCount = 3
diff --git a/vendor/github.com/rs/zerolog/pkgerrors/stacktrace.go b/vendor/github.com/rs/zerolog/pkgerrors/stacktrace.go
new file mode 100644
index 000000000..01420e64a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/pkgerrors/stacktrace.go
@@ -0,0 +1,65 @@
+package pkgerrors
+
+import (
+ "github.com/pkg/errors"
+)
+
+var (
+ StackSourceFileName = "source"
+ StackSourceLineName = "line"
+ StackSourceFunctionName = "func"
+)
+
+type state struct {
+ b []byte
+}
+
+// Write implements the fmt.State interface.
+func (s *state) Write(b []byte) (n int, err error) {
+ s.b = b
+ return len(b), nil
+}
+
+// Width implements the fmt.State interface.
+func (s *state) Width() (wid int, ok bool) {
+ return 0, false
+}
+
+// Precision implements the fmt.State interface.
+func (s *state) Precision() (prec int, ok bool) {
+ return 0, false
+}
+
+// Flag implements the fmt.State interface.
+func (s *state) Flag(c int) bool {
+ return false
+}
+
+func frameField(f errors.Frame, s *state, c rune) string {
+ f.Format(s, c)
+ return string(s.b)
+}
+
+// MarshalStack implements pkg/errors stack trace marshaling.
+//
+// zerolog.ErrorStackMarshaler = MarshalStack
+func MarshalStack(err error) interface{} {
+ type stackTracer interface {
+ StackTrace() errors.StackTrace
+ }
+ sterr, ok := err.(stackTracer)
+ if !ok {
+ return nil
+ }
+ st := sterr.StackTrace()
+ s := &state{}
+ out := make([]map[string]string, 0, len(st))
+ for _, frame := range st {
+ out = append(out, map[string]string{
+ StackSourceFileName: frameField(frame, s, 's'),
+ StackSourceLineName: frameField(frame, s, 'd'),
+ StackSourceFunctionName: frameField(frame, s, 'n'),
+ })
+ }
+ return out
+}
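
A possible wiring of this marshaler through zerolog's public API: the ErrorStackMarshaler hook and Event.Stack are part of the public package, while the error values here are only examples.

```go
package main

import (
	"os"

	"github.com/pkg/errors"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/pkgerrors"
)

func main() {
	// Install the pkg/errors stack marshaler, then request the stack
	// explicitly on the event before attaching the error.
	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
	err := errors.Wrap(errors.New("boom"), "outer")
	logger.Error().Stack().Err(err).Msg("something failed")
	// The "stack" field holds entries like {"source":...,"line":...,"func":...}.
}
```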
diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png
new file mode 100644
index 000000000..242033686
Binary files /dev/null and b/vendor/github.com/rs/zerolog/pretty.png differ
diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go
new file mode 100644
index 000000000..a99629eb0
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/sampler.go
@@ -0,0 +1,134 @@
+package zerolog
+
+import (
+ "math/rand"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ // Often allows roughly 1 in 10 events through.
+ Often = RandomSampler(10)
+ // Sometimes allows roughly 1 in 100 events through.
+ Sometimes = RandomSampler(100)
+ // Rarely allows roughly 1 in 1000 events through.
+ Rarely = RandomSampler(1000)
+)
+
+// Sampler defines an interface to a log sampler.
+type Sampler interface {
+ // Sample returns true if the event should be part of the sample, false if
+ // the event should be dropped.
+ Sample(lvl Level) bool
+}
+
+// RandomSampler uses a PRNG to randomly sample an event out of N events,
+// regardless of their level.
+type RandomSampler uint32
+
+// Sample implements the Sampler interface.
+func (s RandomSampler) Sample(lvl Level) bool {
+ if s <= 0 {
+ return false
+ }
+ if rand.Intn(int(s)) != 0 {
+ return false
+ }
+ return true
+}
+
+// BasicSampler is a sampler that will send every Nth event, regardless of
+// its level.
+type BasicSampler struct {
+ N uint32
+ counter uint32
+}
+
+// Sample implements the Sampler interface.
+func (s *BasicSampler) Sample(lvl Level) bool {
+ n := s.N
+ if n == 1 {
+ return true
+ }
+ c := atomic.AddUint32(&s.counter, 1)
+ return c%n == 1
+}
+
+// BurstSampler lets Burst events pass per Period, then passes the decision to
+// NextSampler. If NextSampler is not set, all subsequent events are rejected.
+type BurstSampler struct {
+ // Burst is the maximum number of events per period allowed before calling
+ // NextSampler.
+ Burst uint32
+ // Period defines the burst period. If 0, NextSampler is always called.
+ Period time.Duration
+ // NextSampler is the sampler used after the burst is reached. If nil,
+ // events are always rejected after the burst.
+ NextSampler Sampler
+
+ counter uint32
+ resetAt int64
+}
+
+// Sample implements the Sampler interface.
+func (s *BurstSampler) Sample(lvl Level) bool {
+ if s.Burst > 0 && s.Period > 0 {
+ if s.inc() <= s.Burst {
+ return true
+ }
+ }
+ if s.NextSampler == nil {
+ return false
+ }
+ return s.NextSampler.Sample(lvl)
+}
+
+func (s *BurstSampler) inc() uint32 {
+ now := time.Now().UnixNano()
+ resetAt := atomic.LoadInt64(&s.resetAt)
+ var c uint32
+ if now > resetAt {
+ c = 1
+ atomic.StoreUint32(&s.counter, c)
+ newResetAt := now + s.Period.Nanoseconds()
+ reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt)
+ if !reset {
+ // Lost the race with another goroutine trying to reset.
+ c = atomic.AddUint32(&s.counter, 1)
+ }
+ } else {
+ c = atomic.AddUint32(&s.counter, 1)
+ }
+ return c
+}
+
+// LevelSampler applies a different sampler for each level.
+type LevelSampler struct {
+ TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler
+}
+
+func (s LevelSampler) Sample(lvl Level) bool {
+ switch lvl {
+ case TraceLevel:
+ if s.TraceSampler != nil {
+ return s.TraceSampler.Sample(lvl)
+ }
+ case DebugLevel:
+ if s.DebugSampler != nil {
+ return s.DebugSampler.Sample(lvl)
+ }
+ case InfoLevel:
+ if s.InfoSampler != nil {
+ return s.InfoSampler.Sample(lvl)
+ }
+ case WarnLevel:
+ if s.WarnSampler != nil {
+ return s.WarnSampler.Sample(lvl)
+ }
+ case ErrorLevel:
+ if s.ErrorSampler != nil {
+ return s.ErrorSampler.Sample(lvl)
+ }
+ }
+ return true
+}
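
A sketch of how these samplers might be combined through the public API shown earlier (Logger.Sample); the numbers are arbitrary.

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Let the first 5 events through each second, then fall back to
	// keeping roughly 1 in 100 via the BasicSampler.
	sampled := zerolog.New(os.Stdout).Sample(&zerolog.BurstSampler{
		Burst:       5,
		Period:      time.Second,
		NextSampler: &zerolog.BasicSampler{N: 100},
	})

	for i := 0; i < 1000; i++ {
		sampled.Debug().Int("i", i).Msg("noisy event")
	}
}
```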
diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go
new file mode 100644
index 000000000..c40828307
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/syslog.go
@@ -0,0 +1,80 @@
+// +build !windows
+// +build !binary_log
+
+package zerolog
+
+import (
+ "io"
+)
+
+// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog
+// or https://www.rsyslog.com/json-elasticsearch/
+const ceePrefix = "@cee:"
+
+// SyslogWriter is an interface matching a syslog.Writer struct.
+type SyslogWriter interface {
+ io.Writer
+ Debug(m string) error
+ Info(m string) error
+ Warning(m string) error
+ Err(m string) error
+ Emerg(m string) error
+ Crit(m string) error
+}
+
+type syslogWriter struct {
+ w SyslogWriter
+ prefix string
+}
+
+// SyslogLevelWriter wraps a SyslogWriter and calls the right syslog level
+// method matching the zerolog level.
+func SyslogLevelWriter(w SyslogWriter) LevelWriter {
+ return syslogWriter{w, ""}
+}
+
+// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a
+// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog
+// and syslog-ng JSON logging support.
+// See https://www.rsyslog.com/json-elasticsearch/
+func SyslogCEEWriter(w SyslogWriter) LevelWriter {
+ return syslogWriter{w, ceePrefix}
+}
+
+func (sw syslogWriter) Write(p []byte) (n int, err error) {
+ var pn int
+ if sw.prefix != "" {
+ pn, err = sw.w.Write([]byte(sw.prefix))
+ if err != nil {
+ return pn, err
+ }
+ }
+ n, err = sw.w.Write(p)
+ return pn + n, err
+}
+
+// WriteLevel implements LevelWriter interface.
+func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
+ switch level {
+ case TraceLevel:
+ case DebugLevel:
+ err = sw.w.Debug(sw.prefix + string(p))
+ case InfoLevel:
+ err = sw.w.Info(sw.prefix + string(p))
+ case WarnLevel:
+ err = sw.w.Warning(sw.prefix + string(p))
+ case ErrorLevel:
+ err = sw.w.Err(sw.prefix + string(p))
+ case FatalLevel:
+ err = sw.w.Emerg(sw.prefix + string(p))
+ case PanicLevel:
+ err = sw.w.Crit(sw.prefix + string(p))
+ case NoLevel:
+ err = sw.w.Info(sw.prefix + string(p))
+ default:
+ panic("invalid level")
+ }
+ // Any CEE prefix is not part of the message, so we don't include its length
+ n = len(p)
+ return
+}
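
On Unix-like systems, the standard library's *syslog.Writer satisfies the SyslogWriter interface above, so a minimal hook-up could look like this sketch (the tag and facility are placeholders).

```go
package main

import (
	"log/syslog"

	"github.com/rs/zerolog"
)

func main() {
	// *syslog.Writer provides Write plus the Debug/Info/Warning/Err/Emerg/Crit
	// methods required by the SyslogWriter interface.
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "myapp")
	if err != nil {
		panic(err)
	}

	logger := zerolog.New(zerolog.SyslogLevelWriter(w))
	logger.Warn().Msg("routed to syslog.Warning")

	// SyslogCEEWriter prepends "@cee:" so rsyslog/syslog-ng can parse the JSON.
	ceeLogger := zerolog.New(zerolog.SyslogCEEWriter(w))
	ceeLogger.Info().Msg("structured entry")
}
```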
diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go
new file mode 100644
index 000000000..98c932ea5
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/writer.go
@@ -0,0 +1,98 @@
+package zerolog
+
+import (
+ "io"
+ "sync"
+)
+
+// LevelWriter defines an interface a writer may implement in order
+// to receive level information with payload.
+type LevelWriter interface {
+ io.Writer
+ WriteLevel(level Level, p []byte) (n int, err error)
+}
+
+type levelWriterAdapter struct {
+ io.Writer
+}
+
+func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
+ return lw.Write(p)
+}
+
+type syncWriter struct {
+ mu sync.Mutex
+ lw LevelWriter
+}
+
+// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
+// This syncer can be used to wrap the call to writer's Write method if it is
+// not thread safe. Note that you do not need this wrapper for os.File Write
+// operations on POSIX and Windows systems as they are already thread-safe.
+func SyncWriter(w io.Writer) io.Writer {
+ if lw, ok := w.(LevelWriter); ok {
+ return &syncWriter{lw: lw}
+ }
+ return &syncWriter{lw: levelWriterAdapter{w}}
+}
+
+// Write implements the io.Writer interface.
+func (s *syncWriter) Write(p []byte) (n int, err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.lw.Write(p)
+}
+
+// WriteLevel implements the LevelWriter interface.
+func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.lw.WriteLevel(l, p)
+}
+
+type multiLevelWriter struct {
+ writers []LevelWriter
+}
+
+func (t multiLevelWriter) Write(p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ if _n, _err := w.Write(p); err == nil {
+ n = _n
+ if _err != nil {
+ err = _err
+ } else if _n != len(p) {
+ err = io.ErrShortWrite
+ }
+ }
+ }
+ return n, err
+}
+
+func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ if _n, _err := w.WriteLevel(l, p); err == nil {
+ n = _n
+ if _err != nil {
+ err = _err
+ } else if _n != len(p) {
+ err = io.ErrShortWrite
+ }
+ }
+ }
+ return n, err
+}
+
+// MultiLevelWriter creates a writer that duplicates its writes to all the
+// provided writers, similar to the Unix tee(1) command. If some writers
+// implement LevelWriter, their WriteLevel method will be used instead of Write.
+func MultiLevelWriter(writers ...io.Writer) LevelWriter {
+ lwriters := make([]LevelWriter, 0, len(writers))
+ for _, w := range writers {
+ if lw, ok := w.(LevelWriter); ok {
+ lwriters = append(lwriters, lw)
+ } else {
+ lwriters = append(lwriters, levelWriterAdapter{w})
+ }
+ }
+ return multiLevelWriter{lwriters}
+}
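
A brief usage sketch for MultiLevelWriter; the file name and flags are placeholders, and any other io.Writer (a ConsoleWriter, for example) could be added to the list.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Duplicate every event to stderr and a log file.
	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	multi := zerolog.MultiLevelWriter(os.Stderr, f)
	logger := zerolog.New(multi).With().Timestamp().Logger()
	logger.Info().Msg("written to both destinations")
}
```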
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 000000000..c7b459e4d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 000000000..0d6e61793
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,48 @@
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ - deadcode
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ - gas
+ #- gochecknoinits
+ - goconst
+ #- gocritic
+ #- gocyclo
+ #- gofmt
+ - goimports
+ - golint
+ #- gomnd
+ #- goprintffuncname
+ #- gosec
+ #- gosimple
+ - govet
+ - ineffassign
+ - interfacer
+ #- lll
+ - maligned
+ - megacheck
+ #- misspell
+ #- nakedret
+ #- noctx
+ #- nolintlint
+ #- rowserrcheck
+ #- scopelint
+ #- staticcheck
+ - structcheck
+ #- stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ #- unused
+ - varcheck
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 000000000..94ec53068
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 000000000..e0a3b5004
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,28 @@
+language: go
+
+stages:
+ - test
+ - build
+
+go:
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+env: GO111MODULE=on
+
+before_install:
+ - go get -u github.com/kyoh86/richgo
+ - go get -u github.com/mitchellh/gox
+ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
+
+matrix:
+ allow_failures:
+ - go: tip
+ include:
+ - stage: build
+ go: 1.13.x
+ script: make cobra_generator
+
+script:
+ - make test
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
new file mode 100644
index 000000000..8a23b4f85
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CHANGELOG.md
@@ -0,0 +1,51 @@
+# Cobra Changelog
+
+## v1.1.3
+
+* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
+
+## v1.1.2
+
+### Notable Changes
+
+* Bump license year to 2021 in golden files (#1309) @Bowbaq
+* Enhance PowerShell completion with custom comp (#1208) @Luap99
+* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
+* Documentation readability improvements (#1228 etc.) @zaataylor etc.
+* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
+
+## v1.1.1
+
+* **Fix:** yaml.v2 2.3.0 contained an unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
+* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
+
+## v1.1.0
+
+### Notable Changes
+
+* Extend Go completions and revamp zsh comp (#1070)
+* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
+* Add completion for help command (#1136)
+* Complete subcommands when TraverseChildren is set (#1171)
+* Fix stderr printing functions (#894)
+* fix: fish output redirection (#1247)
+
+## v1.0.0
+
+Announcing v1.0.0 of Cobra. đ
+
+### Notable Changes
+* Fish completion (including support for Go custom completion) @marckhouzam
+* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
+* Remove/replace SetOutput on Command - deprecated @jpmcb
+* add support for autolabel stale PR @xchapter7x
+* Add Labeler Actions @xchapter7x
+* Custom completions coded in Go (instead of Bash) @marckhouzam
+* Partial Revert of #922 @jharshman
+* Add Makefile to project @jharshman
+* Correct documentation for InOrStdin @desponda
+* Apply formatting to templates @jharshman
+* Revert change so help is printed on stdout again @marckhouzam
+* Update md2man to v2.0.0 @pdf
+* update viper to v1.4.0 @umarcor
+* Update cmd/root.go example in README.md @jharshman
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 000000000..9d16f88fd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 000000000..6f356e6a8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask them in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+ similar one was already opened. If there is one already opened, feel free
+ to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+ reproduction, the version of Cobra and anything else you believe will be
+ useful to help troubleshoot it (e.g. OS environment, environment variables,
+ etc...). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+ a clear title and description of what the feature is and why it would be
+ beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+ sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+ for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, ensure the new code is properly formatted to
+   keep the codebase consistent. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create a new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 000000000..472c73bf1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,40 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
+ifeq (, $(shell which richgo))
+$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+endif
+
+.PHONY: fmt lint test cobra_generator install_deps clean
+
+default: all
+
+all: fmt test cobra_generator
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps lint
+ $(info ******************** running tests ********************)
+ richgo test -v ./...
+
+cobra_generator: install_deps
+ $(info ******************** building generator ********************)
+ mkdir -p $(BIN)
+ make -C cobra all
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..a1b13ddda
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,760 @@
+
+
+Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
+
+Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+
+[Test workflow](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[Travis CI](https://travis-ci.org/spf13/cobra)
+[GoDoc](https://godoc.org/github.com/spf13/cobra)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/cobra)
+[Slack](https://gophers.slack.com/archives/CD3LP1199)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+ * [Using the Cobra Generator](#using-the-cobra-generator)
+ * [Using the Cobra Library](#using-the-cobra-library)
+ * [Working with Flags](#working-with-flags)
+ * [Positional and Custom Arguments](#positional-and-custom-arguments)
+ * [Example](#example)
+ * [Help Command](#help-command)
+ * [Usage Message](#usage-message)
+ * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](#generating-documentation-for-your-command)
+ * [Generating shell completions](#generating-shell-completions)
+- [Contributing](CONTRIBUTING.md)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also an application that will generate your application scaffolding to rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE.`
+ or
+`APPNAME COMMAND ARG --FLAG`
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command we are telling Git to clone the URL bare.
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have children commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to children commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow the organizational structure below:
+
+```
+  ▾ appName/
+    ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
+}
+
+func init() {
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
+
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ cobra.CheckErr(err)
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+### Returning and handling errors
+
+If you wish to return an error to the caller of a command, `RunE` can be used.
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(tryCmd)
+}
+
+var tryCmd = &cobra.Command{
+ Use: "try",
+ Short: "Try and possibly fail at something",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := someFunc(); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+```
+
+The error can then be caught at the execute function call.
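+
+As a minimal sketch (assuming the `rootCmd` and `Execute` wiring shown earlier), the
+error returned from `RunE` surfaces from `rootCmd.Execute()`, where `main` can handle it:
+
+```go
+func main() {
+    // Execute returns the error produced by the matched command's RunE.
+    if err := rootCmd.Execute(); err != nil {
+        fmt.Fprintln(os.Stderr, err)
+        os.Exit(1)
+    }
+}
+```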
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to define
+a variable with the appropriate scope outside the command so the flag can
+be assigned to it.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, which will only apply to that specific command.
+
+```go
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command, and any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound with `viper`.
+**Note**: the variable `author` will not be set to the value from the config file
+when the `--author` flag is not provided by the user.
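+
+As a minimal sketch (assuming the binding above), the effective value can be read
+through viper, which falls back to the config file when the flag is not set:
+
+```go
+// Returns the --author flag value if it was passed on the command line,
+// otherwise the value from the config file (or the flag's default).
+author := viper.GetString("author")
+fmt.Println("author:", author)
+```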
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+Or, for persistent flags:
+```go
+rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkPersistentFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
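+A built-in validator is assigned directly to the `Args` field. For instance, a minimal
+sketch using a hypothetical `greet` command that requires exactly one argument:
+
+```go
+var greetCmd = &cobra.Command{
+    Use:   "greet [name]",
+    Short: "Greet someone",
+    Args:  cobra.ExactArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+        fmt.Println("Hello,", args[0])
+    },
+}
+```
+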
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires a color argument")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any children commands.
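+
+For instance, a minimal sketch that overrides only the help template (the template text
+below is an arbitrary example, not the default):
+
+```go
+rootCmd.SetHelpTemplate(`{{.Long}}
+
+Usage:
+  {{.UseLine}}
+`)
+```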
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
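+
+A minimal sketch (the application name and version string are placeholders):
+
+```go
+var rootCmd = &cobra.Command{
+    Use:     "myapp",
+    Version: "0.1.0",
+}
+
+func init() {
+    // Optional: customize what `myapp --version` prints.
+    rootCmd.SetVersionTemplate("myapp version {{.Version}}\n")
+}
+```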
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but that make sense in your set of commands, and for strings you would not want as aliases. Example (a Go sketch follows the output below):
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
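+
+A minimal sketch (not taken from kubectl) of how a command can declare the names it
+should be suggested for, using a hypothetical `delete` command:
+
+```go
+var deleteCmd = &cobra.Command{
+    Use:   "delete",
+    Short: "Delete the specified resource",
+    // Suggest `delete` when the user types these related, non-existent commands.
+    SuggestFor: []string{"remove", "rm"},
+    Run: func(cmd *cobra.Command, args []string) {},
+}
+```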
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md).
+
+## Generating shell completions
+
+Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..70e9b2629
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,109 @@
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description is following a tab character.
+ var validArgs []string
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.Split(v, "\t")[0])
+ }
+
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if
+// there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+func ExactValidArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if err := ExactArgs(n)(cmd, args); err != nil {
+ return err
+ }
+ return OnlyValidArgs(cmd, args)
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..710614793
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,681 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is not directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subDir
+ # Use printf to strip any trailing newline
+ subdir=$(printf "%%s" "${out[0]}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out[*]}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+func writePostscript(buf io.StringWriter, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
+ local cur prev words cword
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function
+ local last_command
+ local nouns=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
+ if len(flag.NoOptDefVal) == 0 {
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ }
+}
+
+// Setup annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of the __<program>_go_custom_completion function
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf io.StringWriter, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ WriteStringAndCheck(buf, ` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ // localNonPersistentFlags are used to stop the completion of subcommands when one is set;
+ // if TraverseChildren is true we should still allow subcommands to be completed
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.Split(value, "\t")[0]
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Strings(cmd.Aliases)
+
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
+}
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
+ for _, value := range cmd.ArgAliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf io.StringWriter, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ WriteStringAndCheck(buf, "}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
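
The exported entry points above, `GenBashCompletion` and `GenBashCompletionFile`, are what a consuming CLI calls; the rest of this file only assembles the script text. Below is a minimal sketch of how a program might expose them. The `completion` subcommand and `example` root command are illustrative assumptions, not part of this change.

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "example"}

	// Hypothetical subcommand that streams the generated bash script to stdout.
	rootCmd.AddCommand(&cobra.Command{
		Use:   "completion",
		Short: "Generate a bash completion script",
		RunE: func(cmd *cobra.Command, args []string) error {
			// GenBashCompletion writes the preamble, the per-command functions and the postscript.
			return cmd.Root().GenBashCompletion(os.Stdout)
		},
	})

	cobra.CheckErr(rootCmd.Execute())
}
```

Redirecting the output of such a command into the shell's completion directory (the exact path varies by distribution) would then install the generated script.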
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 000000000..130f99b92
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,91 @@
+# Generating Bash Completions For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
+## Bash legacy dynamic completions
+
+For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
+
+The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
+
+Some code that works in kubernetes:
+
+```bash
+const (
+ bash_completion_func = `__kubectl_parse_get()
+{
+ local kubectl_output out
+ if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+ out=($(echo "${kubectl_output}" | awk '{print $1}'))
+ COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+ fi
+}
+
+__kubectl_get_resource()
+{
+ if [[ ${#nouns[@]} -eq 0 ]]; then
+ return 1
+ fi
+ __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+ if [[ $? -eq 0 ]]; then
+ return 0
+ fi
+}
+
+__kubectl_custom_func() {
+ case ${last_command} in
+ kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+ __kubectl_get_resource
+ return
+ ;;
+ *)
+ ;;
+ esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+ Use: "kubectl",
+ Short: "kubectl controls the Kubernetes cluster manager",
+ Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+ Run: runHelp,
+ BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor is unable to find a solution. In the case of Kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]`, `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper, `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`, so it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to Kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+Similarly, for flags:
+
+```go
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+ flag := &pflag.Flag{
+ Name: "namespace",
+ Usage: usage,
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+ local template
+ template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+ local kubectl_out
+ if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+ COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+ fi
+}
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..d6cbfd719
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,222 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows automatic prefix matching to be enabled. Automatic prefix matching can be a dangerous thing
+// to enable automatically in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
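
cobra.go mostly carries template helpers and package-level knobs (`EnablePrefixMatching`, `EnableCommandSorting`, `OnInitialize`, `AddTemplateFunc`, `CheckErr`, `WriteStringAndCheck`). The short sketch below shows the exported pieces in use; the command name and template text are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// Functions registered here run when any command's Execute is called.
	cobra.OnInitialize(func() { fmt.Println("loading config...") })

	// Make an extra "upper" function available to usage/help templates,
	// next to the built-ins (rpad, trimTrailingWhitespaces, ...).
	cobra.AddTemplateFunc("upper", strings.ToUpper)

	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	// The custom template function is only rendered when usage is printed.
	cmd.SetUsageTemplate("Usage: {{upper .UseLine}}\n")

	// CheckErr prints "Error: ..." and exits non-zero if Execute fails.
	cobra.CheckErr(cmd.Execute())
}
```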
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..d6732ad11
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1666 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ // Recommended syntax is as follows:
+ // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ // ... indicates that you can specify multiple values for the previous argument.
+ // | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ // argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ // optional, they are enclosed in brackets ([ ]).
+ // Example: add [-F file | -D dir]... [-f format] profile
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in bash completions
+ ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the bash completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction holds custom bash functions used by the bash autocompletion generator.
+ BashCompletionFunction string
+
+ // Deprecated, if non-empty, marks this command as deprecated; this string is printed when the command is used.
+ Deprecated string
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+
+ // FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // commandsAreSorted defines whether the command slice is sorted.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+}
+
+// Context returns the underlying command context. If the command wasn't
+// executed with ExecuteContext, Context returns the background context.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetArgs sets arguments for the command. It defaults to os.Args[1:]; it can be overridden if desired,
+// which is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ CheckErr(c.Usage())
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding return padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding return padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate return help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate return version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help as the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpCmd()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the hidden command to be used for bash completion
+ c.initCompleteCmd(args)
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If a subcommand was found but parsing then failed, report the error against that subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.PrintErrln("Error:", err.Error())
+ c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.PrintErrln("Error:", err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds the default help flag to c.
+// It is called automatically when c is executed or when help and usage are requested.
+// If c already has a help flag, it does nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds the default version flag to c.
+// It is called automatically when c is executed.
+// If c already has a version flag, it does nothing.
+// If c.Version is empty, it does nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ }
+}
+
+// InitDefaultHelpCmd adds the default help command to c.
+// It is called automatically when c is executed or when help and usage are requested.
+// If c already has a help command, or c has no subcommands, it does nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+ ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ var completions []string
+ cmd, _, e := c.Root().Find(args)
+ if e != nil {
+ return nil, ShellCompDirectiveNoFileComp
+ }
+ if cmd == nil {
+ // Root help command.
+ cmd = c.Root()
+ }
+ for _, subCmd := range cmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+ }
+ return completions, ShellCompDirectiveNoFileComp
+ },
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ CheckErr(c.Root().Usage())
+ } else {
+ cmd.InitDefaultHelpFlag() // make the 'help' flag available so it can be shown
+ CheckErr(cmd.Help())
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or if sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of aliases start
+// with prefix
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns the command name and all aliases, joined as a comma-separated string.
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new one.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
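For reference, a minimal sketch (not part of the vendored diff) of how the flag accessors above behave; the demo/child command names and the --verbose/--limit flags are hypothetical:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical commands used only to illustrate the accessors above.
	root := &cobra.Command{Use: "demo"}
	child := &cobra.Command{Use: "child", Run: func(*cobra.Command, []string) {}}
	root.AddCommand(child)

	// Persistent flags on the root are inherited by children;
	// plain flags on the child stay local to it.
	root.PersistentFlags().Bool("verbose", false, "enable verbose output")
	child.Flags().Int("limit", 10, "max results")

	// InheritedFlags() exposes --verbose (merged in from the parent's persistent set),
	// while LocalNonPersistentFlags() exposes only --limit.
	fmt.Println(child.InheritedFlags().Lookup("verbose") != nil)        // true
	fmt.Println(child.LocalNonPersistentFlags().Lookup("limit") != nil) // true
}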
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..6159c1cc1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..8768b1736
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,26 @@
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
new file mode 100644
index 000000000..fa060c147
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -0,0 +1,557 @@
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ // This currently does not work for zsh or bash < 4
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveFilterFileExt indicates that the provided completions
+ // should be used as file extension filters.
+ // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
+ // is a shortcut to using this directive explicitly. The BashCompFilenameExt
+ // annotation can also be used to obtain the same behavior for flags.
+ ShellCompDirectiveFilterFileExt
+
+ // ShellCompDirectiveFilterDirs indicates that only directory names should
+ // be provided in file completion. To request directory names within another
+ // directory, the returned completions should specify the directory within
+ // which to search. The BashCompSubdirsInDir annotation can be used to
+ // obtain the same behavior but only for flags.
+ ShellCompDirectiveFilterDirs
+
+ // ===========================================================================
+
+ // All directives using iota should be above this one.
+ // For internal use.
+ shellCompDirectiveMaxValue
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ // This one must be last to avoid messing up the iota count.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if d&ShellCompDirectiveFilterFileExt != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterFileExt")
+ }
+ if d&ShellCompDirectiveFilterDirs != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterDirs")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d >= shellCompDirectiveMaxValue {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// Adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+ for _, comp := range completions {
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.Split(comp, "\t")[0]
+ }
+
+ // Make sure we only write the first line to the output.
+ // This is needed if a description contains a linebreak.
+ // Otherwise the shell scripts will interpret the other lines as new flags
+ // and could therefore provide a wrong completion.
+ comp = strings.Split(comp, "\n")[0]
+
+ // Finally trim the completion. This is especially important to get rid
+				// of a trailing tab when there is no description following it.
+ // For example, a sub-command without a description should not be completed
+ // with a tab at the end (or else zsh will show a -- following it
+ // although there is no description).
+ comp = strings.TrimSpace(comp)
+
+ // Print each possible completion to stdout for the completion script to consume.
+ fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+ }
+
+ if directive >= shellCompDirectiveMaxValue {
+ directive = ShellCompDirectiveDefault
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+			// The directive integer must be the last character following a single colon (:).
+			// The completion script expects :<directive>
+ fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ var finalCmd *Command
+ var finalArgs []string
+ var err error
+ // Find the real command for which completion must be performed
+ // check if we need to traverse here to parse local flags on parent commands
+ if c.Root().TraverseChildren {
+ finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
+ } else {
+ finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
+ }
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+ }
+
+ // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also
+ // remove the flag name argument from the list of finalArgs or else the parsing
+ // could fail due to an invalid value (incomplete) for the flag.
+ flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+ if err != nil {
+ // Error while attempting to parse flags
+ return finalCmd, []string{}, ShellCompDirectiveDefault, err
+ }
+
+ // Parse the flags early so we can check if required flags are set
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ if flag != nil {
+ // Check if we are completing a flag value subject to annotations
+ if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
+ if len(validExts) != 0 {
+ // File completion filtered by extensions
+ return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
+ }
+
+ // The annotation requests simple file completion. There is no reason to do
+ // that since it is the default behavior anyway. Let's ignore this annotation
+ // in case the program also registered a completion function for this flag.
+ // Even though it is a mistake on the program's side, let's be nice when we can.
+ }
+
+ if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
+ if len(subDir) == 1 {
+ // Directory completion from within a directory
+ return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
+ }
+ // Directory completion
+ return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
+ }
+ }
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag name to be complete
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
+ var completions []string
+
+ // First check for required flags
+ completions = completeRequireFlags(finalCmd, toComplete)
+
+ // If we have not found any required flags, only then can we show regular flags
+ if len(completions) == 0 {
+ doCompleteFlags := func(flag *pflag.Flag) {
+ if !flag.Changed ||
+ strings.Contains(flag.Value.Type(), "Slice") ||
+ strings.Contains(flag.Value.Type(), "Array") {
+ // If the flag is not already present, or if it can be specified multiple times (Array or Slice)
+ // we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+
+			// We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ }
+
+ directive := ShellCompDirectiveNoFileComp
+ if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
+ // If there is a single completion, the shell usually adds a space
+ // after the completion. We don't want that if the flag ends with an =
+ directive = ShellCompDirectiveNoSpace
+ }
+ return finalCmd, completions, directive, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ var completions []string
+ directive := ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
+
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+			// - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
+ }
+ }
+ }
+
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
+ }
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
+ }
+
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil {
+ completionFn = flagCompletionFunctions[flag]
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn != nil {
+ // Go custom completion defined for this flag or command.
+ // Call the registered completion function to get the completions.
+ var comps []string
+ comps, directive = completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ }
+
+ return finalCmd, completions, directive, nil
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ // Why suggest both long forms: --flag and --flag= ?
+ // This forces the user to *always* have to type either an = or a space after the flag name.
+ // Let's be nice and avoid making users have to do that.
+ // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
+ // The = form will still work, we just won't suggest it.
+ // This also makes the list of suggested flags shorter as we avoid all the = forms.
+ //
+ // if len(flag.NoOptDefVal) == 0 {
+ // // Flag requires a value, so it can be suffixed with =
+ // flagName += "="
+ // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ // }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func completeRequireFlags(finalCmd *Command, toComplete string) []string {
+ var completions []string
+
+ doCompleteRequiredFlags := func(flag *pflag.Flag) {
+ if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
+ if !flag.Changed {
+ // If the flag is not already present, we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+ }
+
+	// We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ if finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ return nil, args, lastArg, nil
+ }
+
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as that function
+ // requires the flag name to be complete
+ if len(lastArg) > 0 && lastArg[0] == '-' {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ // Flag with an =
+ flagName = strings.TrimLeft(lastArg[:index], "-")
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ // Normal flag completion
+ return nil, args, lastArg, nil
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ flagName = strings.TrimLeft(prevArg, "-")
+
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command, nothing to complete
+ err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
+ return nil, nil, "", err
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+			// We had assumed we were dealing with a two-word flag but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ WriteStringAndCheck(f, msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
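To show how the completion hooks vendored above are meant to be wired up, a small sketch under assumed names (the get command, the --output flag, and the suggested values are hypothetical, not part of this diff):

package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical command whose positional arguments complete dynamically
	// via ValidArgsFunction (consumed by getCompletions above).
	cmd := &cobra.Command{
		Use: "get",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"pods", "nodes", "services"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(*cobra.Command, []string) {},
	}

	cmd.Flags().String("output", "", "output format")

	// Flag value completion: invoked when the shell script calls the hidden
	// __complete command while the user completes `get --output <TAB>`.
	_ = cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"json", "yaml", "table"}, cobra.ShellCompDirectiveNoFileComp
	})

	_ = cmd.Execute()
}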
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 000000000..3e112347d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,207 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.Replace(nameForVar, "-", "_", -1)
+ nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+function __%[1]s_debug
+ set file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+
+ set args (string split -- " " "$argv")
+ set lastArg "$args[-1]"
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ set emptyArg ""
+ if test -z "$lastArg"
+ __%[1]s_debug "Setting emptyArg"
+ set emptyArg \"\"
+ end
+ __%[1]s_debug "emptyArg: $emptyArg"
+
+ if not type -q "$args[1]"
+ # This can happen when "complete --do-complete %[2]s" is called when running this script.
+ __%[1]s_debug "Cannot find $args[1]. No completions."
+ return
+ end
+
+ set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg"
+ __%[1]s_debug "Calling $requestComp"
+
+ set results (eval $requestComp 2> /dev/null)
+ set comps $results[1..-2]
+ set directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# This function does three things:
+# 1- Obtain the completions and store them in the global __%[1]s_comp_results
+# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
+# and unset it otherwise
+# 3- Return true if the completion results are not empty
+function __%[1]s_prepare_completions
+ # Start fresh
+ set --erase __%[1]s_comp_do_file_comp
+ set --erase __%[1]s_comp_results
+
+ # Check if the command-line is already provided. This is useful for testing.
+ if not set --query __%[1]s_comp_commandLine
+ # Use the -c flag to allow for completion in the middle of the line
+ set __%[1]s_comp_commandLine (commandline -c)
+ end
+ __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
+
+ set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
+ set --erase __%[1]s_comp_commandLine
+ __%[1]s_debug "Completion results: $results"
+
+ if test -z "$results"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 1
+ end
+
+ set directive (string sub --start 2 $results[-1])
+ set --global __%[1]s_comp_results $results[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ set shellCompDirectiveError %[4]d
+ set shellCompDirectiveNoSpace %[5]d
+ set shellCompDirectiveNoFileComp %[6]d
+ set shellCompDirectiveFilterFileExt %[7]d
+ set shellCompDirectiveFilterDirs %[8]d
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 1
+ end
+
+ set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ if test $filefilter -eq 1; or test $dirfilter -eq 1
+ __%[1]s_debug "File extension filtering or directory filtering not supported"
+ # Do full file completion instead
+ set --global __%[1]s_comp_do_file_comp 1
+ return 1
+ end
+
+ set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # Important not to quote the variable for count to work
+ set numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # To support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ __%[1]s_debug "Requesting file completion"
+ set --global __%[1]s_comp_do_file_comp 1
+ end
+
+ # If we don't want file completion, we must return true even if there
+ # are no completions found. This is because fish will perform the last
+ # completion command, even if its condition is false, if no other
+ # completion command was triggered
+ return (not set --query __%[1]s_comp_do_file_comp)
+end
+
+# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
+# so we can properly delete any completions provided by another script.
+# The space after the program name is essential to trigger completion for the program
+# and not completion of the program name itself.
+complete --do-complete "%[2]s " > /dev/null 2>&1
+# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+
+# Remove any pre-existing completions for the program since we will be handling all of them.
+complete -c %[2]s -e
+
+# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
+# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
+#
+# This completion will be run second as complete commands are added FILO.
+# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
+complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp'
+
+# This completion will be run first as complete commands are added FILO.
+# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
+# It provides the program's completion choices.
+complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+
+`, nameForVar, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
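A sketch of how a program might expose the fish generator above as a subcommand; the demo root and the completion subcommand name are assumptions, not something this diff adds:

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}

	// Hypothetical subcommand that writes the fish completion script to stdout.
	root.AddCommand(&cobra.Command{
		Use:   "completion",
		Short: "Generate a fish completion script",
		RunE: func(cmd *cobra.Command, args []string) error {
			// true includes completion descriptions (see GenFishCompletion above).
			return root.GenFishCompletion(os.Stdout, true)
		},
	})

	_ = root.Execute()
}

The output would typically be redirected to a file under the user's fish completions directory (for example ~/.config/fish/completions/demo.fish), though the exact path depends on the local fish setup.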
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
new file mode 100644
index 000000000..19b2ed129
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.md
@@ -0,0 +1,4 @@
+## Generating Fish Completions For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
new file mode 100644
index 000000000..ff5614405
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -0,0 +1,12 @@
+module github.com/spf13/cobra
+
+go 1.12
+
+require (
+ github.com/cpuguy83/go-md2man/v2 v2.0.0
+ github.com/inconshreveable/mousetrap v1.0.0
+ github.com/mitchellh/go-homedir v1.1.0
+ github.com/spf13/pflag v1.0.5
+ github.com/spf13/viper v1.7.0
+ gopkg.in/yaml.v2 v2.4.0
+)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
new file mode 100644
index 000000000..9328ee3ee
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -0,0 +1,313 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 000000000..c55be71cd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,285 @@
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
+
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
+}
+
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
+}
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+    # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[3]d
+ $ShellCompDirectiveNoSpace=%[4]d
+ $ShellCompDirectiveNoFileComp=%[5]d
+ $ShellCompDirectiveFilterFileExt=%[6]d
+ $ShellCompDirectiveFilterDirs=%[7]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+ $RequestComp="$Program %[2]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+    # We cannot use $WordToComplete because it
+    # has the wrong values if the cursor was moved;
+    # use the last argument instead.
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+`+"        # We need to use `\"`\" to pass an empty argument; a \"\" or '' does not work."+`
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+`
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+    # Call the command, store the output in $Out, and redirect stderr and stdout to null.
+    # $Out is an array that contains one line per element.
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ $Values = $Out | ForEach-Object {
+ #Split the output in name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ @{Name="$Name";Description="$Description"}
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
+
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+
+        # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+        # Store $_ temporarily because switch will overwrite it
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+        # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function <mode>
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+                # Like MenuComplete but we don't want to add a space here because
+                # the user needs to press space anyway to get the completion.
+                # Description will not be shown because that's not possible with TabCompleteNext
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ }
+}
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 000000000..c449f1e5c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,3 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details.
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
new file mode 100644
index 000000000..d98a71e36
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -0,0 +1,38 @@
+## Projects using Cobra
+
+- [Arduino CLI](https://github.com/arduino/arduino-cli)
+- [Bleve](http://www.blevesearch.com/)
+- [CockroachDB](http://www.cockroachlabs.com/)
+- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+- [Delve](https://github.com/derekparker/delve)
+- [Docker (distribution)](https://github.com/docker/distribution)
+- [Etcd](https://etcd.io/)
+- [Gardener](https://github.com/gardener/gardenctl)
+- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
+- [Git Bump](https://github.com/erdaltsksn/git-bump)
+- [GitHub CLI](https://github.com/cli/cli)
+- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
+- [Golangci-lint](https://golangci-lint.run)
+- [GopherJS](http://www.gopherjs.org/)
+- [Helm](https://helm.sh)
+- [Hugo](https://gohugo.io)
+- [Istio](https://istio.io)
+- [Kool](https://github.com/kool-dev/kool)
+- [Kubernetes](http://kubernetes.io/)
+- [Linkerd](https://linkerd.io/)
+- [Mattermost-server](https://github.com/mattermost/mattermost-server)
+- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
+- [Moby (formerly Docker)](https://github.com/moby/moby)
+- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+- [OpenShift](https://www.openshift.com/)
+- [Ory Hydra](https://github.com/ory/hydra)
+- [Ory Kratos](https://github.com/ory/kratos)
+- [Pouch](https://github.com/alibaba/pouch)
+- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+- [Prototool](https://github.com/uber/prototool)
+- [Random](https://github.com/erdaltsksn/random)
+- [Rclone](https://rclone.org/)
+- [Skaffold](https://skaffold.dev/)
+- [Tendermint](https://github.com/tendermint/tendermint)
+- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
+- [Werf](https://werf.io/)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 000000000..d99bf91e5
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,84 @@
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows you
+// to register a Go function that will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows you
+// to register a Go function that will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
new file mode 100644
index 000000000..cd533ac3d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -0,0 +1,483 @@
+# Generating shell completions
+
+Cobra can generate shell completions for multiple shells.
+The currently supported shells are:
+- Bash
+- Zsh
+- fish
+- PowerShell
+
+If you are using the generator, you can create a completion command by running
+
+```bash
+cobra add completion
+```
+and then modifying the generated `cmd/completion.go` file to look something like this
+(writing the shell script to stdout allows the most flexible use):
+
+```go
+var completionCmd = &cobra.Command{
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
+ Long: `To load completions:
+
+Bash:
+
+ $ source <(yourprogram completion bash)
+
+ # To load completions for each session, execute once:
+ # Linux:
+ $ yourprogram completion bash > /etc/bash_completion.d/yourprogram
+ # macOS:
+ $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
+
+Zsh:
+
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
+
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ # To load completions for each session, execute once:
+ $ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
+
+ # You will need to start a new shell for this setup to take effect.
+
+fish:
+
+ $ yourprogram completion fish | source
+
+ # To load completions for each session, execute once:
+ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+
+PowerShell:
+
+ PS> yourprogram completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> yourprogram completion powershell > yourprogram.ps1
+ # and source this file from your PowerShell profile.
+`,
+ DisableFlagsInUseLine: true,
+ ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
+ Args: cobra.ExactValidArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ switch args[0] {
+ case "bash":
+ cmd.Root().GenBashCompletion(os.Stdout)
+ case "zsh":
+ cmd.Root().GenZshCompletion(os.Stdout)
+ case "fish":
+ cmd.Root().GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ cmd.Root().GenPowerShellCompletion(os.Stdout)
+ }
+ },
+}
+```
+
+**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script, so those messages must be removed.
+
+# Customizing completions
+
+The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values.
+
+## Completion of nouns
+
+### Static completion of nouns
+
+Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them.
+Some simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ cobra.CheckErr(RunGet(f, out, cmd, args))
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like:
+
+```bash
+$ kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+#### Aliases for nouns
+
+If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+$ kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
+replication controllers following `rc`.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "Display the status of the named release",
+ Long: status_long,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return RunGet(args[0])
+	},
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` sub-command. Assuming the Helm releases on the cluster are `harbor`, `notary`, `rook`, and `thanos`, this dynamic completion will give results like:
+
+```bash
+$ helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that allow you to control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator, for example `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`:
+```go
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies none of the other directives).
+ShellCompDirectiveDefault
+
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+ShellCompDirectiveNoFileComp
+
+// Indicates that the returned completions should be used as file extension filters.
+// For example, to complete only files of the form *.json or *.yaml:
+// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt
+// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename()
+// is a shortcut to using this directive explicitly.
+//
+ShellCompDirectiveFilterFileExt
+
+// Indicates that only directory names should be provided in file completion.
+// For example:
+// return nil, ShellCompDirectiveFilterDirs
+// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly.
+//
+// To request directory names within another directory, the returned completions
+// should specify a single directory name within which to search. For example,
+// to complete directories within "themes/":
+// return []string{"themes"}, ShellCompDirectiveFilterDirs
+//
+ShellCompDirectiveFilterDirs
+```
+
+***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
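+
+For instance, a completion function can read the value of a flag that Cobra has already parsed. A minimal sketch (the `--namespace` flag and the `listReleasesInNamespace` helper are hypothetical, shown only for illustration):
+
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	// Flags have already been parsed at this point, so their values can be read directly.
+	ns, _ := cmd.Flags().GetString("namespace") // hypothetical --namespace flag
+	return listReleasesInNamespace(ns, toComplete), cobra.ShellCompDirectiveNoFileComp
+},
+```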
+
+#### Debugging
+
+Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+$ helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command:
+```bash
+$ helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
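+As an illustration, a completion function could trace its inputs like so (a minimal sketch; the message text is arbitrary):
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	// Written to the file named by BASH_COMP_DEBUG_FILE (if set), never to stdout.
+	cobra.CompDebugln("completing nouns, toComplete="+toComplete, false)
+	return nil, cobra.ShellCompDirectiveDefault
+},
+```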
+***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned above.
+
+## Completions for flags
+
+### Mark flags as required
+
+Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so:
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+$ kubectl exec [tab][tab]
+-c --container= -p --pod=
+```
+
+### Specify dynamic flag completion
+
+As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function.
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+$ helm status --output [tab][tab]
+json table yaml
+```
+
+#### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+$ helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned above.
+
+### Specify valid filename extensions for flags that take a filename
+
+To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagFilename(flagName, "yaml", "json")
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt
+})
+```
+
+### Limit flag completions to directory names
+
+To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagDirname(flagName)
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return nil, cobra.ShellCompDirectiveFilterDirs
+})
+```
+To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so:
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs
+})
+```
+### Descriptions for completions
+
+`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh:
+```
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+```
+while using fish:
+```
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+```
+
+Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp
+}}
+```
+or
+```go
+ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
+```
+## Bash completions
+
+### Dependencies
+
+The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion))
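+
+For example, the `Long` help text of the completion command shown earlier could be extended with install hints (a sketch; the package manager commands below are common defaults and may differ on your system):
+
+```go
+// Sketch: append install hints to the completion command's help text.
+completionCmd.Long += `
+If bash completions do not work, install the bash-completion package first:
+  sudo apt install bash-completion   # Debian/Ubuntu
+  brew install bash-completion       # macOS (Homebrew)
+`
+```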
+
+### Aliases
+
+You can also configure `bash` aliases for your program and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname
+completion firstcommand secondcommand
+```
+### Bash legacy dynamic completions
+
+For backward compatibility, Cobra still supports its bash legacy dynamic completion solution.
+Please refer to [Bash Completions](bash_completions.md) for details.
+
+## Zsh completions
+
+Cobra supports native zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` and be named
+`_<yourprogram>`. You will need to start a new shell for the completions to become available.
+
+Zsh supports descriptions for completions. Cobra will provide the description automatically,
+based on usage information. Cobra provides a way to completely disable such descriptions by
+using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
+this a configurable option for your users, as sketched after the example output below.
+```
+# With descriptions
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
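+
+A minimal sketch of making this configurable (the `--no-descriptions` flag name is an assumption, not something Cobra defines):
+
+```go
+var noDescriptions bool
+completionCmd.Flags().BoolVar(&noDescriptions, "no-descriptions", false, "disable completion descriptions")
+
+// Inside the completion command's Run function, for the zsh case:
+if noDescriptions {
+	cmd.Root().GenZshCompletionNoDesc(os.Stdout)
+} else {
+	cmd.Root().GenZshCompletion(os.Stdout)
+}
+```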
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+
+### Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
+Please refer to [Zsh Completions](zsh_completions.md) for details.
+
+## fish completions
+
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable for your users.
+```
+# With descriptions
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `fish`:
+ * `BashCompFilenameExt` (filtering by file extension)
+ * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
+ * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+ * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `fish`:
+ * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+ * `ShellCompDirectiveFilterDirs` (filtering by directory)
+
+## PowerShell completions
+
+Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions, use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable for your users.
+
+The script is designed to support all three PowerShell completion modes:
+
+* TabCompleteNext (default windows style - on each key press the next option is displayed)
+* Complete (works like bash)
+* MenuComplete (works like zsh)
+
+You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
+
+Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
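+
+For example, a dedicated sub-command or release script could write the completion script to a file that users then dot-source from their profile (a sketch; the output file name `yourprogram.ps1` is arbitrary):
+
+```go
+// Sketch: write the completion script (with descriptions) to a file;
+// users then dot-source it from the file pointed to by $PROFILE.
+cobra.CheckErr(cmd.Root().GenPowerShellCompletionFileWithDesc("yourprogram.ps1"))
+```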
+
+```
+# With descriptions and Mode 'Complete'
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# With descriptions and Mode 'MenuComplete'. The description of the currently selected value will be displayed below the suggestions.
+$ helm s[tab]
+search show status
+
+search for a keyword in charts
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `powershell`:
+ * `BashCompFilenameExt` (filtering by file extension)
+ * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`:
+ * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+ * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `powershell`:
+ * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+ * `ShellCompDirectiveFilterDirs` (filtering by directory)
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..2e840285f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,240 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// GenZshCompletionFile generates zsh completion file including descriptions.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ return c.genZshCompletionFile(filename, true)
+}
+
+// GenZshCompletion generates zsh completion file including descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ return c.genZshCompletion(w, true)
+}
+
+// GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
+ return c.genZshCompletionFile(filename, false)
+}
+
+// GenZshCompletionNoDesc generates zsh completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
+ return c.genZshCompletion(w, false)
+}
+
+// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+// not consistent with Bash completion. It has therefore been disabled.
+// Instead, when no other completion is specified, file completion is done by
+// default for every argument. One can disable file completion on a per-argument
+// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+// To achieve file extension filtering, one can use ValidArgsFunction and
+// ShellCompDirectiveFilterFileExt.
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ return nil
+}
+
+// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+// been disabled.
+// To achieve the same behavior across all shells, one can use
+// ValidArgs (for the first argument only) or ValidArgsFunction for
+// any argument (can include the first one also).
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ return nil
+}
+
+func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genZshCompletion(outFile, includeDesc)
+}
+
+func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genZshComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
+
+# zsh completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ local file="$BASH_COMP_DEBUG_FILE"
+ if [[ -n ${file} ]]; then
+ echo "$*" >> "${file}"
+ fi
+}
+
+_%[1]s()
+{
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp
+ local -a completions
+
+ __%[1]s_debug "\n========= starting completion logic =========="
+ __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CURRENT location, so we need
+ # to truncate the command-line ($words) up to the $CURRENT location.
+ # (We cannot use $CURSOR as its value does not work when a command is an alias.)
+ words=("${=words[1,CURRENT]}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ lastParam=${words[-1]}
+ lastChar=${lastParam[-1]}
+ __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
+
+ # For zsh, when completing a flag with an = (e.g., %[1]s -n=)
+ # completions must be prefixed with the flag
+ setopt local_options BASH_REMATCH
+ if [[ "${lastParam}" =~ '-.*=' ]]; then
+ # We are dealing with a flag with an =
+ flagPrefix="-P ${BASH_REMATCH}"
+ fi
+
+ # Prepare the command to obtain completions
+ requestComp="${words[1]} %[2]s ${words[2,-1]}"
+ if [ "${lastChar}" = "" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "About to call: eval ${requestComp}"
+
+ # Use eval to handle any environment variables and such
+ out=$(eval ${requestComp} 2>/dev/null)
+ __%[1]s_debug "completion output: ${out}"
+
+ # Extract the directive integer following a : from the last line
+ local lastLine
+ while IFS='\n' read -r line; do
+ lastLine=${line}
+ done < <(printf "%%s\n" "${out[@]}")
+ __%[1]s_debug "last line: ${lastLine}"
+
+ if [ "${lastLine[1]}" = : ]; then
+ directive=${lastLine[2,-1]}
+ # Remove the directive including the : and the newline
+ local suffix
+ (( suffix=${#lastLine}+2))
+ out=${out[1,-$suffix]}
+ else
+ # There is no directive specified. Leave $out as is.
+ __%[1]s_debug "No directive found. Setting to default"
+ directive=0
+ fi
+
+ __%[1]s_debug "directive: ${directive}"
+ __%[1]s_debug "completions: ${out}"
+ __%[1]s_debug "flagPrefix: ${flagPrefix}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ __%[1]s_debug "Completion received error. Ignoring completions."
+ return
+ fi
+
+ compCount=0
+ while IFS='\n' read -r comp; do
+ if [ -n "$comp" ]; then
+ # If requested, completions are returned with a description.
+ # The description is preceded by a TAB character.
+ # For zsh's _describe, we need to use a : instead of a TAB.
+ # We first need to escape any : as part of the completion itself.
+ comp=${comp//:/\\:}
+
+ local tab=$(printf '\t')
+ comp=${comp//$tab/:}
+
+ ((compCount++))
+ __%[1]s_debug "Adding completion: ${comp}"
+ completions+=${comp}
+ lastComp=$comp
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local filteringCmd
+ filteringCmd='_files'
+ for filter in ${completions[@]}; do
+ if [ ${filter[1]} != '*' ]; then
+ # zsh requires a glob pattern to do file filtering
+ filter="\*.$filter"
+ fi
+ filteringCmd+=" -g $filter"
+ done
+ filteringCmd+=" ${flagPrefix}"
+
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ _arguments '*:filename:'"$filteringCmd"
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ subdir="${completions[1]}"
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "${subdir}" >/dev/null 2>&1
+ else
+ __%[1]s_debug "Listing directories in ."
+ fi
+
+ _arguments '*:dirname:_files -/'" ${flagPrefix}"
+ if [ -n "$subdir" ]; then
+ popd >/dev/null 2>&1
+ fi
+ elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then
+ __%[1]s_debug "Activating nospace."
+ # We can use compadd here as there is no description when
+ # there is only one completion.
+ compadd -S '' "${lastComp}"
+ elif [ ${compCount} -eq 0 ]; then
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+ else
+ # Perform file completion
+ __%[1]s_debug "activating file completion"
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
+ else
+ _describe "completions" completions $(echo $flagPrefix)
+ fi
+}
+
+# don't run the completion function when being source-ed or eval-ed
+if [ "$funcstack[1]" = "_%[1]s" ]; then
+ _%[1]s
+fi
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 000000000..7cff61787
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,48 @@
+## Generating Zsh Completion For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
+## Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced.
+
+### Deprecation summary
+
+See further below for more details on these deprecations.
+
+* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored.
+* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored.
+ * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` (see the sketch below).
+* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored.
+ * Instead use `ValidArgsFunction`.
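+
+As a rough sketch (the command name and file extensions below are hypothetical, not taken from Cobra's documentation), the `ValidArgsFunction` replacement for file extension filtering could look like this:
+
+```go
+cmd := &cobra.Command{
+	Use: "process [file]",
+	// Complete the positional argument with files ending in .yaml or .yml only.
+	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
+	},
+}
+```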
+
+### Behavioral changes
+
+**Noun completion**
+
+|Old behavior|New behavior|
+|---|---|
+|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis|
+|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`|
+|`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored|
+|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)|
+|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior|
+
+**Flag-value completion**
+
+|Old behavior|New behavior|
+|---|---|
+|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion|
+|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored|
+|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)|
+|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells|
+|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`|
+|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`|
+
+**Improvements**
+
+* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`)
+* File completion by default if no other completions found
+* Handling of required flags
+* File extension filtering no longer mutually exclusive with bash usage
+* Completion of directory names *within* another directory
+* Support for `=` form of flags
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 000000000..c3da29013
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 000000000..00d04cb9b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 000000000..63ed1cfea
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 000000000..7eacc5bdb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[Build Status](https://travis-ci.org/spf13/pflag)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/pflag)
+[GoDoc](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
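+
+For example, a minimal sketch of walking the positional arguments after parsing:
+
+``` go
+flag.Parse()
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Println("arg", i, "=", flag.Arg(i))
+}
+```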
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
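+
+As an illustration (the names `subcmd` and `verbose` below are made up for this sketch), an independent FlagSet for a subcommand could look like:
+
+``` go
+fs := flag.NewFlagSet("subcmd", flag.ExitOnError)
+verbose := fs.BoolP("verbose", "v", false, "verbose output")
+fs.Parse(os.Args[2:]) // parse only the arguments that follow the subcommand name
+fmt.Println("verbose:", *verbose)
+```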
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or a flag with a default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
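+
+For instance, with a hypothetical program `prog`, everything after `--` is passed through as a positional argument even if it looks like a flag:
+
+```
+prog --flagname=1357 arg1 -- --not-a-flag arg2
+```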
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here and should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 000000000..c4c5c0bfd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 000000000..3731370d6
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 000000000..67d530457
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 000000000..a0b2679f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like the FlagSet CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 000000000..e9debef88
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 000000000..badadda53
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string.
+// The argument p points to a duration[] variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 000000000..24a5036e9
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1239 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagval, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+ // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags
+ UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ // SortFlags indicates whether flags should be sorted in help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // If the user set the value (or if left to default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+ Deprecated string // If this flag is deprecated, this string describes what to use instead
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+ ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string describes what to use instead
+ Annotations map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any lookup of a flag name
+// will be translated as well. For example, a flag named "getURL" could be
+// normalized to "geturl"; a user passing "--getUrl" would likewise be
+// normalized to "geturl", and everything works as expected.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
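+
+// exampleWordSepNormalizeFunc is an illustrative sketch, not part of upstream
+// pflag: a normalization function that treats "_" and "-" in flag names as
+// equivalent, so "--my_flag" and "--my-flag" resolve to the same flag. It
+// would be registered with fs.SetNormalizeFunc(exampleWordSepNormalizeFunc).
+func exampleWordSepNormalizeFunc(f *FlagSet, name string) NormalizedName {
+ return NormalizedName(strings.Replace(name, "_", "-", -1))
+}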
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none has been set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprint(f.out(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType returns the value of the named flag converted to the given type.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
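+
+// exampleHideAndDeprecate is an illustrative sketch, not part of upstream
+// pflag: the flag names are hypothetical and assumed to have been defined on
+// fs already. Deprecated and hidden flags keep working but are omitted from
+// help and usage output.
+func exampleHideAndDeprecate(fs *FlagSet) {
+ _ = fs.MarkDeprecated("old-timeout", "use --timeout instead")
+ _ = fs.MarkHidden("internal-debug")
+}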
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == ""
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
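+
+// exampleUnquoteUsage is an illustrative sketch, not part of upstream pflag:
+// the back-quoted word in the usage string becomes the placeholder name, so
+// this returns ("count", "maximum count of retries before giving up").
+func exampleUnquoteUsage(fs *FlagSet) (name, usage string) {
+ fs.Int("retries", 0, "maximum `count` of retries before giving up")
+ return UnquoteUsage(fs.Lookup("retries"))
+}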
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprint(f.out(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprint(f.out(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
+ if len(arguments) < 0 {
+ return nil
+ }
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
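+
+// exampleBasicUsage is an illustrative sketch, not part of upstream pflag:
+// a minimal define/parse/read cycle on a standalone FlagSet. The flag names
+// and arguments are hypothetical.
+func exampleBasicUsage() {
+ fs := NewFlagSet("example", ContinueOnError)
+ port := fs.IntP("port", "p", 8080, "port to listen on")
+ if err := fs.Parse([]string{"-p", "9090", "positional-arg"}); err != nil {
+ return
+ }
+ _ = *port // 9090
+ _ = fs.Args() // ["positional-arg"]
+}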
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 000000000..a243f81f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 000000000..caa352741
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetFloat32Slice returns the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a []float32 flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
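+
+// exampleFloat32Slice is an illustrative sketch, not part of upstream pflag:
+// comma-separated values replace the default on the first use of the flag,
+// and repeating the flag appends further values.
+func exampleFloat32Slice() {
+ fs := NewFlagSet("example", ContinueOnError)
+ weights := fs.Float32Slice("weights", []float32{1.0}, "model weights")
+ _ = fs.Parse([]string{"--weights=0.5,1.5", "--weights=2.5"})
+ _ = *weights // [0.5 1.5 2.5]
+}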
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 000000000..04b5492a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 000000000..85bf3073d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetFloat64Slice returns the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a []float64 flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod
new file mode 100644
index 000000000..b2287eec1
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/go.mod
@@ -0,0 +1,3 @@
+module github.com/spf13/pflag
+
+go 1.12
diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 000000000..d3dd72b7f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
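+
+// exampleAdoptGoFlags is an illustrative sketch, not part of upstream pflag:
+// flags registered with the standard library's flag package (for example by
+// an imported library) are adopted into a pflag FlagSet before parsing. The
+// flag set names are hypothetical.
+func exampleAdoptGoFlags() {
+ gofs := goflag.NewFlagSet("library", goflag.ContinueOnError)
+ gofs.Bool("v", false, "verbose logging from a library using package flag")
+
+ fs := NewFlagSet("example", ContinueOnError)
+ fs.AddGoFlagSet(gofs)
+ // Both -v and --v now work; fs.Parse also calls gofs.Parse(nil) because
+ // gofs was recorded in addedGoFlagSets.
+ _ = fs.Parse([]string{"--v"})
+}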
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 000000000..1474b89df
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 000000000..f1a01d05e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 000000000..9b95944f0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 000000000..ff128ff06
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines an int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines a []int32 flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
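
Reviewer note: a small usage sketch for the Int32Slice flag added above — per Set(), the first occurrence replaces the default and later occurrences append. The flag name and values are made up for illustration.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	ports := fs.Int32Slice("port", []int32{8080}, "ports to listen on")

	// First use replaces the default; subsequent uses append.
	_ = fs.Parse([]string{"--port=80,443", "--port=8443"})
	fmt.Println(*ports) // [80 443 8443]
}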
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 000000000..0026d781d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 000000000..25464638f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines an int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines a []int64 flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 000000000..4da92228e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 000000000..e71c39d91
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
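
Reviewer note: an illustrative sketch of the lookup path (IntSlice plus GetIntSlice) defined above; the flag name and values are assumptions for the example only.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.IntSlice("retries", []int{1, 2, 4}, "retry back-off steps in seconds")

	_ = fs.Parse([]string{"--retries=5,10"})

	// GetIntSlice fetches the flag by name and verifies its registered type.
	steps, err := fs.GetIntSlice("retries")
	fmt.Println(steps, err) // [5 10] <nil>
}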
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 000000000..3d414ba69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
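
Reviewer note: a minimal sketch of the IPVar flag above; an unparsable address is rejected by Set() with a parse error. The flag name and addresses are illustrative.

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	var bind net.IP
	fs.IPVar(&bind, "bind", net.ParseIP("127.0.0.1"), "address to bind to")

	_ = fs.Parse([]string{"--bind=10.0.0.5"})
	fmt.Println(bind) // 10.0.0.5
}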
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 000000000..775faae4f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
+ return net.ParseIP(strings.TrimSpace(val)), nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
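
Reviewer note: a usage sketch for the IPSlice flag above, showing the quote-stripping and CSV handling in Set(). The flag name and addresses are made up.

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	dns := fs.IPSlice("dns", []net.IP{net.ParseIP("1.1.1.1")}, "DNS servers")

	// Set() strips quote characters and reads the value as CSV before parsing each IP.
	_ = fs.Parse([]string{`--dns="8.8.8.8","8.8.4.4"`})
	fmt.Println(*dns) // [8.8.8.8 8.8.4.4]
}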
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 000000000..5bd44bd21
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written either in IP form (e.g. 255.255.255.0) or in the hexadecimal form produced by net.IPMask.String() (e.g. ffffff00).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
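
Reviewer note: a short sketch of the two input forms ParseIPv4Mask above accepts; the example values are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Dotted-quad form.
	fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	// The 8-character hex form that net.IPMask.String() produces.
	fmt.Println(pflag.ParseIPv4Mask("ffffff00")) // ffffff00
	// Anything else yields nil.
	fmt.Println(pflag.ParseIPv4Mask("not-a-mask")) // <nil>
}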
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 000000000..e2c1b8bcd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
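
Reviewer note: a minimal sketch of the IPNet (CIDR) flag above; the flag name and subnets are assumptions for the example.

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	_, def, _ := net.ParseCIDR("10.0.0.0/8")
	subnet := fs.IPNet("subnet", *def, "allowed client subnet")

	_ = fs.Parse([]string{"--subnet=192.168.1.0/24"})
	fmt.Println(subnet.String()) // 192.168.1.0/24
}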
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 000000000..04e0a26ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
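
Reviewer note: a small sketch of StringVarP above, exercising the single-dash shorthand; the flag name and values are illustrative only.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	var name string
	fs.StringVarP(&name, "name", "n", "world", "who to greet")

	_ = fs.Parse([]string{"-n", "harvest"})
	fmt.Println("hello,", name) // hello, harvest
}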
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 000000000..4894af818
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,129 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+	// Copy into a fresh slice so the flag does not alias the caller's backing array.
+	out := make([]string, len(val))
+	copy(out, val)
+	*s.value = out
+	return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+// An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
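
Reviewer note: a usage sketch for the StringArray flag above — each occurrence is kept verbatim rather than split on commas, in contrast to StringSlice. The flag name and values are made up.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	headers := fs.StringArray("header", nil, "extra header, may be repeated")

	// Each occurrence is kept verbatim; commas are not treated as separators.
	_ = fs.Parse([]string{"--header=Accept: text/plain", "--header=X-Tags: a,b"})
	fmt.Println(*headers) // [Accept: text/plain X-Tags: a,b]
}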
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 000000000..3cb2e69db
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
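
Reviewer note: a short sketch of the StringSlice flag above, showing the CSV splitting and append-on-repeat behavior documented in the comments. The flag name and values are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	tags := fs.StringSlice("tag", nil, "tags, comma-separated and repeatable")

	// Each value is read as CSV, so both occurrences below contribute entries.
	_ = fs.Parse([]string{"--tag=a,b", "--tag=c"})
	fmt.Println(*tags) // [a b c]
}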
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 000000000..5ceda3965
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
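A small, hedged usage sketch of the key=value parsing implemented above; the flag name "limits" and the sample arguments are invented for illustration only.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	limits := fs.StringToInt("limits", map[string]int{}, "per-key limits as key=value pairs")
	// Set splits the argument on commas, then each pair on its first '='.
	if err := fs.Parse([]string{"--limits=web=3,db=5"}); err != nil {
		panic(err)
	}
	fmt.Println((*limits)["web"], (*limits)["db"]) // 3 5

	// GetStringToInt retrieves the same map by flag name.
	m, err := fs.GetStringToInt("limits")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(m)) // 2
}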
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 000000000..a807a04a0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
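A hedged sketch of the Var form, showing the merge rule in Set above: the first occurrence of the flag replaces the default map, later occurrences merge into it. The flag name "quota" and the tenant names are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	quotas := map[string]int64{}
	fs.StringToInt64Var(&quotas, "quota", map[string]int64{"default": 100}, "per-tenant quotas as key=value")
	// The first occurrence replaces the default map; the second merges into it.
	if err := fs.Parse([]string{"--quota=acme=1000", "--quota=initech=250"}); err != nil {
		panic(err)
	}
	fmt.Println(quotas["acme"], quotas["initech"], quotas["default"]) // 1000 250 0
}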
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 000000000..890a01afc
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
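A hedged sketch of the parsing rules in the Set method above: an argument containing more than one '=' is read as CSV (several pairs), while an argument with a single '=' keeps any commas inside its value. The flag name "label" and the sample labels are invented.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	labels := fs.StringToString("label", map[string]string{}, "key=value labels")
	// Two pairs in one argument are read as CSV; a single pair keeps its commas.
	args := []string{"--label=env=prod,team=storage", "--label=note=a,b and c"}
	if err := fs.Parse(args); err != nil {
		panic(err)
	}
	fmt.Println((*labels)["env"], (*labels)["team"]) // prod storage
	fmt.Println((*labels)["note"])                   // a,b and c
}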
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 000000000..dcbc2b758
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
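A minimal, hypothetical sketch of the uint flag and its shorthand variant; the uint8/uint16/uint32/uint64 files that follow expose the same API shape for their respective widths. The flag name "workers" and its shorthand are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	workers := fs.UintP("workers", "w", 4, "number of workers")
	// Set uses strconv.ParseUint with base 0, so hexadecimal input such as "0x10" is also accepted.
	if err := fs.Parse([]string{"-w=16"}); err != nil {
		panic(err)
	}
	fmt.Println(*workers) // 16
}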
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 000000000..7e9914edd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 000000000..d8024539b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 000000000..f62240f2c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 000000000..bb0e83c1f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 000000000..5fa924835
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
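A hedged sketch of the slice semantics in the Set method above: the first occurrence replaces the default slice and repeated occurrences append. The flag name "ports" and the port numbers are invented for illustration.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ports := fs.UintSlice("ports", []uint{80}, "ports to listen on")
	// The first occurrence replaces the default; repeated occurrences append.
	if err := fs.Parse([]string{"--ports=8080,8443", "--ports=9090"}); err != nil {
		panic(err)
	}
	fmt.Println(*ports) // [8080 8443 9090]
}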
diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/sys/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/sys/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
new file mode 100644
index 000000000..e07899b90
--- /dev/null
+++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unsafeheader contains header declarations for the Go runtime's
+// slice and string implementations.
+//
+// This package allows x/sys to use types equivalent to
+// reflect.SliceHeader and reflect.StringHeader without introducing
+// a dependency on the (relatively heavy) "reflect" package.
+package unsafeheader
+
+import (
+ "unsafe"
+)
+
+// Slice is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may change in a later release.
+type Slice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+// String is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may change in a later release.
+type String struct {
+ Data unsafe.Pointer
+ Len int
+}
diff --git a/vendor/golang.org/x/sys/plan9/asm.s b/vendor/golang.org/x/sys/plan9/asm.s
new file mode 100644
index 000000000..06449ebfa
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+ RET
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_386.s b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s
new file mode 100644
index 000000000..bc5cab1f3
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s
@@ -0,0 +1,30 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for 386, Plan 9
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-36
+ JMP syscall·seek(SB)
+
+TEXT ·exit(SB),NOSPLIT,$4-4
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
new file mode 100644
index 000000000..d3448e675
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
@@ -0,0 +1,30 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for amd64, Plan 9
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-64
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-56
+ JMP syscall·seek(SB)
+
+TEXT ·exit(SB),NOSPLIT,$8-8
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
new file mode 100644
index 000000000..afb7c0a9b
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// System call support for plan9 on arm
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-36
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/const_plan9.go b/vendor/golang.org/x/sys/plan9/const_plan9.go
new file mode 100644
index 000000000..b4e85a3a9
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/const_plan9.go
@@ -0,0 +1,70 @@
+package plan9
+
+// Plan 9 Constants
+
+// Open modes
+const (
+ O_RDONLY = 0
+ O_WRONLY = 1
+ O_RDWR = 2
+ O_TRUNC = 16
+ O_CLOEXEC = 32
+ O_EXCL = 0x1000
+)
+
+// Rfork flags
+const (
+ RFNAMEG = 1 << 0
+ RFENVG = 1 << 1
+ RFFDG = 1 << 2
+ RFNOTEG = 1 << 3
+ RFPROC = 1 << 4
+ RFMEM = 1 << 5
+ RFNOWAIT = 1 << 6
+ RFCNAMEG = 1 << 10
+ RFCENVG = 1 << 11
+ RFCFDG = 1 << 12
+ RFREND = 1 << 13
+ RFNOMNT = 1 << 14
+)
+
+// Qid.Type bits
+const (
+ QTDIR = 0x80
+ QTAPPEND = 0x40
+ QTEXCL = 0x20
+ QTMOUNT = 0x10
+ QTAUTH = 0x08
+ QTTMP = 0x04
+ QTFILE = 0x00
+)
+
+// Dir.Mode bits
+const (
+ DMDIR = 0x80000000
+ DMAPPEND = 0x40000000
+ DMEXCL = 0x20000000
+ DMMOUNT = 0x10000000
+ DMAUTH = 0x08000000
+ DMTMP = 0x04000000
+ DMREAD = 0x4
+ DMWRITE = 0x2
+ DMEXEC = 0x1
+)
+
+const (
+ STATMAX = 65535
+ ERRMAX = 128
+ STATFIXLEN = 49
+)
+
+// Mount and bind flags
+const (
+ MREPL = 0x0000
+ MBEFORE = 0x0001
+ MAFTER = 0x0002
+ MORDER = 0x0003
+ MCREATE = 0x0004
+ MCACHE = 0x0010
+ MMASK = 0x0017
+)
diff --git a/vendor/golang.org/x/sys/plan9/dir_plan9.go b/vendor/golang.org/x/sys/plan9/dir_plan9.go
new file mode 100644
index 000000000..0955e0c53
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/dir_plan9.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 directory marshalling. See intro(5).
+
+package plan9
+
+import "errors"
+
+var (
+ ErrShortStat = errors.New("stat buffer too short")
+ ErrBadStat = errors.New("malformed stat buffer")
+ ErrBadName = errors.New("bad character in file name")
+)
+
+// A Qid represents a 9P server's unique identification for a file.
+type Qid struct {
+ Path uint64 // the file server's unique identification for the file
+ Vers uint32 // version number for given Path
+ Type uint8 // the type of the file (plan9.QTDIR for example)
+}
+
+// A Dir contains the metadata for a file.
+type Dir struct {
+ // system-modified data
+ Type uint16 // server type
+ Dev uint32 // server subtype
+
+ // file data
+ Qid Qid // unique id from server
+ Mode uint32 // permissions
+ Atime uint32 // last read time
+ Mtime uint32 // last write time
+ Length int64 // file length
+ Name string // last element of path
+ Uid string // owner name
+ Gid string // group name
+ Muid string // last modifier name
+}
+
+var nullDir = Dir{
+ Type: ^uint16(0),
+ Dev: ^uint32(0),
+ Qid: Qid{
+ Path: ^uint64(0),
+ Vers: ^uint32(0),
+ Type: ^uint8(0),
+ },
+ Mode: ^uint32(0),
+ Atime: ^uint32(0),
+ Mtime: ^uint32(0),
+ Length: ^int64(0),
+}
+
+// Null assigns special "don't touch" values to members of d to
+// avoid modifying them during plan9.Wstat.
+func (d *Dir) Null() { *d = nullDir }
+
+// Marshal encodes a 9P stat message corresponding to d into b
+//
+// If there isn't enough space in b for a stat message, ErrShortStat is returned.
+func (d *Dir) Marshal(b []byte) (n int, err error) {
+ n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid)
+ if n > len(b) {
+ return n, ErrShortStat
+ }
+
+ for _, c := range d.Name {
+ if c == '/' {
+ return n, ErrBadName
+ }
+ }
+
+ b = pbit16(b, uint16(n)-2)
+ b = pbit16(b, d.Type)
+ b = pbit32(b, d.Dev)
+ b = pbit8(b, d.Qid.Type)
+ b = pbit32(b, d.Qid.Vers)
+ b = pbit64(b, d.Qid.Path)
+ b = pbit32(b, d.Mode)
+ b = pbit32(b, d.Atime)
+ b = pbit32(b, d.Mtime)
+ b = pbit64(b, uint64(d.Length))
+ b = pstring(b, d.Name)
+ b = pstring(b, d.Uid)
+ b = pstring(b, d.Gid)
+ b = pstring(b, d.Muid)
+
+ return n, nil
+}
+
+// UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir.
+//
+// If b is too small to hold a valid stat message, ErrShortStat is returned.
+//
+// If the stat message itself is invalid, ErrBadStat is returned.
+func UnmarshalDir(b []byte) (*Dir, error) {
+ if len(b) < STATFIXLEN {
+ return nil, ErrShortStat
+ }
+ size, buf := gbit16(b)
+ if len(b) != int(size)+2 {
+ return nil, ErrBadStat
+ }
+ b = buf
+
+ var d Dir
+ d.Type, b = gbit16(b)
+ d.Dev, b = gbit32(b)
+ d.Qid.Type, b = gbit8(b)
+ d.Qid.Vers, b = gbit32(b)
+ d.Qid.Path, b = gbit64(b)
+ d.Mode, b = gbit32(b)
+ d.Atime, b = gbit32(b)
+ d.Mtime, b = gbit32(b)
+
+ n, b := gbit64(b)
+ d.Length = int64(n)
+
+ var ok bool
+ if d.Name, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Uid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Gid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Muid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+
+ return &d, nil
+}
+
+// pbit8 copies the 8-bit number v to b and returns the remaining slice of b.
+func pbit8(b []byte, v uint8) []byte {
+ b[0] = byte(v)
+ return b[1:]
+}
+
+// pbit16 copies the 16-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit16(b []byte, v uint16) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ return b[2:]
+}
+
+// pbit32 copies the 32-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit32(b []byte, v uint32) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ return b[4:]
+}
+
+// pbit64 copies the 64-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit64(b []byte, v uint64) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+ return b[8:]
+}
+
+// pstring copies the string s to b, prepending it with a 16-bit length in little-endian order, and
+// returns the remaining slice of b.
+func pstring(b []byte, s string) []byte {
+ b = pbit16(b, uint16(len(s)))
+ n := copy(b, s)
+ return b[n:]
+}
+
+// gbit8 reads an 8-bit number from b and returns it with the remaining slice of b.
+func gbit8(b []byte) (uint8, []byte) {
+ return uint8(b[0]), b[1:]
+}
+
+// gbit16 reads a 16-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit16(b []byte) (uint16, []byte) {
+ return uint16(b[0]) | uint16(b[1])<<8, b[2:]
+}
+
+// gbit32 reads a 32-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit32(b []byte) (uint32, []byte) {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, b[4:]
+}
+
+// gbit64 reads a 64-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit64(b []byte) (uint64, []byte) {
+ lo := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ hi := uint32(b[4]) | uint32(b[5])<<8 | uint32(b[6])<<16 | uint32(b[7])<<24
+ return uint64(lo) | uint64(hi)<<32, b[8:]
+}
+
+// gstring reads a string from b, prefixed with a 16-bit length in little-endian order.
+// It returns the string with the remaining slice of b and a boolean. If the length is
+// greater than the number of bytes in b, the boolean will be false.
+func gstring(b []byte) (string, []byte, bool) {
+ n, b := gbit16(b)
+ if int(n) > len(b) {
+ return "", b, false
+ }
+ return string(b[:n]), b[n:], true
+}
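A hedged round-trip sketch of the Dir Marshal/UnmarshalDir API above. Compiling it requires GOOS=plan9 since these files carry the _plan9 suffix, and the Dir field values are invented; the buffer sizing follows the formula used inside Marshal (STATFIXLEN plus the four string lengths).

package main

import (
	"fmt"

	"golang.org/x/sys/plan9"
)

func main() {
	d := plan9.Dir{Name: "notes.txt", Uid: "glenda", Gid: "sys", Muid: "glenda", Length: 42}
	// Marshal needs STATFIXLEN bytes plus room for the four strings.
	buf := make([]byte, plan9.STATFIXLEN+len(d.Name)+len(d.Uid)+len(d.Gid)+len(d.Muid))
	if _, err := d.Marshal(buf); err != nil {
		panic(err)
	}
	// UnmarshalDir checks that the buffer length matches the encoded size exactly.
	out, err := plan9.UnmarshalDir(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Length) // notes.txt 42
}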
diff --git a/vendor/golang.org/x/sys/plan9/env_plan9.go b/vendor/golang.org/x/sys/plan9/env_plan9.go
new file mode 100644
index 000000000..8f1918004
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/env_plan9.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 environment variables.
+
+package plan9
+
+import (
+ "syscall"
+)
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/plan9/errors_plan9.go b/vendor/golang.org/x/sys/plan9/errors_plan9.go
new file mode 100644
index 000000000..65fe74d3e
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/errors_plan9.go
@@ -0,0 +1,50 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plan9
+
+import "syscall"
+
+// Constants
+const (
+ // Invented values to support what package os expects.
+ O_CREAT = 0x02000
+ O_APPEND = 0x00400
+ O_NOCTTY = 0x00000
+ O_NONBLOCK = 0x00000
+ O_SYNC = 0x00000
+ O_ASYNC = 0x00000
+
+ S_IFMT = 0x1f000
+ S_IFIFO = 0x1000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFBLK = 0x6000
+ S_IFREG = 0x8000
+ S_IFLNK = 0xa000
+ S_IFSOCK = 0xc000
+)
+
+// Errors
+var (
+ EINVAL = syscall.NewError("bad arg in system call")
+ ENOTDIR = syscall.NewError("not a directory")
+ EISDIR = syscall.NewError("file is a directory")
+ ENOENT = syscall.NewError("file does not exist")
+ EEXIST = syscall.NewError("file already exists")
+ EMFILE = syscall.NewError("no free file descriptors")
+ EIO = syscall.NewError("i/o error")
+ ENAMETOOLONG = syscall.NewError("file name too long")
+ EINTR = syscall.NewError("interrupted")
+ EPERM = syscall.NewError("permission denied")
+ EBUSY = syscall.NewError("no free devices")
+ ETIMEDOUT = syscall.NewError("connection timed out")
+ EPLAN9 = syscall.NewError("not supported by plan 9")
+
+ // The following errors do not correspond to any
+ // Plan 9 system messages. Invented to support
+ // what package os and others expect.
+ EACCES = syscall.NewError("access permission denied")
+ EAFNOSUPPORT = syscall.NewError("address family not supported by protocol")
+)
diff --git a/vendor/golang.org/x/sys/plan9/mkall.sh b/vendor/golang.org/x/sys/plan9/mkall.sh
new file mode 100644
index 000000000..1650fbcc7
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mkall.sh
@@ -0,0 +1,150 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# The plan9 package provides access to the raw system call
+# interface of the underlying operating system. Porting Go to
+# a new architecture/operating system combination requires
+# some manual effort, though there are tools that automate
+# much of the process. The auto-generated files have names
+# beginning with z.
+#
+# This script runs or (given -n) prints suggested commands to generate z files
+# for the current system. Running those commands is not automatic.
+# This script is documentation more than anything else.
+#
+# * asm_${GOOS}_${GOARCH}.s
+#
+# This hand-written assembly file implements system call dispatch.
+# There are three entry points:
+#
+# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
+# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+#
+# The first and second are the standard ones; they differ only in
+# how many arguments can be passed to the kernel.
+# The third is for low-level use by the ForkExec wrapper;
+# unlike the first two, it does not call into the scheduler to
+# let it know that a system call is running.
+#
+# * syscall_${GOOS}.go
+#
+# This hand-written Go file implements system calls that need
+# special handling and lists "//sys" comments giving prototypes
+# for ones that can be auto-generated. Mksyscall reads those
+# comments to generate the stubs.
+#
+# * syscall_${GOOS}_${GOARCH}.go
+#
+# Same as syscall_${GOOS}.go except that it contains code specific
+# to ${GOOS} on one particular architecture.
+#
+# * types_${GOOS}.c
+#
+# This hand-written C file includes standard C headers and then
+# creates typedef or enum names beginning with a dollar sign
+# (use of $ in variable names is a gcc extension). The hardest
+# part about preparing this file is figuring out which headers to
+# include and which symbols need to be #defined to get the
+# actual data structures that pass through to the kernel system calls.
+# Some C libraries present alternate versions for binary compatibility
+# and translate them on the way in and out of system calls, but
+# there is almost always a #define that can get the real ones.
+# See types_darwin.c and types_linux.c for examples.
+#
+# * zerror_${GOOS}_${GOARCH}.go
+#
+# This machine-generated file defines the system's error numbers,
+# error strings, and signal numbers. The generator is "mkerrors.sh".
+# Usually no arguments are needed, but mkerrors.sh will pass its
+# arguments on to godefs.
+#
+# * zsyscall_${GOOS}_${GOARCH}.go
+#
+# Generated by mksyscall.go; see syscall_${GOOS}.go above.
+#
+# * zsysnum_${GOOS}_${GOARCH}.go
+#
+# Generated by mksysnum_${GOOS}.
+#
+# * ztypes_${GOOS}_${GOARCH}.go
+#
+# Generated by godefs; see types_${GOOS}.c above.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+run="sh"
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+plan9_386)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,386"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+plan9_amd64)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,amd64"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+plan9_arm)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,arm"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ plan9)
+ syscall_goos="syscall_$GOOS.go"
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos |gofmt >zsyscall_$GOOSARCH.go"; fi
+ ;;
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/plan9/mkerrors.sh b/vendor/golang.org/x/sys/plan9/mkerrors.sh
new file mode 100644
index 000000000..85309c4a5
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mkerrors.sh
@@ -0,0 +1,246 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+CC=${CC:-gcc}
+
+uname=$(uname)
+
+includes='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+ccflags="$@"
+
+# Write go tool cgo -godefs input.
+(
+ echo package plan9
+ echo
+ echo '/*'
+ indirect="includes_$(uname)"
+ echo "${!indirect} $includes"
+ echo '*/'
+ echo 'import "C"'
+ echo
+ echo 'const ('
+
+ # The gcc command line prints all the #defines
+ # it encounters while processing the input
+ echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
+ awk '
+ $1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
+
+ $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
+ $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
+ $2 ~ /^(SCM_SRCRT)$/ {next}
+ $2 ~ /^(MAP_FAILED)$/ {next}
+
+ $2 !~ /^ETH_/ &&
+ $2 !~ /^EPROC_/ &&
+ $2 !~ /^EQUIV_/ &&
+ $2 !~ /^EXPR_/ &&
+ $2 ~ /^E[A-Z0-9_]+$/ ||
+ $2 ~ /^B[0-9_]+$/ ||
+ $2 ~ /^V[A-Z0-9]+$/ ||
+ $2 ~ /^CS[A-Z0-9]/ ||
+ $2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+ $2 ~ /^IGN/ ||
+ $2 ~ /^IX(ON|ANY|OFF)$/ ||
+ $2 ~ /^IN(LCR|PCK)$/ ||
+ $2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
+ $2 ~ /^C(LOCAL|READ)$/ ||
+ $2 == "BRKINT" ||
+ $2 == "HUPCL" ||
+ $2 == "PENDIN" ||
+ $2 == "TOSTOP" ||
+ $2 ~ /^PAR/ ||
+ $2 ~ /^SIG[^_]/ ||
+ $2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ ||
+ $2 ~ /^IN_/ ||
+ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 == "ICMPV6_FILTER" ||
+ $2 == "SOMAXCONN" ||
+ $2 == "NAME_MAX" ||
+ $2 == "IFNAMSIZ" ||
+ $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
+ $2 ~ /^SYSCTL_VERS/ ||
+ $2 ~ /^(MS|MNT)_/ ||
+ $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
+ $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^LINUX_REBOOT_CMD_/ ||
+ $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
+ $2 ~ /^SIOC/ ||
+ $2 ~ /^TIOC/ ||
+ $2 !~ "RTF_BITS" &&
+ $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
+ $2 ~ /^BIOC/ ||
+ $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
+ $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
+ $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
+ $2 ~ /^CLONE_[A-Z_]+/ ||
+ $2 !~ /^(BPF_TIMEVAL)$/ &&
+ $2 ~ /^(BPF|DLT)_/ ||
+ $2 !~ "WMESGLEN" &&
+ $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
+ $2 ~ /^__WCOREFLAG$/ {next}
+ $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
+
+ {next}
+ ' | sort
+
+ echo ')'
+) >_const.go
+
+# Pull out the error names for later.
+errors=$(
+ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
+ sort
+)
+
+# Pull out the signal names for later.
+signals=$(
+ echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort
+)
+
+# Again, writing regexps to a file.
+echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ sort >_error.grep
+echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort >_signal.grep
+
+echo '// mkerrors.sh' "$@"
+echo '// Code generated by the command above; DO NOT EDIT.'
+echo
+go tool cgo -godefs -- "$@" _const.go >_error.out
+cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
+echo
+echo '// Errors'
+echo 'const ('
+cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= Errno(\1)/'
+echo ')'
+
+echo
+echo '// Signals'
+echo 'const ('
+cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= Signal(\1)/'
+echo ')'
+
+# Run C program to print error and syscall strings.
+(
+ echo -E "
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
+
+int errors[] = {
+"
+ for i in $errors
+ do
+ echo -E ' '$i,
+ done
+
+ echo -E "
+};
+
+int signals[] = {
+"
+ for i in $signals
+ do
+ echo -E ' '$i,
+ done
+
+ # Use -E because on some systems bash builtin interprets \n itself.
+ echo -E '
+};
+
+static int
+intcmp(const void *a, const void *b)
+{
+ return *(int*)a - *(int*)b;
+}
+
+int
+main(void)
+{
+ int i, j, e;
+ char buf[1024], *p;
+
+ printf("\n\n// Error table\n");
+ printf("var errors = [...]string {\n");
+ qsort(errors, nelem(errors), sizeof errors[0], intcmp);
+ for(i=0; i<nelem(errors); i++) {
+ e = errors[i];
+ if(i > 0 && errors[i-1] == e)
+ continue;
+ strcpy(buf, strerror(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ printf("\n\n// Signal table\n");
+ printf("var signals = [...]string {\n");
+ qsort(signals, nelem(signals), sizeof signals[0], intcmp);
+ for(i=0; i<nelem(signals); i++) {
+ e = signals[i];
+ if(i > 0 && signals[i-1] == e)
+ continue;
+ strcpy(buf, strsignal(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ // cut trailing : number.
+ p = strrchr(buf, ":"[0]);
+ if(p)
+ *p = '\0';
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ return 0;
+}
+
+'
+) >_errors.c
+
+$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
diff --git a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
new file mode 100644
index 000000000..3c3ab0581
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+COMMAND="mksysnum_plan9.sh $@"
+
+cat <= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
new file mode 100644
index 000000000..e7363a2f5
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -0,0 +1,116 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+// Package plan9 contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display the OS-specific documentation for the current
+// system. If you want godoc to display documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+//
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net". Use
+// those packages rather than this one if you can.
+//
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+//
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.ErrorString.
+package plan9 // import "golang.org/x/sys/plan9"
+
+import (
+ "bytes"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
+)
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+ if strings.IndexByte(s, 0) != -1 {
+ return nil, EINVAL
+ }
+ a := make([]byte, len(s)+1)
+ copy(a, s)
+ return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ return nil, err
+ }
+ return &a[0], nil
+}
+
+// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any
+// bytes after the NUL removed.
+func ByteSliceToString(s []byte) string {
+ if i := bytes.IndexByte(s, 0); i != -1 {
+ s = s[:i]
+ }
+ return string(s)
+}
+
+// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string.
+// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated
+// at a zero byte; if the zero byte is not present, the program may crash.
+func BytePtrToString(p *byte) string {
+ if p == nil {
+ return ""
+ }
+ if *p == 0 {
+ return ""
+ }
+
+ // Find NUL terminator.
+ n := 0
+ for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ {
+ ptr = unsafe.Pointer(uintptr(ptr) + 1)
+ }
+
+ var s []byte
+ h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
+ h.Data = unsafe.Pointer(p)
+ h.Len = n
+ h.Cap = n
+
+ return string(s)
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mksyscall.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+//go:noescape
+func use(p unsafe.Pointer)
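The byte and string helpers above are how string arguments get marshalled for the raw system calls. A small sketch of the round trip, assuming a Plan 9 build target; no system call is performed:

```go
//go:build plan9

// Illustrative sketch: round-trip a string through the NUL-terminated
// helpers defined in syscall.go. No system call is made here.
package main

import (
	"fmt"

	"golang.org/x/sys/plan9"
)

func main() {
	p, err := plan9.BytePtrFromString("/env/user")
	if err != nil {
		// Strings containing a NUL byte are rejected with EINVAL.
		fmt.Println("bad argument:", err)
		return
	}

	// BytePtrToString walks forward to the terminating NUL and
	// recovers the original string.
	fmt.Println(plan9.BytePtrToString(p)) // /env/user
}
```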
diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
new file mode 100644
index 000000000..84e147148
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
@@ -0,0 +1,349 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package plan9
+
+import (
+ "bytes"
+ "syscall"
+ "unsafe"
+)
+
+// A Note is a string describing a process note.
+// It implements the os.Signal interface.
+type Note string
+
+func (n Note) Signal() {}
+
+func (n Note) String() string {
+ return string(n)
+}
+
+var (
+ Stdin = 0
+ Stdout = 1
+ Stderr = 2
+)
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.ErrorString)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.ErrorString)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+
+func atoi(b []byte) (n uint) {
+ n = 0
+ for i := 0; i < len(b); i++ {
+ n = n*10 + uint(b[i]-'0')
+ }
+ return
+}
+
+func cstring(s []byte) string {
+ i := bytes.IndexByte(s, 0)
+ if i == -1 {
+ i = len(s)
+ }
+ return string(s[:i])
+}
+
+func errstr() string {
+ var buf [ERRMAX]byte
+
+ RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0)
+
+ buf[len(buf)-1] = 0
+ return cstring(buf[:])
+}
+
+// Implemented in assembly to import from runtime.
+func exit(code int)
+
+func Exit(code int) { exit(code) }
+
+func readnum(path string) (uint, error) {
+ var b [12]byte
+
+ fd, e := Open(path, O_RDONLY)
+ if e != nil {
+ return 0, e
+ }
+ defer Close(fd)
+
+ n, e := Pread(fd, b[:], 0)
+
+ if e != nil {
+ return 0, e
+ }
+
+ m := 0
+ for ; m < n && b[m] == ' '; m++ {
+ }
+
+ return atoi(b[m : n-1]), nil
+}
+
+func Getpid() (pid int) {
+ n, _ := readnum("#c/pid")
+ return int(n)
+}
+
+func Getppid() (ppid int) {
+ n, _ := readnum("#c/ppid")
+ return int(n)
+}
+
+func Read(fd int, p []byte) (n int, err error) {
+ return Pread(fd, p, -1)
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return Pwrite(fd, p, -1)
+}
+
+var ioSync int64
+
+//sys fd2path(fd int, buf []byte) (err error)
+func Fd2path(fd int) (path string, err error) {
+ var buf [512]byte
+
+ e := fd2path(fd, buf[:])
+ if e != nil {
+ return "", e
+ }
+ return cstring(buf[:]), nil
+}
+
+//sys pipe(p *[2]int32) (err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return syscall.ErrorString("bad arg in system call")
+ }
+ var pp [2]int32
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, e := seek(0, fd, offset, whence)
+
+ if newoffset == -1 {
+ err = syscall.ErrorString(e)
+ }
+ return
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+ fd, err := Create(path, O_RDONLY, DMDIR|mode)
+
+ if fd != -1 {
+ Close(fd)
+ }
+
+ return
+}
+
+type Waitmsg struct {
+ Pid int
+ Time [3]uint32
+ Msg string
+}
+
+func (w Waitmsg) Exited() bool { return true }
+func (w Waitmsg) Signaled() bool { return false }
+
+func (w Waitmsg) ExitStatus() int {
+ if len(w.Msg) == 0 {
+ // a normal exit returns no message
+ return 0
+ }
+ return 1
+}
+
+//sys await(s []byte) (n int, err error)
+func Await(w *Waitmsg) (err error) {
+ var buf [512]byte
+ var f [5][]byte
+
+ n, err := await(buf[:])
+
+ if err != nil || w == nil {
+ return
+ }
+
+ nf := 0
+ p := 0
+ for i := 0; i < n && nf < len(f)-1; i++ {
+ if buf[i] == ' ' {
+ f[nf] = buf[p:i]
+ p = i + 1
+ nf++
+ }
+ }
+ f[nf] = buf[p:]
+ nf++
+
+ if nf != len(f) {
+ return syscall.ErrorString("invalid wait message")
+ }
+ w.Pid = int(atoi(f[0]))
+ w.Time[0] = uint32(atoi(f[1]))
+ w.Time[1] = uint32(atoi(f[2]))
+ w.Time[2] = uint32(atoi(f[3]))
+ w.Msg = cstring(f[4])
+ if w.Msg == "''" {
+ // await() returns '' for no error
+ w.Msg = ""
+ }
+ return
+}
+
+func Unmount(name, old string) (err error) {
+ fixwd()
+ oldp, err := BytePtrFromString(old)
+ if err != nil {
+ return err
+ }
+ oldptr := uintptr(unsafe.Pointer(oldp))
+
+ var r0 uintptr
+ var e syscall.ErrorString
+
+ // bind(2) man page: If name is zero, everything bound or mounted upon old is unbound or unmounted.
+ if name == "" {
+ r0, _, e = Syscall(SYS_UNMOUNT, _zero, oldptr, 0)
+ } else {
+ namep, err := BytePtrFromString(name)
+ if err != nil {
+ return err
+ }
+ r0, _, e = Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(namep)), oldptr, 0)
+ }
+
+ if int32(r0) == -1 {
+ err = e
+ }
+ return
+}
+
+func Fchdir(fd int) (err error) {
+ path, err := Fd2path(fd)
+
+ if err != nil {
+ return
+ }
+
+ return Chdir(path)
+}
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+func nsec() int64 {
+ var scratch int64
+
+ r0, _, _ := Syscall(SYS_NSEC, uintptr(unsafe.Pointer(&scratch)), 0, 0)
+ // TODO(aram): remove hack after I fix _nsec in the pc64 kernel.
+ if r0 == 0 {
+ return scratch
+ }
+ return int64(r0)
+}
+
+func Gettimeofday(tv *Timeval) error {
+ nsec := nsec()
+ *tv = NsecToTimeval(nsec)
+ return nil
+}
+
+func Getpagesize() int { return 0x1000 }
+
+func Getegid() (egid int) { return -1 }
+func Geteuid() (euid int) { return -1 }
+func Getgid() (gid int) { return -1 }
+func Getuid() (uid int) { return -1 }
+
+func Getgroups() (gids []int, err error) {
+ return make([]int, 0), nil
+}
+
+//sys open(path string, mode int) (fd int, err error)
+func Open(path string, mode int) (fd int, err error) {
+ fixwd()
+ return open(path, mode)
+}
+
+//sys create(path string, mode int, perm uint32) (fd int, err error)
+func Create(path string, mode int, perm uint32) (fd int, err error) {
+ fixwd()
+ return create(path, mode, perm)
+}
+
+//sys remove(path string) (err error)
+func Remove(path string) error {
+ fixwd()
+ return remove(path)
+}
+
+//sys stat(path string, edir []byte) (n int, err error)
+func Stat(path string, edir []byte) (n int, err error) {
+ fixwd()
+ return stat(path, edir)
+}
+
+//sys bind(name string, old string, flag int) (err error)
+func Bind(name string, old string, flag int) (err error) {
+ fixwd()
+ return bind(name, old, flag)
+}
+
+//sys mount(fd int, afd int, old string, flag int, aname string) (err error)
+func Mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ fixwd()
+ return mount(fd, afd, old, flag, aname)
+}
+
+//sys wstat(path string, edir []byte) (err error)
+func Wstat(path string, edir []byte) (err error) {
+ fixwd()
+ return wstat(path, edir)
+}
+
+//sys chdir(path string) (err error)
+//sys Dup(oldfd int, newfd int) (fd int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys Close(fd int) (err error)
+//sys Fstat(fd int, edir []byte) (n int, err error)
+//sys Fwstat(fd int, edir []byte) (err error)
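`Pipe` above wraps the raw `pipe` stub and returns two ordinary file descriptors, so it composes directly with `Read`, `Write` and `Close` from the same file. A minimal sketch, assuming a Plan 9 build target:

```go
//go:build plan9

// Illustrative sketch: create a pipe with the wrapper above, write a few
// bytes into one end and read them back from the other.
package main

import (
	"fmt"

	"golang.org/x/sys/plan9"
)

func main() {
	fds := make([]int, 2)
	if err := plan9.Pipe(fds); err != nil {
		fmt.Println("pipe:", err)
		return
	}
	defer plan9.Close(fds[0])
	defer plan9.Close(fds[1])

	if _, err := plan9.Write(fds[1], []byte("hello")); err != nil {
		fmt.Println("write:", err)
		return
	}

	buf := make([]byte, 16)
	n, err := plan9.Read(fds[0], buf)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("%s\n", buf[:n]) // hello
}
```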
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
new file mode 100644
index 000000000..6819bc209
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,386
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
new file mode 100644
index 000000000..418abbbfc
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,amd64
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
new file mode 100644
index 000000000..3e8a1a58c
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,arm
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
new file mode 100644
index 000000000..22e8abd43
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
@@ -0,0 +1,49 @@
+// mksysnum_plan9.sh /opt/plan9/sys/src/libc/9syscall/sys.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+package plan9
+
+const (
+ SYS_SYSR1 = 0
+ SYS_BIND = 2
+ SYS_CHDIR = 3
+ SYS_CLOSE = 4
+ SYS_DUP = 5
+ SYS_ALARM = 6
+ SYS_EXEC = 7
+ SYS_EXITS = 8
+ SYS_FAUTH = 10
+ SYS_SEGBRK = 12
+ SYS_OPEN = 14
+ SYS_OSEEK = 16
+ SYS_SLEEP = 17
+ SYS_RFORK = 19
+ SYS_PIPE = 21
+ SYS_CREATE = 22
+ SYS_FD2PATH = 23
+ SYS_BRK_ = 24
+ SYS_REMOVE = 25
+ SYS_NOTIFY = 28
+ SYS_NOTED = 29
+ SYS_SEGATTACH = 30
+ SYS_SEGDETACH = 31
+ SYS_SEGFREE = 32
+ SYS_SEGFLUSH = 33
+ SYS_RENDEZVOUS = 34
+ SYS_UNMOUNT = 35
+ SYS_SEMACQUIRE = 37
+ SYS_SEMRELEASE = 38
+ SYS_SEEK = 39
+ SYS_FVERSION = 40
+ SYS_ERRSTR = 41
+ SYS_STAT = 42
+ SYS_FSTAT = 43
+ SYS_WSTAT = 44
+ SYS_FWSTAT = 45
+ SYS_MOUNT = 46
+ SYS_AWAIT = 47
+ SYS_PREAD = 50
+ SYS_PWRITE = 51
+ SYS_TSEMACQUIRE = 52
+ SYS_NSEC = 53
+)
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
new file mode 100644
index 000000000..e3e0fc6f8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -0,0 +1,2 @@
+_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
new file mode 100644
index 000000000..474efad0e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -0,0 +1,184 @@
+# Building `sys/unix`
+
+The sys/unix package provides access to the raw system call interface of the
+underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
+
+Porting Go to a new architecture/OS combination or adding syscalls, types, or
+constants to an existing architecture/OS pair requires some manual effort;
+however, there are tools that automate much of the process.
+
+## Build Systems
+
+There are currently two ways we generate the necessary files. We are
+migrating the build system to use containers so the builds are reproducible.
+This is being done on an OS-by-OS basis. Please update this documentation as
+components of the build system change.
+
+### Old Build System (currently for `GOOS != "linux"`)
+
+The old build system generates the Go files based on the C header files
+present on your system. This means that files
+for a given GOOS/GOARCH pair must be generated on a system with that OS and
+architecture. This also means that the generated code can differ from system
+to system, based on differences in the header files.
+
+To avoid this, if you are using the old build system, only generate the Go
+files on an installation with unmodified header files. It is also important to
+keep track of which version of the OS the files were generated from (ex.
+Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
+and have each OS upgrade correspond to a single change.
+
+To build the files for your current OS and architecture, make sure GOOS and
+GOARCH are set correctly and run `mkall.sh`. This will generate the files for
+your specific system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go
+
+### New Build System (currently for `GOOS == "linux"`)
+
+The new build system uses a Docker container to generate the go files directly
+from source checkouts of the kernel and various system libraries. This means
+that on any platform that supports Docker, all the files using the new build
+system can be generated at once, and generated files will not change based on
+what the person running the scripts has installed on their computer.
+
+The OS specific files for the new build system are located in the `${GOOS}`
+directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
+the kernel or system library updates, modify the Dockerfile at
+`${GOOS}/Dockerfile` to checkout the new release of the source.
+
+To build all the files under the new build system, you must be on an amd64/Linux
+system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
+then generate all of the files for all of the GOOS/GOARCH pairs in the new build
+system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go, docker
+
+## Component files
+
+This section describes the various files used in the code generation process.
+It also contains instructions on how to modify these files to add a new
+architecture/OS or to add additional syscalls, types, or constants. Note that
+if you are using the new build system, the scripts/programs cannot be called normally.
+They must be called from within the docker container.
+
+### asm files
+
+The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
+call dispatch. There are three entry points:
+```
+ func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+ func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+```
+The first and second are the standard ones; they differ only in how many
+arguments can be passed to the kernel. The third is for low-level use by the
+ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
+let it know that a system call is running.
+
+When porting Go to a new architecture/OS, this file must be implemented for
+each GOOS/GOARCH pair.
+
+### mksysnum
+
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
+syscall number declarations and parses them to produce the corresponding list of
+Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
+constants.
+
+Adding new syscall numbers is mostly done by running the build on a sufficiently
+new installation of the target OS (or updating the source checkouts for the
+new build system). However, depending on the OS, you may need to update the
+parsing in mksysnum.
+
+### mksyscall.go
+
+`syscall.go`, `syscall_${GOOS}.go`, and `syscall_${GOOS}_${GOARCH}.go` are
+hand-written Go files which implement system calls (for unix, the specific OS,
+or the specific OS/Architecture pair respectively) that need special handling
+and list `//sys` comments giving prototypes for ones that can be generated.
+
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
+them into syscalls. This requires the name of the prototype in the comment to
+match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
+prototype can be exported (capitalized) or not.
+
+Adding a new syscall often just requires adding a new `//sys` function prototype
+with the desired arguments and a capitalized name so it is exported. However, if
+you want the interface to the syscall to be different, often one will make an
+unexported `//sys` prototype, and then write a custom wrapper in
+`syscall_${GOOS}.go`.
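For example, `syscall_plan9.go` earlier in this change pairs an unexported `//sys open` prototype with an exported `Open` wrapper. A condensed, self-contained sketch of that shape follows; the lowercase stub is hand-stubbed here so the snippet compiles on its own, whereas in the real package mksyscall.go generates it into `zsyscall_${GOOS}_${GOARCH}.go`:

```go
// Illustrative sketch of the //sys pattern described above; names mirror
// the open/Open pair in syscall_plan9.go, but nothing here is generated.
package sketch

import "errors"

//sys	open(path string, mode int) (fd int, err error)

// open stands in for the generated stub; the real one traps into the kernel.
func open(path string, mode int) (fd int, err error) {
	return -1, errors.New("stub: generated by mksyscall.go in the real package")
}

// Open is the hand-written, exported wrapper that can add special handling
// before delegating to the generated stub.
func Open(path string, mode int) (fd int, err error) {
	return open(path, mode)
}
```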
+
+### types files
+
+For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
+`types_${GOOS}.go` on the old system). This file includes standard C headers and
+creates Go type aliases to the corresponding C types. The file is then fed
+through godefs to get the Go compatible definitions. Finally, the generated code
+is fed through mkpost.go to format the code correctly and remove any hidden or
+private identifiers. This cleaned-up code is written to
+`ztypes_${GOOS}_${GOARCH}.go`.
+
+The hardest part about preparing this file is figuring out which headers to
+include and which symbols need to be `#define`d to get the actual data
+structures that pass through to the kernel system calls. Some C libraries
+present alternate versions for binary compatibility and translate them on the
+way in and out of system calls, but there is almost always a `#define` that can
+get the real ones.
+See `types_darwin.go` and `linux/types.go` for examples.
+
+To add a new type, add in the necessary include statement at the top of the
+file (if it is not already there) and add in a type alias line. Note that if
+your type is significantly different on different architectures, you may need
+some `#if/#elif` macros in your include statements.
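A hedged sketch of what a godefs input entry looks like; the headers and type names below are placeholders chosen for illustration, not copied from any real `${GOOS}/types.go`:

```go
// Illustrative cgo -godefs input in the style described above;
// headers and type names are placeholders.
package unix

/*
#include <sys/time.h>
#include <time.h>
*/
import "C"

// Each alias picks up the C layout; godefs (plus mkpost.go) rewrites it
// into a plain Go struct in ztypes_${GOOS}_${GOARCH}.go.
type Timespec C.struct_timespec

type Timeval C.struct_timeval
```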
+
+### mkerrors.sh
+
+This script is used to generate the system's various constants. This doesn't
+just include the error numbers and error strings, but also the signal numbers
+and a wide variety of miscellaneous constants. The constants come from the list
+of include files in the `includes_${uname}` variable. A regex then picks out
+the desired `#define` statements, and generates the corresponding Go constants.
+The error numbers and strings are generated from `#include <errno.h>`, and the
+signal numbers and strings are generated from `#include <signal.h>`. All of
+these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
+`_errors.c`, which prints out all the constants.
+
+To add a constant, add the header that includes it to the appropriate variable.
+Then, edit the regex (if necessary) to match the desired constant. Keep the
+regex narrow enough that it does not match unintended constants.
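The generated `zerrors_${GOOS}_${GOARCH}.go` files then contain typed constant blocks along these lines; the names and numeric values below are placeholders, not taken from any real platform:

```go
// Illustrative excerpt of the shape of mkerrors.sh output; the values are
// placeholders, not real platform constants.
package unix

import "syscall"

// Errors
const (
	EACCES = syscall.Errno(0xd)
	EAGAIN = syscall.Errno(0xb)
	EINVAL = syscall.Errno(0x16)
)

// Signals
const (
	SIGINT  = syscall.Signal(0x2)
	SIGKILL = syscall.Signal(0x9)
)
```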
+
+### mkmerge.go
+
+This program is used to extract duplicate const, func, and type declarations
+from the generated architecture-specific files listed below, and merge these
+into a common file for each OS.
+
+The merge is performed in the following steps:
+1. Construct the set of common code that is identical in all architecture-specific files.
+2. Write this common code to the merged file.
+3. Remove the common code from all architecture-specific files.
+
+
+## Generated files
+
+### `zerrors_${GOOS}_${GOARCH}.go`
+
+A file containing all of the system's generated error numbers, error strings,
+signal numbers, and constants. Generated by `mkerrors.sh` (see above).
+
+### `zsyscall_${GOOS}_${GOARCH}.go`
+
+A file containing all the generated syscalls for a specific GOOS and GOARCH.
+Generated by `mksyscall.go` (see above).
+
+### `zsysnum_${GOOS}_${GOARCH}.go`
+
+A list of numeric constants for all the syscall numbers of the specific GOOS
+and GOARCH. Generated by mksysnum (see above).
+
+### `ztypes_${GOOS}_${GOARCH}.go`
+
+A file containing Go types for passing into (or returning from) syscalls.
+Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 000000000..6e5c81acd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += bits.OnesCount64(uint64(b))
+ }
+ return c
+}
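The `CPUSet` helpers above are typically used by zeroing a set, marking the desired CPUs, and passing it to `SchedSetaffinity`. A Linux-only sketch; pinning the calling thread to CPU 0 is just an example choice:

```go
//go:build linux

// Illustrative sketch: pin the calling thread to CPU 0 with the CPUSet
// helpers defined above, then read the mask back.
package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// Affinity is a per-thread property, so keep this goroutine on a
	// single OS thread while changing it.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var set unix.CPUSet
	set.Zero()
	set.Set(0)

	// pid 0 means "the calling thread".
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		fmt.Println("setaffinity:", err)
		return
	}

	var got unix.CPUSet
	if err := unix.SchedGetaffinity(0, &got); err != nil {
		fmt.Println("getaffinity:", err)
		return
	}
	fmt.Println("CPUs in mask:", got.Count(), "cpu0 set:", got.IsSet(0))
}
```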
diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go
new file mode 100644
index 000000000..abc89c104
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/aliases.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+// +build go1.9
+
+package unix
+
+import "syscall"
+
+type Signal = syscall.Signal
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
new file mode 100644
index 000000000..db9171c2e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
new file mode 100644
index 000000000..e0fcd9b3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+// +build freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for 386 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
new file mode 100644
index 000000000..2b99c349a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
+// +build darwin dragonfly freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for AMD64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
new file mode 100644
index 000000000..d702d4adc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+// +build freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for ARM BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
new file mode 100644
index 000000000..fe36a7391
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+// +build darwin freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for ARM64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 000000000..8fd101d07
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,66 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 000000000..7ed38e43c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 000000000..8ef1d5140
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 000000000..98ae02760
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,53 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && arm64 && gc
+// +build linux
+// +build arm64
+// +build gc
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 000000000..21231d2ce
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le) && gc
+// +build linux
+// +build mips64 mips64le
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 000000000..6783b26c6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,55 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips || mipsle) && gc
+// +build linux
+// +build mips mipsle
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 000000000..19d498934
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,45 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le) && gc
+// +build linux
+// +build ppc64 ppc64le
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 000000000..e42eb81d5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,49 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build riscv64 && gc
+// +build riscv64
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
new file mode 100644
index 000000000..c46aab339
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -0,0 +1,57 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && s390x && gc
+// +build linux
+// +build s390x
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
new file mode 100644
index 000000000..5e7a1169c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System call support for mips64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 000000000..f8c5394c1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
new file mode 100644
index 000000000..3b54e1858
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
@@ -0,0 +1,426 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x && gc
+// +build zos
+// +build s390x
+// +build gc
+
+#include "textflag.h"
+
+#define PSALAA 1208(R0)
+#define GTAB64(x) 80(x)
+#define LCA64(x) 88(x)
+#define CAA(x) 8(x)
+#define EDCHPXV(x) 1016(x) // in the CAA
+#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
+
+// SS_*, where x=SAVSTACK_ASYNC
+#define SS_LE(x) 0(x)
+#define SS_GO(x) 8(x)
+#define SS_ERRNO(x) 16(x)
+#define SS_ERRNOJR(x) 20(x)
+
+#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6
+
+TEXT ·clearErrno(SB),NOSPLIT,$0-0
+ BL addrerrno<>(SB)
+ MOVD $0, 0(R3)
+ RET
+
+// Returns the address of errno in R3.
+TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get __errno FuncDesc.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ ADD $(0x156*16), R9
+ LMG 0(R9), R5, R6
+
+ // Switch to saved LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call __errno function.
+ LE_CALL
+ NOPH
+
+ // Switch back to Go stack.
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+TEXT ·syscall_syscall(SB),NOSPLIT,$0-56
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+32(FP)
+ MOVD R0, r2+40(FP)
+ MOVD R0, err+48(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+32(FP)
+ MOVD R0, r2+40(FP)
+ MOVD R0, err+48(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+done:
+ RET
+
+TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+56(FP)
+ MOVD R0, r2+64(FP)
+ MOVD R0, err+72(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+72(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+56(FP)
+ MOVD R0, r2+64(FP)
+ MOVD R0, err+72(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+72(FP)
+done:
+ RET
+
+TEXT ·syscall_syscall9(SB),NOSPLIT,$0
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+ MOVD a7+56(FP), R12
+ MOVD R12, (2176+48)(R4)
+ MOVD a8+64(FP), R12
+ MOVD R12, (2176+56)(R4)
+ MOVD a9+72(FP), R12
+ MOVD R12, (2176+64)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+80(FP)
+ MOVD R0, r2+88(FP)
+ MOVD R0, err+96(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+96(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+ MOVD a7+56(FP), R12
+ MOVD R12, (2176+48)(R4)
+ MOVD a8+64(FP), R12
+ MOVD R12, (2176+56)(R4)
+ MOVD a9+72(FP), R12
+ MOVD R12, (2176+64)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+80(FP)
+ MOVD R0, r2+88(FP)
+ MOVD R0, err+96(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+96(FP)
+done:
+ RET
+
+// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
+TEXT ·svcCall(SB),NOSPLIT,$0
+ BL runtime·save_g(SB) // Save g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD R15, 0(R9)
+
+ MOVD argv+8(FP), R1 // Move function arguments into registers
+ MOVD dsa+16(FP), g
+ MOVD fnptr+0(FP), R15
+
+ BYTE $0x0D // Branch to function
+ BYTE $0xEF
+
+ BL runtime·load_g(SB) // Restore g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R15
+
+ RET
+
+// func svcLoad(name *byte) unsafe.Pointer
+TEXT ·svcLoad(SB),NOSPLIT,$0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD $0x80000000, R1
+ MOVD $0, R15
+ BYTE $0x0A // SVC 08 LOAD
+ BYTE $0x08
+ MOVW R15, R3 // Save return code from SVC
+ MOVD R2, R15 // Restore go stack pointer
+ CMP R3, $0 // Check SVC return code
+ BNE error
+
+ MOVD $-2, R3 // Reset last bit of entry point to zero
+ AND R0, R3
+ MOVD R3, addr+8(FP) // Return entry point returned by SVC
+ CMP R0, R3 // Check if last bit of entry point was set
+ BNE done
+
+ MOVD R15, R2 // Save go stack pointer
+ MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
+ BYTE $0x0A // SVC 09 DELETE
+ BYTE $0x09
+ MOVD R2, R15 // Restore go stack pointer
+
+error:
+ MOVD $0, addr+8(FP) // Return 0 on failure
+done:
+ XOR R0, R0 // Reset r0 to 0
+ RET
+
+// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
+TEXT ·svcUnload(SB),NOSPLIT,$0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD addr+8(FP), R15
+ BYTE $0x0A // SVC 09
+ BYTE $0x09
+ XOR R0, R0 // Reset r0 to 0
+ MOVD R15, R1 // Save SVC return code
+ MOVD R2, R15 // Restore go stack pointer
+ MOVD R1, rc+0(FP) // Return SVC return code
+ RET
+
+// func gettid() uint64
+TEXT ·gettid(SB), NOSPLIT, $0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get CEECAATHDID
+ MOVD CAA(R8), R9
+ MOVD 0x3D0(R9), R9
+ MOVD R9, ret+0(FP)
+
+ RET
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
new file mode 100644
index 000000000..a178a6149
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bluetooth sockets and messages
+
+package unix
+
+// Bluetooth Protocols
+const (
+ BTPROTO_L2CAP = 0
+ BTPROTO_HCI = 1
+ BTPROTO_SCO = 2
+ BTPROTO_RFCOMM = 3
+ BTPROTO_BNEP = 4
+ BTPROTO_CMTP = 5
+ BTPROTO_HIDP = 6
+ BTPROTO_AVDTP = 7
+)
+
+const (
+ HCI_CHANNEL_RAW = 0
+ HCI_CHANNEL_USER = 1
+ HCI_CHANNEL_MONITOR = 2
+ HCI_CHANNEL_CONTROL = 3
+ HCI_CHANNEL_LOGGING = 4
+)
+
+// Socketoption Level
+const (
+ SOL_BLUETOOTH = 0x112
+ SOL_HCI = 0x0
+ SOL_L2CAP = 0x6
+ SOL_RFCOMM = 0x12
+ SOL_SCO = 0x11
+)
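
These constants are used together with the package's AF_BLUETOOTH socket support (SockaddrHCI and related types). A hedged sketch of opening a raw HCI socket on Linux; device index 0 (hci0) and the channel choice are assumptions, and the program needs the usual Bluetooth privileges:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw HCI socket to the first adapter.
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_RAW|unix.SOCK_CLOEXEC, unix.BTPROTO_HCI)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrHCI{Dev: 0, Channel: unix.HCI_CHANNEL_RAW}
	if err := unix.Bind(fd, sa); err != nil {
		fmt.Println("bind:", err)
		return
	}
	fmt.Println("bound raw HCI socket, fd =", fd)
}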
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
new file mode 100644
index 000000000..0b7c6adb8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -0,0 +1,196 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd
+// +build freebsd
+
+package unix
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
+
+const (
+ // This is the version of CapRights this package understands. See C implementation for parallels.
+ capRightsGoVersion = CAP_RIGHTS_VERSION_00
+ capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
+ capArSizeMax = capRightsGoVersion + 2
+)
+
+var (
+ bit2idx = []int{
+ -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
+ 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ }
+)
+
+func capidxbit(right uint64) int {
+ return int((right >> 57) & 0x1f)
+}
+
+func rightToIndex(right uint64) (int, error) {
+ idx := capidxbit(right)
+ if idx < 0 || idx >= len(bit2idx) {
+ return -2, fmt.Errorf("index for right 0x%x out of range", right)
+ }
+ return bit2idx[idx], nil
+}
+
+func caprver(right uint64) int {
+ return int(right >> 62)
+}
+
+func capver(rights *CapRights) int {
+ return caprver(rights.Rights[0])
+}
+
+func caparsize(rights *CapRights) int {
+ return capver(rights) + 2
+}
+
+// CapRightsSet sets the permissions in setrights in rights.
+func CapRightsSet(rights *CapRights, setrights []uint64) error {
+ // This is essentially a copy of cap_rights_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] |= right
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsClear clears the permissions in clearrights from rights.
+func CapRightsClear(rights *CapRights, clearrights []uint64) error {
+ // This is essentially a copy of cap_rights_vclear()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range clearrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
+func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
+ // This is essentially a copy of cap_rights_is_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return false, fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return false, errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return false, errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return false, err
+ }
+ if i >= n {
+ return false, errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return false, errors.New("index mismatch")
+ }
+ if (rights.Rights[i] & right) != right {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func capright(idx uint64, bit uint64) uint64 {
+ return ((1 << (57 + idx)) | bit)
+}
+
+// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
+// See man cap_rights_init(3) and rights(4).
+func CapRightsInit(rights []uint64) (*CapRights, error) {
+ var r CapRights
+ r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
+ r.Rights[1] = capright(1, 0)
+
+ err := CapRightsSet(&r, rights)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
+// The capability rights on fd can never be increased by CapRightsLimit.
+// See man cap_rights_limit(2) and rights(4).
+func CapRightsLimit(fd uintptr, rights *CapRights) error {
+ return capRightsLimit(int(fd), rights)
+}
+
+// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
+// See man cap_rights_get(3) and rights(4).
+func CapRightsGet(fd uintptr) (*CapRights, error) {
+ r, err := CapRightsInit(nil)
+ if err != nil {
+ return nil, err
+ }
+ err = capRightsGet(capRightsGoVersion, int(fd), r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
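
These wrappers correspond to FreeBSD's Capsicum rights API. A rough sketch that restricts an already-open descriptor to read-oriented rights (FreeBSD only; the file path and the particular CAP_* constants are illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/passwd") // illustrative path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// From here on, only read/seek/fstat are permitted on this descriptor;
	// rights can only ever be narrowed further, never widened.
	rights, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_SEEK, unix.CAP_FSTAT})
	if err != nil {
		fmt.Println("CapRightsInit:", err)
		return
	}
	if err := unix.CapRightsLimit(f.Fd(), rights); err != nil {
		fmt.Println("CapRightsLimit:", err)
		return
	}

	buf := make([]byte, 16)
	n, _ := f.Read(buf)
	fmt.Printf("read %d bytes under reduced rights\n", n)
}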
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 000000000..394a3965b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
new file mode 100644
index 000000000..65a998508
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc
+// +build aix,ppc
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0xffff)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return uint64(((major) << 16) | (minor))
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
new file mode 100644
index 000000000..8fc08ad0a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc64
+// +build aix,ppc64
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x3fffffff00000000) >> 32)
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ return uint32((dev & 0x00000000ffffffff) >> 0)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ var DEVNO64 uint64
+ DEVNO64 = 0x8000000000000000
+ return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 000000000..8d1dc0fa3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 000000000..8502f202c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 000000000..eba3b4bd3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
new file mode 100644
index 000000000..d165d6f30
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by the Linux kernel and glibc.
+//
+// The information below is extracted and adapted from bits/sysmacros.h in the
+// glibc sources:
+//
+// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
+// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
+// number and m is a hex digit of the minor number. This is backward compatible
+// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
+// backward compatible with the Linux kernel, which for some architectures uses
+// 32-bit dev_t, encoded as mmmM MMmm.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ return major
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) & 0x00000fff) << 8
+ dev |= (uint64(major) & 0xfffff000) << 32
+ dev |= (uint64(minor) & 0x000000ff) << 0
+ dev |= (uint64(minor) & 0xffffff00) << 12
+ return dev
+}
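
A quick round trip through the encoding described above; 8:1 is the conventional major:minor pair for /dev/sda1 and packs into the low bits, while larger values spill into the high words:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	dev := unix.Mkdev(8, 1) // 0x801 for small major/minor values
	fmt.Printf("dev=%#x major=%d minor=%d\n", dev, unix.Major(dev), unix.Minor(dev))

	// Larger numbers use the MMMM Mmmm mmmM MMmm layout and still round-trip.
	big := unix.Mkdev(0x12345, 0xfedcba)
	fmt.Println(unix.Major(big) == 0x12345, unix.Minor(big) == 0xfedcba) // true true
}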
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 000000000..b4a203d0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x000fff00
+ dev |= (uint64(minor) << 12) & 0xfff00000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 000000000..f3430c42f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x0000ff00
+ dev |= (uint64(minor) << 8) & 0xffff0000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go
new file mode 100644
index 000000000..a388e59a0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by z/OS.
+//
+// The information below is extracted and adapted from macros.
+
+package unix
+
+// Major returns the major component of a z/OS device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0x0000FFFF)
+}
+
+// Minor returns the minor component of a z/OS device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0x0000FFFF)
+}
+
+// Mkdev returns a z/OS device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 16) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
new file mode 100644
index 000000000..e74e5eaa3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -0,0 +1,103 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import "unsafe"
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
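
ParseDirent is normally fed raw buffers produced by the platform's getdents-style call; the sketch below uses Getdents as exposed on Linux, with the directory path purely illustrative:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	buf := make([]byte, 8192)
	var names []string
	for {
		n, err := unix.Getdents(fd, buf)
		if err != nil || n == 0 {
			break
		}
		// max = -1 means no limit; names accumulates across calls.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}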
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
new file mode 100644
index 000000000..a52026557
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_big.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
+// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
+
+package unix
+
+const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
new file mode 100644
index 000000000..4362f47e2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
+// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
+
+package unix
+
+const isBigEndian = false
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 000000000..29ccc4d13
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go
new file mode 100644
index 000000000..cedaf7e02
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/epoll_zos.go
@@ -0,0 +1,221 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "sync"
+)
+
+// This file simulates epoll on z/OS using poll.
+
+// Analogous to epoll_event on Linux.
+// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove?
+type EpollEvent struct {
+ Events uint32
+ Fd int32
+ Pad int32
+}
+
+const (
+ EPOLLERR = 0x8
+ EPOLLHUP = 0x10
+ EPOLLIN = 0x1
+ EPOLLMSG = 0x400
+ EPOLLOUT = 0x4
+ EPOLLPRI = 0x2
+ EPOLLRDBAND = 0x80
+ EPOLLRDNORM = 0x40
+ EPOLLWRBAND = 0x200
+ EPOLLWRNORM = 0x100
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CTL_DEL = 0x2
+ EPOLL_CTL_MOD = 0x3
+ // The following constants are part of the epoll API, but represent
+ // currently unsupported functionality on z/OS.
+ // EPOLL_CLOEXEC = 0x80000
+ // EPOLLET = 0x80000000
+ // EPOLLONESHOT = 0x40000000
+ // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notifications
+ // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode
+ // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability
+)
+
+// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL
+// constants where possible. The lower 16 bits of epoll events (uint32) can fit any system poll event (int16).
+
+// epToPollEvt converts epoll event field to poll equivalent.
+// In epoll, Events is a 32-bit field, while poll uses 16 bits.
+func epToPollEvt(events uint32) int16 {
+ var ep2p = map[uint32]int16{
+ EPOLLIN: POLLIN,
+ EPOLLOUT: POLLOUT,
+ EPOLLHUP: POLLHUP,
+ EPOLLPRI: POLLPRI,
+ EPOLLERR: POLLERR,
+ }
+
+ var pollEvts int16 = 0
+ for epEvt, pEvt := range ep2p {
+ if (events & epEvt) != 0 {
+ pollEvts |= pEvt
+ }
+ }
+
+ return pollEvts
+}
+
+// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields.
+func pToEpollEvt(revents int16) uint32 {
+ var p2ep = map[int16]uint32{
+ POLLIN: EPOLLIN,
+ POLLOUT: EPOLLOUT,
+ POLLHUP: EPOLLHUP,
+ POLLPRI: EPOLLPRI,
+ POLLERR: EPOLLERR,
+ }
+
+ var epollEvts uint32 = 0
+ for pEvt, epEvt := range p2ep {
+ if (revents & pEvt) != 0 {
+ epollEvts |= epEvt
+ }
+ }
+
+ return epollEvts
+}
+
+// Per-process epoll implementation.
+type epollImpl struct {
+ mu sync.Mutex
+ epfd2ep map[int]*eventPoll
+ nextEpfd int
+}
+
+// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances.
+// On Linux, this is an in-kernel data structure accessed through a fd.
+type eventPoll struct {
+ mu sync.Mutex
+ fds map[int]*EpollEvent
+}
+
+// epoll impl for this process.
+var impl epollImpl = epollImpl{
+ epfd2ep: make(map[int]*eventPoll),
+ nextEpfd: 0,
+}
+
+func (e *epollImpl) epollcreate(size int) (epfd int, err error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ epfd = e.nextEpfd
+ e.nextEpfd++
+
+ e.epfd2ep[epfd] = &eventPoll{
+ fds: make(map[int]*EpollEvent),
+ }
+ return epfd, nil
+}
+
+func (e *epollImpl) epollcreate1(flag int) (fd int, err error) {
+ return e.epollcreate(4)
+}
+
+func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ ep, ok := e.epfd2ep[epfd]
+ if !ok {
+
+ return EBADF
+ }
+
+ switch op {
+ case EPOLL_CTL_ADD:
+ // TODO(neeilan): When we make epfds and fds disjoint, detect epoll
+ // loops here (instances watching each other) and return ELOOP.
+ if _, ok := ep.fds[fd]; ok {
+ return EEXIST
+ }
+ ep.fds[fd] = event
+ case EPOLL_CTL_MOD:
+ if _, ok := ep.fds[fd]; !ok {
+ return ENOENT
+ }
+ ep.fds[fd] = event
+ case EPOLL_CTL_DEL:
+ if _, ok := ep.fds[fd]; !ok {
+ return ENOENT
+ }
+ delete(ep.fds, fd)
+
+ }
+ return nil
+}
+
+// Must be called while holding ep.mu
+func (ep *eventPoll) getFds() []int {
+ fds := make([]int, 0, len(ep.fds))
+ for fd := range ep.fds {
+ fds = append(fds, fd)
+ }
+ return fds
+}
+
+func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait
+ ep, ok := e.epfd2ep[epfd]
+
+ if !ok {
+ e.mu.Unlock()
+ return 0, EBADF
+ }
+
+ pollfds := make([]PollFd, 0, len(ep.fds))
+ for fd, epollevt := range ep.fds {
+ pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)})
+ }
+ e.mu.Unlock()
+
+ n, err = Poll(pollfds, msec)
+ if err != nil {
+ return n, err
+ }
+
+ i := 0
+ for _, pFd := range pollfds {
+ if pFd.Revents != 0 {
+ events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)}
+ i++
+ }
+
+ if i == n {
+ break
+ }
+ }
+
+ return n, nil
+}
+
+func EpollCreate(size int) (fd int, err error) {
+ return impl.epollcreate(size)
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return impl.epollcreate1(flag)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return impl.epollctl(epfd, op, fd, event)
+}
+
+// Because EpollWait mutates events, the caller is expected to coordinate
+// concurrent access if calling with the same epfd from multiple goroutines.
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return impl.epollwait(epfd, events, msec)
+}
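
Because the shim keeps the Linux epoll surface, calling code looks the same on z/OS as on Linux. A hedged sketch of the call pattern, watching stdin for readability with a one-second timeout:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		fmt.Println("EpollCreate1:", err)
		return
	}

	fd := 0 // stdin
	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(fd)}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev); err != nil {
		fmt.Println("EpollCtl:", err)
		return
	}

	events := make([]unix.EpollEvent, 8)
	n, err := unix.EpollWait(epfd, events, 1000) // 1s timeout
	if err != nil {
		fmt.Println("EpollWait:", err)
		return
	}
	for _, e := range events[:n] {
		fmt.Printf("fd %d ready, events %#x\n", e.Fd, e.Events)
	}
}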
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
new file mode 100644
index 000000000..761db66ef
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
@@ -0,0 +1,233 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_FAITH = 0x16
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
new file mode 100644
index 000000000..070f44b65
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
@@ -0,0 +1,233 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_FAITH = 0x16
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+ SIOCADDRT = 0x8040720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8040720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
new file mode 100644
index 000000000..856dca325
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
@@ -0,0 +1,226 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+
+ // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go
+ IFF_SMART = 0x20
+ IFT_FAITH = 0xf2
+ IFT_IPXIP = 0xf9
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
new file mode 100644
index 000000000..946dcf3fc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go
new file mode 100644
index 000000000..e9b991258
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd
+// +build dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+func fcntl(fd int, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(valptr), err
+}
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
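FcntlInt and FcntlFlock cover the two common fcntl shapes: an integer argument and a pointer to a Flock_t. A short sketch, assuming a Linux target for the F_GETFL/F_SETLK/F_WRLCK constants (the /tmp path is arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/fcntl-demo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Integer form: read the file status flags.
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status flags: %#x\n", flags)

	// Struct form: take a non-blocking write lock on the whole file.
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: 0, // SEEK_SET; Start and Len of zero mean "entire file"
	}
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
		panic(err)
	}
}
```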
diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
new file mode 100644
index 000000000..a9911c7c1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
+ return err
+}
+
+// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
+func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
+ return err
+}
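On darwin the same helpers sit on top of the libSystem fcntl, and FcntlFstore adds F_PREALLOCATE support. A hedged, darwin-only sketch (constant and field names as defined by this package; the path and 1 MiB size are arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/prealloc-demo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Ask for 1 MiB of (preferably contiguous) space past the current EOF.
	// Bytesalloc is filled in by the kernel with what was actually reserved.
	fst := unix.Fstore_t{
		Flags:   unix.F_ALLOCATECONTIG,
		Posmode: unix.F_PEOFPOSMODE,
		Length:  1 << 20,
	}
	if err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, &fst); err != nil {
		panic(err) // some filesystems refuse contiguous allocation
	}
	fmt.Println("bytes allocated:", fst.Bytesalloc)
}
```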
diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
new file mode 100644
index 000000000..29d44808b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
+// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
new file mode 100644
index 000000000..a8068f94f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package unix
+
+// Set adds fd to the set fds.
+func (fds *FdSet) Set(fd int) {
+ fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// Clear removes fd from the set fds.
+func (fds *FdSet) Clear(fd int) {
+ fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// IsSet returns whether fd is in the set fds.
+func (fds *FdSet) IsSet(fd int) bool {
+ return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
+}
+
+// Zero clears the set fds.
+func (fds *FdSet) Zero() {
+ for i := range fds.Bits {
+ fds.Bits[i] = 0
+ }
+}
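These FdSet helpers are the Go counterparts of the FD_SET/FD_CLR/FD_ISSET/FD_ZERO macros. A small sketch pairing them with Select (assumes a platform where unix.Select takes *FdSet and *Timeval, e.g. Linux):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	p := make([]int, 2)
	if err := unix.Pipe(p); err != nil {
		panic(err)
	}
	defer unix.Close(p[0])
	defer unix.Close(p[1])
	unix.Write(p[1], []byte("x")) // make the read end ready

	var rfds unix.FdSet
	rfds.Zero()
	rfds.Set(p[0])

	tv := unix.Timeval{Sec: 1} // wait at most one second
	n, err := unix.Select(p[0]+1, &rfds, nil, nil, &tv)
	if err != nil {
		panic(err)
	}
	fmt.Println("descriptors ready:", n, "read end set:", rfds.IsSet(p[0]))
}
```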
diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go
new file mode 100644
index 000000000..e377cc9f4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent.
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ var stat_v Statvfs_t
+ err = Fstatvfs(fd, &stat_v)
+ if err == nil {
+ // populate stat
+ stat.Type = 0
+ stat.Bsize = stat_v.Bsize
+ stat.Blocks = stat_v.Blocks
+ stat.Bfree = stat_v.Bfree
+ stat.Bavail = stat_v.Bavail
+ stat.Files = stat_v.Files
+ stat.Ffree = stat_v.Ffree
+ stat.Fsid = stat_v.Fsid
+ stat.Namelen = stat_v.Namemax
+ stat.Frsize = stat_v.Frsize
+ stat.Flags = stat_v.Flag
+ for passn := 0; passn < 5; passn++ {
+ switch passn {
+ case 0:
+ err = tryGetmntent64(stat)
+ break
+ case 1:
+ err = tryGetmntent128(stat)
+ break
+ case 2:
+ err = tryGetmntent256(stat)
+ break
+ case 3:
+ err = tryGetmntent512(stat)
+ break
+ case 4:
+ err = tryGetmntent1024(stat)
+ break
+ default:
+ break
+ }
+ //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred)
+ if err == nil || err != nil && err != ERANGE {
+ break
+ }
+ }
+ }
+ return err
+}
+
+func tryGetmntent64(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [64]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent128(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [128]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent256(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [256]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent512(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [512]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent1024(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [1024]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
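Callers simply use Fstatfs as on other Unixes; the retry over progressively larger W_Mntent buffers is internal to the z/OS port. A brief sketch (field names as in this package's Statfs_t; the root directory is only an example):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	var st unix.Statfs_t
	if err := unix.Fstatfs(fd, &st); err != nil {
		panic(err)
	}
	// On z/OS these fields come from fstatvfs (plus w_getmntent for Type);
	// on other platforms they come from the native statfs.
	fmt.Printf("bsize=%d blocks=%d bfree=%d\n", st.Bsize, st.Blocks, st.Bfree)
}
```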
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 000000000..0dee23222
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && !aix
+// +build gccgo,!aix
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 000000000..2cb1fefac
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+// +build !aix
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
+
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 000000000..e60e49a3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && linux && amd64
+// +build gccgo,linux,amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
new file mode 100644
index 000000000..6c7ad052e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -0,0 +1,75 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req uint, value int) error {
+ v := int32(value)
+ return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
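A typical use of these typed ioctl helpers is reading the terminal window size. Sketch only, assuming stdout is a terminal and TIOCGWINSZ is the appropriate request on the target platform:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		panic(err) // most likely stdout is not a terminal
	}
	fmt.Printf("terminal is %d columns x %d rows\n", ws.Col, ws.Row)
}
```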
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
new file mode 100644
index 000000000..48773f730
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -0,0 +1,196 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// IoctlRetInt performs an ioctl operation specified by req on a device
+// associated with opened file descriptor fd, and returns a non-negative
+// integer that is returned by the ioctl syscall.
+func IoctlRetInt(fd int, req uint) (int, error) {
+ ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(ret), nil
+}
+
+func IoctlGetUint32(fd int, req uint) (uint32, error) {
+ var value uint32
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetRTCTime(fd int) (*RTCTime, error) {
+ var value RTCTime
+ err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlSetRTCTime(fd int, value *RTCTime) error {
+ err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
+ var value RTCWkAlrm
+ err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
+ err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+type ifreqEthtool struct {
+ name [IFNAMSIZ]byte
+ data unsafe.Pointer
+}
+
+// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
+// device specified by ifname.
+func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
+ // Leave room for terminating NULL byte.
+ if len(ifname) >= IFNAMSIZ {
+ return nil, EINVAL
+ }
+
+ value := EthtoolDrvinfo{
+ Cmd: ETHTOOL_GDRVINFO,
+ }
+ ifreq := ifreqEthtool{
+ data: unsafe.Pointer(&value),
+ }
+ copy(ifreq.name[:], ifname)
+ err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq)))
+ runtime.KeepAlive(ifreq)
+ return &value, err
+}
+
+// IoctlGetWatchdogInfo fetches information about a watchdog device from the
+// Linux watchdog API. For more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
+ var value WatchdogInfo
+ err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
+// more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlWatchdogKeepalive(fd int) error {
+ return ioctl(fd, WDIOC_KEEPALIVE, 0)
+}
+
+// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
+// range of data conveyed in value to the file associated with the file
+// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
+func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
+ err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
+// associated with the file description srcFd to the file associated with the
+// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
+func IoctlFileClone(destFd, srcFd int) error {
+ return ioctl(destFd, FICLONE, uintptr(srcFd))
+}
+
+type FileDedupeRange struct {
+ Src_offset uint64
+ Src_length uint64
+ Reserved1 uint16
+ Reserved2 uint32
+ Info []FileDedupeRangeInfo
+}
+
+type FileDedupeRangeInfo struct {
+ Dest_fd int64
+ Dest_offset uint64
+ Bytes_deduped uint64
+ Status int32
+ Reserved uint32
+}
+
+// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
+// range of data conveyed in value from the file associated with the file
+// descriptor srcFd to the value.Info destinations. See the
+// ioctl_fideduperange(2) man page for details.
+func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
+ buf := make([]byte, SizeofRawFileDedupeRange+
+ len(value.Info)*SizeofRawFileDedupeRangeInfo)
+ rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
+ rawrange.Src_offset = value.Src_offset
+ rawrange.Src_length = value.Src_length
+ rawrange.Dest_count = uint16(len(value.Info))
+ rawrange.Reserved1 = value.Reserved1
+ rawrange.Reserved2 = value.Reserved2
+
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ rawinfo.Dest_fd = value.Info[i].Dest_fd
+ rawinfo.Dest_offset = value.Info[i].Dest_offset
+ rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
+ rawinfo.Status = value.Info[i].Status
+ rawinfo.Reserved = value.Info[i].Reserved
+ }
+
+ err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0])))
+
+ // Output
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ value.Info[i].Dest_fd = rawinfo.Dest_fd
+ value.Info[i].Dest_offset = rawinfo.Dest_offset
+ value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
+ value.Info[i].Status = rawinfo.Status
+ value.Info[i].Reserved = rawinfo.Reserved
+ }
+
+ return err
+}
+
+func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
+ err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
+ var value HIDRawDevInfo
+ err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlHIDGetRawName(fd int) (string, error) {
+ var value [_HIDIOCGRAWNAME_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawPhys(fd int) (string, error) {
+ var value [_HIDIOCGRAWPHYS_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawUniq(fd int) (string, error) {
+ var value [_HIDIOCGRAWUNIQ_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
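As an illustration of these Linux-only helpers, the ethtool driver query needs any socket fd as the ioctl target; "eth0" below is a placeholder interface name and error handling is abbreviated:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Any datagram socket serves as the ioctl target for SIOCETHTOOL.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	info, err := unix.IoctlGetEthtoolDrvinfo(fd, "eth0") // placeholder name
	if err != nil {
		panic(err)
	}
	fmt.Println("driver:", unix.ByteSliceToString(info.Driver[:]),
		"version:", unix.ByteSliceToString(info.Version[:]))
}
```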
diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go
new file mode 100644
index 000000000..5384e7d91
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -0,0 +1,74 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
+ return ENOSYS
+ }
+ err := Tcsetattr(fd, int(req), value)
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+// IoctlGetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCGETS
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ if req != TCGETS {
+ return &value, ENOSYS
+ }
+ err := Tcgetattr(fd, &value)
+ return &value, err
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100644
index 000000000..396aadf86
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,231 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+mkasm=
+run="sh"
+cmd=""
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ cmd="echo"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
+ # Files generated through docker (use $cmd so you can Ctrl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
+ exit
+fi
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_mips64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+solaris_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+illumos_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors=
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdout.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "darwin" ]; then
+ # 1.12 and later, syscalls via libSystem
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ # 1.13 and later, syscalls via libSystem (including syscallPtr)
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
+ elif [ "$GOOS" == "illumos" ]; then
+ # illumos code generation requires a --illumos switch
+ echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
+ # illumos implies solaris, so solaris code generation is also required
+ echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100644
index 000000000..3f670faba
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,758 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+fi
+
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_AIX='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define AF_LOCAL AF_UNIX
+'
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#define __APPLE_USE_RFC_3542
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_DragonFly='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_FreeBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if __FreeBSD__ >= 10
+#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
+#undef SIOCAIFADDR
+#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
+#undef SIOCSIFPHYADDR
+#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
+#endif
+'
+
+includes_Linux='
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#ifndef __LP64__
+#define _FILE_OFFSET_BITS 64
+#endif
+#define _GNU_SOURCE
+
+// <termios.h> is broken on powerpc64, as it fails to include definitions of
+// these structures. We just include them copied from <asm/termbits.h>.
+#if defined(__powerpc__)
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#if defined(__sparc__)
+// On sparc{,64}, the kernel defines struct termios2 itself which clashes with the
+// definition in glibc. As only the error constants are needed here, include the
+// generic termbits.h (which is included by termbits.h on sparc).
+#include <asm-generic/termbits.h>
+#else
+#include <asm/termbits.h>
+#endif
+
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
+#ifndef PTRACE_GETREGS
+#define PTRACE_GETREGS 0xc
+#endif
+
+#ifndef PTRACE_SETREGS
+#define PTRACE_SETREGS 0xd
+#endif
+
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
+#ifdef SOL_BLUETOOTH
+// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
+// but it is already in bluetooth_linux.go
+#undef SOL_BLUETOOTH
+#endif
+
+// Certain constants are missing from the fs/crypto UAPI
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+#define FS_MAX_KEY_SIZE 64
+
+// The code generator produces -0x1 for (~0), but an unsigned value is necessary
+// for the tipc_subscr timeout __u32 field.
+#undef TIPC_WAIT_FOREVER
+#define TIPC_WAIT_FOREVER 0xffffffff
+
+// Copied from linux/l2tp.h
+// Including linux/l2tp.h here causes conflicts between linux/in.h
+// and netinet/in.h included via net/route.h above.
+#define IPPROTO_L2TP 115
+
+// Copied from linux/hid.h.
+// Keep in sync with the size of the referenced fields.
+#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name)
+#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys)
+#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq)
+
+#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN)
+#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
+#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+
+'
+
+includes_NetBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include