diff --git a/.gitignore b/.gitignore
index 60d6568a2..b6c8a288d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
# Test binary, build with `go test -c`
*.test
+.coverprofile
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
@@ -13,6 +14,12 @@
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
+# IDE Files
+.vscode/
+
+# Test Coverage Profile Output
+.coverprofile
+
# output path
OPATH
@@ -24,11 +31,13 @@ OPATH
.Trashes
ehthumbs.db
Thumbs.db
-vendor
-
-# The binary
-trickster
# dep
cacheKey.data
cacheKey.expiration
+
+# GoLand IDE
+.idea/
+
+# log testing
+out.log
diff --git a/.travis.yml b/.travis.yml
index 3d4f3cf88..2c39f6e11 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,17 +1,21 @@
+env:
+ GO111MODULE: "on"
+
matrix:
include:
- language: python
install:
- pip install codespell
script:
- - codespell --skip=".git,*.png"
+ - codespell --skip="vendor,*.git,*.png,*.pdf,*.tiff,*.plist,*.pem,rangesim*.go,*.gz"
- language: go
go:
- - "1.11.x"
+ - "1.13.x"
- master
before_install:
- go get github.com/mattn/goveralls
script:
- make style test build
- - $GOPATH/bin/goveralls -service=travis-ci
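+    # exclude generated (*_gen.go) files from the coverage profile before uploading to Coveralls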
+ - sed -i -e '/^.*_gen\.go:.*$/d' .coverprofile
+ - $GOPATH/bin/goveralls -coverprofile=.coverprofile -service=travis-ci
diff --git a/Makefile b/Makefile
index 389329c1e..dd41fc410 100644
--- a/Makefile
+++ b/Makefile
@@ -1,45 +1,70 @@
-DEFAULT: build
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-GO ?= go
-GOFMT ?= $(GO)fmt
-FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
-DEP := $(FIRST_GOPATH)/bin/dep
-TRICKSTER := $(FIRST_GOPATH)/bin/trickster
+DEFAULT: build
-PROGVER = $(shell grep 'applicationVersion = ' main.go | awk '{print $$3}' | sed -e 's/\"//g')
+GO ?= go
+GOFMT ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+TRICKSTER_MAIN := cmd/trickster
+TRICKSTER := $(FIRST_GOPATH)/bin/trickster
+PROGVER := $(shell grep 'applicationVersion = ' $(TRICKSTER_MAIN)/main.go | awk '{print $$3}' | sed -e 's/\"//g')
+BUILD_TIME := $(shell date -u +%FT%T%z)
+GIT_LATEST_COMMIT_ID := $(shell git rev-parse HEAD)
+GO_VER := $(shell go version | awk '{print $$3}')
+LDFLAGS=-ldflags "-s -X main.applicationBuildTime=$(BUILD_TIME) -X main.applicationGitCommitID=$(GIT_LATEST_COMMIT_ID) -X main.applicationGoVersion=$(GO_VER) -X main.applicationGoArch=$(GOARCH)"
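+# the -X flags above inject build metadata into the application* variables declared in $(TRICKSTER_MAIN)/main.go at link time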
+GO111MODULE ?= on
+export GO111MODULE
.PHONY: go-mod-vendor
go-mod-vendor:
- GO111MODULE=on $(GO) mod vendor
+ $(GO) mod vendor
+
+.PHONY: go-mod-tidy
+go-mod-tidy:
+ $(GO) mod tidy
+
+.PHONY: test-go-mod
+test-go-mod:
+	@git diff --quiet --exit-code go.mod go.sum || echo "There are changes to go.mod and go.sum which need to be committed"
.PHONY: build
-build: go-mod-vendor
- GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) go build -a -v
+build:
+ GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(LDFLAGS) -o ./OPATH/trickster -a -v $(TRICKSTER_MAIN)/main.go
rpm: build
mkdir -p ./OPATH/SOURCES
- cp -p trickster ./OPATH/SOURCES/
- cp conf/trickster.service ./OPATH/SOURCES/
+ cp -p ./OPATH/trickster ./OPATH/SOURCES/
+ cp $(TRICKSTER_MAIN)/conf/trickster.service ./OPATH/SOURCES/
sed -e 's%^# log_file =.*$$%log_file = "/var/log/trickster/trickster.log"%' \
-e 's%prometheus:9090%localhost:9090%' \
- < conf/example.conf > ./OPATH/SOURCES/trickster.conf
+ < $(TRICKSTER_MAIN)/conf/example.conf > ./OPATH/SOURCES/trickster.conf
rpmbuild --define "_topdir $(CURDIR)/OPATH" \
--define "_version $(PROGVER)" \
--define "_release 1" \
-ba deploy/packaging/trickster.spec
.PHONY: install
-install: go-mod-vendor
- echo go build -o $(TRICKSTER) $(PROGVER)
+install:
+	$(GO) build $(LDFLAGS) -o $(TRICKSTER) $(TRICKSTER_MAIN)/main.go
.PHONY: release
release: build release-artifacts docker docker-release
.PHONY: release-artifacts
release-artifacts:
- GOOS=darwin GOARCH=amd64 go build -o ./OPATH/trickster-$(PROGVER).darwin-amd64 && tar cvfz ./OPATH/trickster-$(PROGVER).darwin-amd64.tar.gz ./OPATH/trickster-$(PROGVER).darwin-amd64
- GOOS=linux GOARCH=amd64 go build -o ./OPATH/trickster-$(PROGVER).linux-amd64 && tar cvfz ./OPATH/trickster-$(PROGVER).linux-amd64.tar.gz ./OPATH/trickster-$(PROGVER).linux-amd64
+ GOOS=darwin GOARCH=amd64 $(GO) build -o ./OPATH/trickster-$(PROGVER).darwin-amd64 $(TRICKSTER_MAIN)/main.go && gzip -f ./OPATH/trickster-$(PROGVER).darwin-amd64
+ GOOS=linux GOARCH=amd64 $(GO) build -o ./OPATH/trickster-$(PROGVER).linux-amd64 $(TRICKSTER_MAIN)/main.go && gzip -f ./OPATH/trickster-$(PROGVER).linux-amd64
+# Minikube and helm bootstrapping are done via deploy/helm/Makefile
.PHONY: helm-local
helm-local:
kubectl config use-context minikube --namespace=trickster
@@ -49,6 +74,7 @@ helm-local:
kubectl set image deployment/dev-trickster trickster=trickster:dev -n trickster
kubectl scale --replicas=1 deployment/dev-trickster -n trickster
+# Minikube and helm bootstrapping are done via deploy/kube/Makefile
.PHONY: kube-local
kube-local:
kubectl config use-context minikube
@@ -72,13 +98,16 @@ style:
! gofmt -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
.PHONY: test
-test: go-mod-vendor
- go test -o $(TRICKSTER) -v ./...
+test:
+ @go test -v -coverprofile=.coverprofile ./...
+
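+# bench runs benchmarks only (the -run pattern matches no unit tests) and filters Trickster's own log lines from the output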
+.PHONY: bench
+bench:
+ $(GO) test -v -coverprofile=.coverprofile ./... -run=nonthingplease -bench=. | grep -v ' app=trickster '
.PHONY: test-cover
-test-cover: go-mod-vendor
- go test -o $(TRICKSTER) -coverprofile=cover.out ./...
- go tool cover -html=cover.out
+test-cover: test
+ $(GO) tool cover -html=.coverprofile
.PHONY: clean
clean:
diff --git a/README.md b/README.md
index eaf8dcc75..9dc21e0e6 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,64 @@
-#
+#
+
[](https://travis-ci.org/Comcast/trickster/)
[](https://goreportcard.com/report/github.com/Comcast/trickster)
-[](https://coveralls.io/github/Comcast/trickster?branch=master)
-[](https://bestpractices.coreinfrastructure.org/en/projects/2518)
+[](https://coveralls.io/github/Comcast/trickster?branch=next)
+[](https://bestpractices.coreinfrastructure.org/en/projects/2518)
+[](https://hub.docker.com/r/tricksterio/trickster)
+[](https://twitter.com/tricksterio)
+
+Trickster is an HTTP reverse proxy/cache for HTTP applications and a dashboard query accelerator for time series databases.
+
+
+
+## HTTP Reverse Proxy Cache
+
+Trickster is a fully-featured HTTP Reverse Proxy Cache for HTTP applications like static file servers and web APIs.
+
+### Trickster Feature Highlights
+
+* [Supports TLS](./docs/tls.md) frontend termination and backend origination
+* Offers several options for a [caching layer](./docs/caches.md), including in-memory, filesystem, Redis and bbolt
+* [Highly customizable](./docs/configuring.md), using simple configuration settings, [down to the HTTP Path](./docs/paths.md)
+* Built-in Prometheus [metrics](./docs/metrics.md) and customizable [Health Check](./docs/health.md) Endpoints for end-to-end monitoring
+* [Negative Caching](./docs/negative-caching.md) to prevent domino effect outages
+* High-performance [Collapsed Forwarding](./docs/collapsed-forwarding.md)
+* Best-in-class [Range Request caching and acceleration](./docs/range_request.md).
+
+## Dashboard Acceleration
+
+Trickster dramatically improves dashboard chart rendering times for end users, while eliminating redundant computations on the TSDBs it fronts. In short, Trickster makes read-heavy Dashboard/TSDB environments, as well as those with highly-cardinalized datasets, significantly more performant and scalable.
-Trickster is a reverse proxy cache for the [Prometheus](https://github.com/prometheus/prometheus) [HTTP APIv1](https://prometheus.io/docs/prometheus/latest/querying/api/) that dramatically accelerates dashboard rendering times for any series queried from Prometheus.
+## Compatibility
-#### NOTE: Trickster is currently actively developed under the [next](https://github.com/Comcast/trickster/tree/next) Branch for our milestone 1.0 Release.
+Trickster works with virtually any Dashboard application that makes queries to any of these TSDBs:
-
+ Prometheus
-## How it works
+ ClickHouse
+
+ InfluxDB
+
+ Circonus IRONdb
+
+See the [Supported Origin Types](./docs/supported-origin-types.md) document for full details.
+
+## How Trickster Accelerates Time Series
### 1. Delta Proxy
-Most dashboards request the entire time range of data from the time series database, every time a dashboard loads or reloads. Trickster's Delta Proxy inspects the time range of a client query to determine what data points are already cached, and requests from Prometheus only the data points still needed to service the client request. This results in dramatically faster chart load times for everyone, since Prometheus is queried only for tiny incremental changes on each dashboard load, rather than several hundred data points of duplicative data.
+
+Most dashboards request from a time series database the entire time range of data they wish to present, every time a user's dashboard loads, as well as on every auto-refresh. Trickster's Delta Proxy inspects the time range of a client query to determine what data points are already cached, and requests from the tsdb only the data points still needed to service the client request. This results in dramatically faster chart load times for everyone, since the tsdb is queried only for tiny incremental changes on each dashboard load, rather than several hundred data points of duplicative data.
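+
+In simplified terms, the delta calculation amounts to something like the following sketch (an illustration only, not the actual implementation; the `extent` type and `delta` function here are hypothetical):
+
+    // Given a cached extent and a requested extent (unix seconds, assumed to overlap),
+    // return the sub-ranges that must still be fetched from the origin.
+    type extent struct{ start, end int64 }
+
+    func delta(cached, req extent) []extent {
+        var need []extent
+        if req.start < cached.start {
+            need = append(need, extent{req.start, cached.start}) // older data not yet cached
+        }
+        if req.end > cached.end {
+            need = append(need, extent{cached.end, req.end}) // newer data not yet cached
+        }
+        return need
+    }
+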
### 2. Step Boundary Normalization
-When Trickster requests data from Prometheus, it adjusts the clients's requested time range slightly to ensure that all data points returned by Prometheus are aligned to normalized step boundaries. For example, if the step is 300s, all data points will fall on the clock 0's and 5's. This ensures that the data is highly cacheable, is conveyed visually to users in a more familiar way, and that all dashboard users see identical data on their screens.
+
+When Trickster requests data from a tsdb, it adjusts the client's requested time range slightly to ensure that all data points returned are aligned to normalized step boundaries. For example, if the step is 300s, all data points will fall on the clock 0's and 5's. This ensures that the data is highly cacheable, is conveyed visually to users in a more familiar way, and that all dashboard users see identical data on their screens.
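+
+Normalizing a requested range outward to step boundaries amounts to something like this sketch (illustrative only; `normalize` is a hypothetical helper):
+
+    // Snap start down, and end up, to the nearest multiples of step (all values in seconds).
+    func normalize(start, end, step int64) (int64, int64) {
+        start = start - (start % step)
+        if end%step != 0 {
+            end = end + step - (end % step)
+        }
+        return start, end
+    }
+
+With a 300s step, for example, a request for 13:21:10 to 13:37:30 becomes 13:20:00 to 13:40:00.
+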
### 3. Fast Forward
+
Trickster's Fast Forward feature ensures that even with step boundary normalization, real-time graphs still always show the most recent data, regardless of how far away the next step boundary is. For example, if your chart step is 300s, and the time is currently 1:21p, you would normally be waiting another four minutes for a new data point at 1:25p. Trickster will break the step interval for the most recent data point and always include it in the response to clients requesting real-time data.
@@ -52,7 +88,6 @@ binary into your `GOPATH`:
$ go get github.com/Comcast/trickster
$ trickster -origin http://prometheus.example.com:9090
-
You can also clone the repository yourself and build using `make`:
$ mkdir -p $GOPATH/src/github.com/Comcast
@@ -62,25 +97,26 @@ You can also clone the repository yourself and build using `make`:
$ make build
$ ./trickster -origin http://prometheus.example.com:9090
-The Makefile provides several targets:
+The Makefile provides several targets, including:
- * *build*: build the `trickster` binary
- * *docker*: build a docker container for the current `HEAD`
- * *clean*: delete previously-built binaries and object files
+* *build*: build the `trickster` binary
+* *docker*: build a docker container for the current `HEAD`
+* *clean*: delete previously-built binaries and object files
+* *test*: run unit tests
## More information
- * Refer to the docs directory for additional info.
+* Refer to the [docs](./docs/) directory for additional info.
## Contributing
Refer to [CONTRIBUTING.md](CONTRIBUTING.md)
-## Who uses Trickster?
+## Who Is Using Trickster
As the Trickster community grows, we'd like to keep track of who is using it in their stack. We invite you to submit a PR with your company name and @githubhandle to be included on the list.
1. [Comcast](https://comcast.github.io) [[@jranson](https://github.com/jranson)]
2. [Selfnet e.V.](https://www.selfnet.de/) [[@ThoreKr](https://github.com/ThoreKr)]
3. [swarmstack](https://github.com/swarmstack) [[@mh720](https://github.com/mh720)]
-4. [Hostinger](https://www.hostinger.com/) [[@ton31337](https://github.com/ton31337)]
\ No newline at end of file
+4. [Hostinger](https://www.hostinger.com/) [[@ton31337](https://github.com/ton31337)]
diff --git a/boltdb.go b/boltdb.go
deleted file mode 100644
index 2c2fe7488..000000000
--- a/boltdb.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-
- bolt "github.com/coreos/bbolt"
- "github.com/go-kit/kit/log/level"
-)
-
-// BoltDBCache describes a BoltDB Cache
-type BoltDBCache struct {
- T *TricksterHandler
- Config BoltDBCacheConfig
- dbh *bolt.DB
-}
-
-// Connect instantiates the BoltDBCache mutex map and starts the Expired Entry Reaper goroutine
-func (c *BoltDBCache) Connect() error {
- level.Info(c.T.Logger).Log("event", "boltdb cache setup", "cacheFile", c.Config.Filename)
-
- var err error
- c.dbh, err = bolt.Open(c.Config.Filename, 0644, &bolt.Options{Timeout: 1 * time.Second})
- if err != nil {
- return err
- }
-
- err = c.dbh.Update(func(tx *bolt.Tx) error {
- tx.CreateBucketIfNotExists([]byte(c.Config.Bucket))
- if err != nil {
- return fmt.Errorf("create bucket: %s", err)
- }
- return nil
- })
- if err != nil {
- return err
- }
-
- go c.Reap()
- return nil
-}
-
-// Store places an object in the cache using the specified key and ttl
-func (c *BoltDBCache) Store(cacheKey string, data string, ttl int64) error {
-
- expKey, dataKey := c.getKeyNames(cacheKey)
- expiration := []byte(strconv.FormatInt(time.Now().Unix()+ttl, 10))
-
- err := c.dbh.Update(func(tx *bolt.Tx) error {
-
- b := tx.Bucket([]byte(c.Config.Bucket))
-
- err := b.Put([]byte(dataKey), []byte(data))
- if err != nil {
- return err
- }
-
- return b.Put([]byte(expKey), expiration)
- })
- if err != nil {
- return err
- }
-
- level.Debug(c.T.Logger).Log("event", "boltdb cache store", "key", dataKey, "expKey", expKey)
-
- return nil
-}
-
-// Retrieve looks for an object in cache and returns it (or an error if not found)
-func (c *BoltDBCache) Retrieve(cacheKey string) (string, error) {
-
- level.Debug(c.T.Logger).Log("event", "boltdb cache retrieve", "key", cacheKey)
-
- _, dataKey := c.getKeyNames(cacheKey)
-
- c.checkExpiration(cacheKey)
-
- return c.retrieve(dataKey)
-}
-
-// retrieve looks for an object in cache and returns it (or an error if not found)
-func (c *BoltDBCache) retrieve(cacheKey string) (string, error) {
-
- content := ""
-
- err := c.dbh.View(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte(c.Config.Bucket))
- v := b.Get([]byte(cacheKey))
- if v == nil {
- level.Debug(c.T.Logger).Log("event", "boltdb cache miss", "key", cacheKey)
- return fmt.Errorf("Value for key [%s] not in cache", cacheKey)
- }
- content = string(v)
- return nil
- })
- if err != nil {
- return "", err
- }
-
- return content, nil
-}
-
-// checkExpiration verifies that a cacheKey is not expired
-func (c *BoltDBCache) checkExpiration(cacheKey string) {
-
- expKey, _ := c.getKeyNames(cacheKey)
-
- content, err := c.retrieve(expKey)
- if err == nil {
- // We found this key, let's see if it's expired
- expiration, err := strconv.ParseInt(string(content), 10, 64)
- if err != nil || expiration < time.Now().Unix() {
- c.Delete(cacheKey)
- }
- }
-}
-
-// Delete removes an object in cache, if present
-func (c *BoltDBCache) Delete(cacheKey string) error {
-
- level.Debug(c.T.Logger).Log("event", "boltdb cache delete", "key", cacheKey)
-
- expKey, dataKey := c.getKeyNames(cacheKey)
-
- return c.dbh.Update(func(tx *bolt.Tx) error {
-
- b := tx.Bucket([]byte(c.Config.Bucket))
-
- err1 := b.Delete([]byte(expKey))
- if err1 != nil {
- level.Error(c.T.Logger).Log("event", "boltdb cache key delete failure", "key", expKey, "reason", err1.Error())
- }
-
- err2 := b.Delete([]byte(dataKey))
- if err2 != nil {
- level.Error(c.T.Logger).Log("event", "boltdb cache key delete failure", "key", dataKey, "reason", err2.Error())
- }
-
- c.T.ChannelCreateMtx.Lock()
-
- // Close out the channel if it exists
- if _, ok := c.T.ResponseChannels[cacheKey]; ok {
- close(c.T.ResponseChannels[cacheKey])
- delete(c.T.ResponseChannels, cacheKey)
- }
-
- // Unlock
- c.T.ChannelCreateMtx.Unlock()
-
- if err1 != nil {
- return err1
- }
- if err2 != nil {
- return err2
- }
-
- return nil
-
- })
-
-}
-
-// Reap continually iterates through the cache to find expired elements and removes them
-func (c *BoltDBCache) Reap() {
-
- for {
- c.ReapOnce()
- time.Sleep(time.Duration(c.T.Config.Caching.ReapSleepMS) * time.Millisecond)
- }
-
-}
-
-// ReapOnce makes a single iteration through the cache to to find and remove expired elements
-func (c *BoltDBCache) ReapOnce() {
-
- now := time.Now().Unix()
- expiredKeys := make([]string, 0)
-
- // Iterate through the cache to find any expiration keys and check their value
- c.dbh.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte(c.Config.Bucket))
- cursor := b.Cursor()
-
- for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
-
- expKey := string(k)
-
- if strings.HasSuffix(expKey, ".expiration") {
-
- expiration, err := strconv.ParseInt(string(v), 10, 64)
- if err != nil || expiration < now {
-
- expiredKeys = append(expiredKeys, strings.Replace(expKey, ".expiration", "", -1))
-
- }
- }
- }
-
- return nil
- })
-
- // Iterate through the expired keys so we can delete them
- for _, cacheKey := range expiredKeys {
- c.Delete(cacheKey)
- }
-
-}
-
-// Close closes the BoltDBCache
-func (c *BoltDBCache) Close() error {
- return c.dbh.Close()
-}
-
-func (c *BoltDBCache) getKeyNames(cacheKey string) (string, string) {
- return cacheKey + ".expiration", cacheKey + ".data"
-}
diff --git a/boltdb_test.go b/boltdb_test.go
deleted file mode 100644
index ff591e81a..000000000
--- a/boltdb_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "testing"
-
- "github.com/go-kit/kit/log"
-)
-
-func TestBoltDBCache_Connect(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- bc := BoltDBCache{T: &tr, Config: BoltDBCacheConfig{Filename: "/tmp/test.db", Bucket: "trickster_test"}}
-
- // it should connect
- err := bc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- bc.Close()
-
-}
-
-func TestBoltDBCache_Store(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- bc := BoltDBCache{T: &tr, Config: BoltDBCacheConfig{Filename: "/tmp/test.db", Bucket: "trickster_test"}}
-
- err := bc.Connect()
- if err != nil {
- t.Error(err)
- }
- defer bc.Close()
-
- // it should store a value
- err = bc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestBoltDBCache_Delete(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- bc := BoltDBCache{T: &tr, Config: BoltDBCacheConfig{Filename: "/tmp/test.db", Bucket: "trickster_test"}}
-
- err := bc.Connect()
- if err != nil {
- t.Error(err)
- }
- defer bc.Close()
-
- // it should store a value
- err = bc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-
- // it should store a value
- err = bc.Delete("cacheKey")
- if err != nil {
- t.Error(err)
- }
-
-}
-
-func TestBoltDBCache_Retrieve(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- bc := BoltDBCache{T: &tr, Config: BoltDBCacheConfig{Filename: "/tmp/test.db", Bucket: "trickster_test"}}
-
- err := bc.Connect()
- if err != nil {
- t.Error(err)
- }
- defer bc.Close()
-
- err = bc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-
- // it should retrieve a value
- data, err := bc.Retrieve("cacheKey")
- if err != nil {
- t.Error(err)
- }
- if data != "data" {
- t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
- }
-}
diff --git a/cache.go b/cache.go
deleted file mode 100644
index 8fe6b6e00..000000000
--- a/cache.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
-)
-
-const (
- // Cache interface types
- ctMemory = "memory"
- ctFilesystem = "filesystem"
- ctRedis = "redis"
- ctBoltDB = "boltdb"
-)
-
-// Cache is the interface for the supported caching fabrics
-// When making new cache types, Retrieve() must return an error on cache miss
-type Cache interface {
- Connect() error
- Store(cacheKey string, data string, ttl int64) error
- Retrieve(cacheKey string) (string, error)
- Reap()
- Close() error
-}
-
-func getCache(t *TricksterHandler) Cache {
- switch t.Config.Caching.CacheType {
- case ctFilesystem:
- return &FilesystemCache{Config: t.Config.Caching.Filesystem, T: t}
- case ctBoltDB:
- return &BoltDBCache{Config: t.Config.Caching.BoltDB, T: t}
- case ctRedis:
- return &RedisCache{Config: t.Config.Caching.Redis, T: t}
- case ctMemory:
- return &MemoryCache{T: t}
- default:
- panic(fmt.Errorf("Invalid cache type: %q", t.Config.Caching.CacheType))
- }
-}
diff --git a/logging_test.go b/cmd/promsim/main.go
similarity index 58%
rename from logging_test.go
rename to cmd/promsim/main.go
index 59efa6016..bd7e4a95e 100644
--- a/logging_test.go
+++ b/cmd/promsim/main.go
@@ -14,26 +14,24 @@
package main
import (
- "testing"
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/Comcast/trickster/pkg/promsim"
)
-func TestNewLogger(t *testing.T) {
- testCases := []string{
- "debug",
- "info",
- "warn",
- "error",
- "none",
- }
- // it should create a logger for each level
- for _, tc := range testCases {
- t.Run(tc, func(t *testing.T) {
- newLogger(LoggingConfig{LogLevel: tc}, tc)
- })
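+// main starts a standalone PromSim server, a simulated Prometheus-style origin used for testing; the first argument, if provided, overrides the default listen port of 9090.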
+func main() {
+
+ port := "9090"
+ if len(os.Args) > 1 && os.Args[1] != "" {
+ port = os.Args[1]
}
-}
-func TestNewLogger_LogFile(t *testing.T) {
- // it should create a logger that outputs to a log file ("out.test.log")
- newLogger(LoggingConfig{LogFile: "out.log"}, "test")
+ fmt.Println("Starting up PromSim on port", port)
+ err := http.ListenAndServe(fmt.Sprintf(":%s", port), promsim.MuxWithRoutes())
+ if err != nil {
+ fmt.Println(err.Error())
+ os.Exit(0)
+ }
}
diff --git a/cmd/rangesim/main.go b/cmd/rangesim/main.go
new file mode 100644
index 000000000..199d9d6a5
--- /dev/null
+++ b/cmd/rangesim/main.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/Comcast/trickster/pkg/rangesim"
+)
+
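+// main starts a standalone RangeSim server, a test origin for exercising HTTP Range request handling; the first argument, if provided, overrides the default listen port of 8090.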
+func main() {
+
+ port := "8090"
+ if len(os.Args) > 1 && os.Args[1] != "" {
+ port = os.Args[1]
+ }
+
+ fmt.Println("Starting up RangeSim on port", port)
+
+ err := http.ListenAndServe(fmt.Sprintf(":%s", port), rangesim.MuxWithRoutes())
+ if err != nil {
+ fmt.Println(err.Error())
+ os.Exit(0)
+ }
+}
diff --git a/cmd/trickster/conf/example.conf b/cmd/trickster/conf/example.conf
new file mode 100644
index 000000000..7458e7bec
--- /dev/null
+++ b/cmd/trickster/conf/example.conf
@@ -0,0 +1,435 @@
+#
+# Trickster 1.0 Example Configuration File - Exhaustive
+#
+# To use this, run: trickster -config /path/to/example.conf
+#
+# This file contains descriptions and examples for all
+# Trickster configuration options. More documentation is
+# available at https://github.com/Comcast/trickster/docs/
+#
+# Optional configs are commented out, required configs are uncommented
+# and set to common values that let you try it out with Prometheus
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+#
+
+# [main]
+
+## instance_id allows you to run multiple Trickster processes on the same host and log to separate files
+## Useful for baremetal, not so much for elastic deployments, so only uncomment if you really need it
+## default is 0, which means ignored
+#instance_id = 0
+
+## config_handler_path provides the HTTP path to view a read-only printout of the running configuration
+## which can be reached at http://your-trickster-endpoint:port/$config_handler_path
+## default is '/trickster/config'
+# config_handler_path = '/trickster/config'
+
+## ping_handler_path provides the HTTP path you will use to perform an uptime health check against Trickster
+## which can be reached at http://your-trickster-endpoint:port/$ping_handler_path
+## default is '/trickster/ping'
+# ping_handler_path = '/trickster/ping'
+
+
+# Configuration options for the Trickster Frontend
+[frontend]
+
+# listen_port defines the port on which Trickster's Front-end HTTP Proxy server listens.
+listen_port = 9090
+
+## listen_address defines the ip on which Trickster's Front-end HTTP Proxy server listens.
+## empty by default, listening on all interfaces
+# listen_address = ''
+
+## tls_listen_address defines the ip on which Trickster's Front-end TLS Proxy server listens.
+## empty by default, listening on all interfaces
+# tls_listen_address = ''
+
+## tls_listen_port defines the port on which Trickster's Front-end TLS Proxy server listens.
+## The default is 0, which means TLS is not used, even if certificates are configured below.
+# tls_listen_port = 0
+
+## connections_limit defines the maximum number of concurrent connections
+## Trickster's Proxy server may handle at any time.
+## 0 by default, unlimited.
+# connections_limit = 0
+
+# [caches]
+
+ # [caches.default]
+ ## cache_type defines what kind of cache Trickster uses
+ ## options are 'bbolt', 'badger', 'filesystem', 'memory', and 'redis'
+ ## The default is 'memory'.
+ # cache_type = 'memory'
+
+ ### Configuration options for the Cache Index
+ ## The Cache Index handles key management and retention for bbolt, filesystem and memory
+ ## Redis and BadgerDB handle those functions natively and do not use Trickster's Cache Index
+ # [caches.default.index]
+
+ ## reap_interval_secs defines how long the Cache Index reaper sleeps between reap cycles. Default is 3 (3s)
+ # reap_interval_secs = 3
+
+ ## flush_interval_secs sets how often the Cache Index saves its metadata to the cache from application memory. Default is 5 (5s)
+ # flush_interval_secs = 5
+
+ ## max_size_bytes indicates how large the cache can grow in bytes before the Index evicts least-recently-accessed items. default is 512MB
+ # max_size_bytes = 536870912
+
+ ## max_size_backoff_bytes indicates how far below max_size_bytes the cache size must be to complete a byte-size-based eviction exercise. default is 16MB
+ # max_size_backoff_bytes = 16777216
+
+ ## max_size_objects indicates how large the cache can grow in objects before the Index evicts least-recently-accessed items. default is 0 (infinite)
+ # max_size_objects = 0
+
+ ## max_size_backoff_objects indicates how far under max_size_objects the cache size must be to complete an object-size-based eviction exercise. default is 100
+ # max_size_backoff_objects = 100
+
+ ### Configuration options when using a Redis Cache
+ # [caches.default.redis]
+
+ ## client_type indicates which kind of Redis client to use. Options are: 'standard', 'cluster' and 'sentinel'
+ ## default is 'standard'
+ # client_type = 'standard'
+
+ ### Supported by Redis (standard) #####################################
+ ### These configurations are ignored by Redis Sentinel and Redis Cluster
+ ###
+
+ ## endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
+ ## default is 'redis:6379'
+ # endpoint = 'redis:6379'
+ #
+
+ ### Supported by Redis Cluster and Redis Sentinel #####################
+ ### These configurations are ignored by Redis (standard)
+ ###
+
+ ## endpoints is used for Redis Cluster and Redis Sentinel to define a list of endpoints
+ ## default is ['redis:6379']
+ # endpoints = ['redis:6379']
+ #
+
+ ### Supported by Redis Sentinel #######################################
+ ### These configurations are ignored by Redis (standard) and Redis Cluster
+ ###
+ ## sentinel_master should be set when using Redis Sentinel to indicate the Master Node
+ # sentinel_master = ''
+ #
+
+ ### Supported by all Redis Client Types ###############################
+ ### See the go-redis documentation at https://github.com/go-redis/redis/blob/master/options.go
+ ### for more information on tuning these settings
+
+ ## protocol defines the protocol for connecting to redis ('unix' or 'tcp'). 'tcp' is default
+ # protocol = 'tcp'
+
+ ## password provides the redis password. default is empty string ''
+ # password = ''
+
+ ## db is the Database to be selected after connecting to the server. default is 0
+ # db = 0
+
+ ## max_retries is the maximum number of retries before giving up on the command
+ # max_retries = 0
+
+ ## min_retry_backoff_ms is the minimum backoff time between each retry
+ # min_retry_backoff_ms = 8
+
+ ## max_retry_backoff_ms is the maximum backoff time between each retry
+ # max_retry_backoff_ms = 512
+
+ ## dial_timeout_ms is the timeout for establishing new connections
+ # dial_timeout_ms = 5000
+
+ ## read_timeout_ms is the timeout for socket reads. If reached, commands will fail with a timeout instead of blocking.
+ # read_timeout_ms = 3000
+
+ ## write_timeout_ms is the timeout for socket writes. If reached, commands will fail with a timeout instead of blocking.
+ # write_timeout_ms = 3000
+
+ ## pool_size is the maximum number of socket connections.
+ # pool_size = 20
+
+ ## min_idle_conns is the minimum number of idle connections, which is useful when establishing new connections is slow.
+ # min_idle_conns = 0
+
+ ## max_conn_age_ms is the connection age at which client retires (closes) the connection.
+ # max_conn_age_ms = 0
+
+ ## pool_timeout_ms is the amount of time the client waits for a connection, if all connections are busy, before returning an error.
+ # pool_timeout_ms = 4000
+
+ ## idle_timeout_ms is the amount of time after which client closes idle connections.
+ # idle_timeout_ms = 300000
+
+ ## idle_check_frequency_ms is the frequency of idle checks made by idle connections reaper.
+ # idle_check_frequency_ms = 60000
+
+
+ ### Configuration options when using a Filesystem Cache ###############
+ # [caches.default.filesystem]
+ ## cache_path defines the directory location under which the Trickster cache will be maintained
+ ## default is '/tmp/trickster'
+ # cache_path = '/tmp/trickster'
+
+ ### Configuration options when using a bbolt Cache ####################
+ # [caches.default.bbolt]
+
+ ## filename defines the file where the Trickster cache will be maintained
+ ## default is 'trickster.db'
+ # filename = 'trickster.db'
+
+ ## bucket defines the name of the bbolt bucket (similar to a namespace) under which our key value store lives
+ ## default is 'trickster'
+ # bucket = 'trickster'
+
+ ### Configuration options when using a Badger cache ###################
+ # [caches.default.badger]
+ ## directory defines the directory location under which the Badger data will be maintained
+ ## default is '/tmp/trickster'
+ # directory = '/tmp/trickster'
+ ## value_directory defines the directory location under which the Badger value log will be maintained
+ ## default is '/tmp/trickster'
+ # value_directory = '/tmp/trickster'
+
+ ## Example of a second cache, sans comments, that origin configs below could use with: cache_name = 'bbolt_example'
+ #
+ # [caches.bbolt_example]
+ # cache_type = 'bbolt'
+
+ # [caches.bbolt_example.bbolt]
+ # filename = 'trickster.db'
+ # bucket = 'trickster'
+
+ # [caches.bbolt_example.index]
+ # reap_interval_secs = 3
+ # flush_interval_secs = 5
+ # max_size_bytes = 536870912
+ # size_backoff_bytes = 16777216
+
+## Negative Caching Configurations
+## A Negative Cache is a map of HTTP Status Codes that are cached for the specified duration,
+## used for temporarily caching failures (e.g., 404's for 10 seconds)
+##
+## By default, each Origin Configuration maps to the 'default' negative cache which you can
+## configure below, or can define your own negative caches, and specify them in your origin configs.
+## See /docs/negative-caching.md for more info.
+##
+
+# [negative_caches]
+# [negative_caches.default]
+# # The 'default' negative cache config, mapped by all origins by default,
+# # is empty unless you populate it. Update it by adding entries here in the format of:
+# # code = ttl_secs
+
+## Here's a pre-populated negative cache config ready to be uncommented and used in an origin config
+## The 'general' negative cache config will cache common failure response codes for 3 seconds
+# [negative_caches.general]
+# 400 = 3
+# 404 = 3
+# 500 = 3
+# 502 = 3
+
+# Configuration options for mapping Origin(s)
+[origins]
+
+ # example origin named default. default is always created with these settings unless a different origin is defined here.
+ [origins.default]
+
+ ## is_default describes whether this origin is the default origin considered when routing http requests
+ ## it is false, by default; but if you only have a single origin configured, is_default will be true unless explicitly set to false
+ # is_default = true
+
+ # origin_type identifies the origin type.
+ # Valid options are: 'prometheus', 'influxdb', 'clickhouse', 'irondb', 'reverseproxycache' (or just 'rpc')
+ # origin_type is a required configuration value
+ origin_type = 'prometheus'
+
+ ## cache_name identifies the name of the cache (configured above) that you want to use with this origin proxy. default is 'default'
+ # cache_name = 'default'
+
+ ## cache_key_prefix defines the prefix this origin appends to cache keys. When using a shared cache like Redis,
+ ## this can help partition multiple trickster instances that may have the same hostname or ip address (the default prefix)
+ # cache_key_prefix = 'example'
+
+ ## negative_cache_name identifies the name of the negative cache (configured above) to be used with this origin. default is 'default'
+ # negative_cache_name = 'default'
+
+ # origin_url provides the base upstream URL for all proxied requests to this origin.
+ # it can be as simple as http://example.com or as complex as https://example.com:8443/path/prefix
+ # origin_url is a required configuration value
+ origin_url = 'http://prometheus:9090'
+
+ ## dearticulate_upstream_ranges, when true, instructs Trickster to make multiple parallel requests to the origin for each
+ ## range needed to fulfill the client request, rather than making a multipart range request. default is false
+ # dearticulate_upstream_ranges = false
+
+ ## multipart_ranges_disabled, when true, instructs Trickster to return the full object when the client provides
+ ## a multipart range request. The default is false.
+ # multipart_ranges_disabled = false
+
+ ## compressable_types defines the Content Types that will be compressed when stored in the Trickster cache
+ ## reasonable defaults are set, so use this with care. To disable compression, set compressable_types = []
+ ## Default list is provided here:
+ # compressable_types = [ 'text/javascript', 'text/css', 'text/plain', 'text/xml', 'text/json', 'application/json', 'application/javascript', 'application/xml' ]
+
+ ## timeout_secs defines how many seconds Trickster will wait before aborting an upstream HTTP request. Default: 180s
+ # timeout_secs = 180
+
+ ## keep_alive_timeout_secs defines how long Trickster will wait before closing a keep-alive connection due to inactivity
+ ## if the origin's keep-alive timeout is shorter than Trickster's, the connection will be closed sooner. Default: 300
+ # keep_alive_timeout_secs = 300
+
+ ## max_idle_conns sets the maximum number of concurrent keep-alive connections Trickster may have opened to this origin
+ ## additional requests will be queued. Default: 20
+ # max_idle_conns = 20
+
+ ## backfill_tolerance_secs prevents new datapoints that fall within the tolerance window (relative to time.Now) from being cached
+ ## Think of it as "never cache the newest N seconds of real-time data, because it may be preliminary and subject to updates"
+ ## default is 0
+ # backfill_tolerance_secs = 0
+
+ ## timeseries_retention_factor defines the maximum number of recent timestamps to cache for a given query. Default is 1024
+ # timeseries_retention_factor = 1024
+
+ ## timeseries_ttl_secs defines the relative expiration of cached timeseries. default is 6 hours (21600 seconds)
+ # timeseries_ttl_secs = 21600
+
+ ## timeseries_eviction_method selects the methodology used to determine which timestamps are removed once
+ ## the timeseries_retention_factor limit is reached. options are 'oldest' and 'lru'. Default is 'oldest'
+ # timeseries_eviction_method = 'oldest'
+
+ ## fast_forward_disable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
+ # fast_forward_disable = false
+
+ ## fastforward_ttl_secs defines the relative expiration of cached fast forward data. default is 15s
+ # fastforward_ttl_secs = 15
+
+ ## max_ttl_secs defines the maximum allowed TTL for any object cached for this origin. default is 86400
+ # max_ttl_secs = 86400
+
+ ## revalidation_factor is the multiplier for object lifetime expiration to determine cache object TTL; default is 2
+ ## for example, if a revalidatable object has Cache-Control: max-age=300, we will cache for 10 minutes (300s * 2)
+ ## so there is an opportunity to revalidate
+ # revalidation_factor = 2
+
+ ## max_object_size_bytes defines the largest byte size an object may be before it is uncacheable due to size. default is 524288 (512k)
+ # max_object_size_bytes = 524288
+
+ ##
+ ## Each origin type implements its own defaults for health_check_upstream_url, health_check_verb and health_check_query,
+ ## which can be overridden per origin. See /docs/health.md for more information
+
+ ## health_check_upstream_url is the URL Trickster will request against this origin
+ ## when a health check request is received by Trickster via http://<trickster-host>/trickster/<origin-name>/health
+ ## this is the default value for prometheus:
+ # health_check_upstream_url = '/api/v1/query'
+
+ ## health_check_verb is the HTTP Method Trickster will use when performing an upstream health check for this origin
+ ## default is 'GET' for all origin types unless overridden per-origin here.
+ # health_check_verb = 'GET'
+
+ ## health_check_query is the query string Trickster will append when performing an upstream health check for this origin
+ ## This value is the default for prometheus (again, see /docs/health.md)
+ # health_check_query = 'query=up'
+
+ ## health_check_headers provides a list of HTTP Headers to add to Health Check HTTP Requests to this origin
+ # [origins.default.health_check_headers]
+ # Authorization = 'Basic SomeHash'
+
+ ## [origins.ORIGIN_NAME.paths] section customizes the behavior of Trickster for specific paths. See /docs/paths.md for more info.
+ # [origins.default.paths]
+ # [origins.default.paths.example1]
+ # path = '/api/v1/admin/'
+ # methods = [ '*' ] # HTTP methods to be routed with this path config. '*' for all methods.
+ # match_type = 'prefix' # match $path* (using 'exact' will match just $path)
+ # handler = 'localresponse' # don't actually proxy this request, respond immediately
+ # response_code = 401
+ # response_body = 'No soup for you!'
+ # no_metrics = true # do not record metrics for requests to this path
+ # [origins.default.paths.example1.response_headers]
+ # 'Cache-Control' = 'no-cache' # attach these headers to the response down to the client
+ # 'Content-Type' = 'text/plain'
+
+ # [origins.default.paths.example2]
+ # path = '/example/'
+ # methods = [ 'GET', 'POST' ]
+ # collapsed_forwarding = 'progressive' # see /docs/collapsed_forwarding.md
+ # match_type = 'prefix' # this path is routed using prefix matching
+ # handler = 'proxycache' # this path is routed through the cache
+ # cache_key_params = [ 'ex_param1', 'ex_param2' ] # the cache key will be hashed with these query parameters (GET)
+ # cache_key_form_fields = [ 'ex_param1', 'ex_param2' ] # or these form fields (POST)
+ # cache_key_headers = [ 'X-Example-Header' ] # and these request headers, when present in the incoming request
+ # [origins.default.paths.example2.request_headers]
+ # 'Authorization' = 'custom proxy client auth header'
+ # '-Cookie' = '' # attach these request headers when proxying. the '+' in the header name
+ # '+Accept-Encoding' = 'gzip' # means append the value if the header exists, rather than replace
+ ## while the '-' will remove the header
+ # [origins.default.paths.example2.request_params]
+ # '+authToken' = 'SomeTokenHere' # manipulate request query parameters in the same way
+
+ ## the [origins.ORIGIN_NAME.tls] section configures the frontend and backend TLS operation for the origin
+ # [origins.default.tls]
+
+ ## TLS Frontend Configs
+ ## You can configure which certificate and key to use when this endpoint serves downstream clients over TLS
+ ## Trickster will fail out at startup if the provided files do not exist, are unreadable, or in an invalid format
+ ## These settings by default are '' (empty string), which disables this origin from being routed over the TLS port
+ # full_chain_cert_path = '/path/to/your/cert.pem'
+ # private_key_path = '/path/to/your/key.pem'
+
+ ## TLS Backend Configs
+ ## These settings configure how Trickster will behave as a client when communicating with
+ ## this origin over TLS
+
+ ## if insecure_skip_verify is true, Trickster will trust the origin's certificate without any verification
+ ## default is false
+ # insecure_skip_verify = false
+
+ ## certificate_authority_paths provides a list of additional certificate authorities to be used to trust an upstream origin
+ ## in addition to the Operating System CAs. default is an empty list, which instructs Trickster to use only the OS list
+ # certificate_authority_paths = [ '../../testdata/test.rootca.pem' ]
+
+ ## client_cert_path provides the path to a client certificate for Trickster to use when authenticating with an upstream server
+ ## empty string '' by default
+ # client_cert_path = '/path/to/my/client/cert.pem'
+
+ ## client_key_path provides the path to a client key for Trickster to use when authenticating with an upstream server
+ ## empty string '' by default
+ # client_key_path = '/path/to/my/client/key.pem'
+
+ ## For multi-origin support, origins are named, and the name is the second word of the configuration section name.
+ ## In this example, an origin is named "foo".
+ ## Clients can indicate this origin in their path (http://trickster.example.com:9090/foo/api/v1/query_range?.....)
+ ## there are other ways for clients to indicate which origin to use in a multi-origin setup. See the documentation for more information
+
+ ## use quotes around FQDNs for host-based routing (see /docs/multi-origin.md).
+ # [origins.'foo.example.com']
+ # is_default = false
+ # origin_type = 'influxdb'
+ # origin_url = 'http://influx-origin:8086'
+ # cache_name = 'bbolt_example'
+ # negative_cache_name = 'general'
+ # timeseries_retention_factor = 1024
+ # timeseries_eviction_method = 'oldest'
+ # timeout_secs = 180
+ # backfill_tolerance_secs = 180
+
+## Configuration Options for Metrics Instrumentation
+# [metrics]
+## listen_port defines the port that Trickster's metrics server listens on at /metrics
+# listen_port = 8082
+## listen_address defines the ip that Trickster's metrics server listens on at /metrics
+## empty by default, listening on all interfaces
+# listen_address = ''
+
+## Configuration Options for Logging Instrumentation
+# [logging]
+## log_level defines the verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'
+## default is 'info'
+# log_level = 'info'
+
+## log_file defines the file location to store logs. These will be auto-rolled and maintained for you.
+## not specifying a log_file (this is the default behavior) will print logs to STDOUT
+# log_file = '/some/path/to/trickster.log'
diff --git a/cmd/trickster/conf/simple.prometheus.conf b/cmd/trickster/conf/simple.prometheus.conf
new file mode 100644
index 000000000..bc624cea4
--- /dev/null
+++ b/cmd/trickster/conf/simple.prometheus.conf
@@ -0,0 +1,27 @@
+#
+# Trickster 1.0 Example Configuration File - Simple Prometheus Reverse Proxy Cache
+#
+# To use this, run: trickster -config /path/to/simple.prometheus.conf
+#
+# This file demonstrates a basic configuration to accelerate
+# Prometheus queries using Trickster. More documentation is
+# available at https://github.com/Comcast/trickster/docs/
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+#
+
+[frontend]
+listen_port = 9090
+
+[origins]
+ [origins.default]
+
+ # update FQDN and Port to work in your environment
+ origin_url = 'http://prometheus:9090'
+ origin_type = 'prometheus'
+
+[metrics]
+listen_port = 8082 # available for scraping at http://<host>:8082/metrics
+
+[logging]
+log_level = 'info'
diff --git a/cmd/trickster/conf/simple.reverseproxycache.conf b/cmd/trickster/conf/simple.reverseproxycache.conf
new file mode 100644
index 000000000..783978c7b
--- /dev/null
+++ b/cmd/trickster/conf/simple.reverseproxycache.conf
@@ -0,0 +1,27 @@
+#
+# Trickster 1.0 Example Configuration File - Simple HTTP Reverse Proxy Cache
+#
+# To use this, run: trickster -config /path/to/simple.reverseproxycache.conf
+#
+# This file demonstrates a basic configuration for operating an
+# HTTP Reverse Proxy Cache using Trickster. More documentation is
+# available at https://github.com/Comcast/trickster/docs/
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+#
+
+[frontend]
+listen_port = 8080
+
+[origins]
+ [origins.default]
+
+ # update FQDN and (optional) Port to work in your environment
+ origin_url = 'http://api.example.com:2379'
+ origin_type = 'reverseproxycache'
+
+[metrics]
+listen_port = 8082 # available for scraping at http://<host>:8082/metrics
+
+[logging]
+log_level = 'info'
diff --git a/conf/trickster.service b/cmd/trickster/conf/trickster.service
similarity index 100%
rename from conf/trickster.service
rename to cmd/trickster/conf/trickster.service
diff --git a/cmd/trickster/main.go b/cmd/trickster/main.go
new file mode 100644
index 000000000..e7120cf67
--- /dev/null
+++ b/cmd/trickster/main.go
@@ -0,0 +1,143 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ _ "net/http/pprof" // Comment to disable. Available on :METRICS_PORT/debug/pprof
+ "os"
+ "sync"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+ th "github.com/Comcast/trickster/internal/proxy/handlers"
+ "github.com/Comcast/trickster/internal/routing"
+ rr "github.com/Comcast/trickster/internal/routing/registration"
+ "github.com/Comcast/trickster/internal/runtime"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+
+ "github.com/gorilla/handlers"
+)
+
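+// these values are populated at build time via the -X linker flags set in the Makefile's LDFLAGS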
+var (
+ applicationGitCommitID string
+ applicationBuildTime string
+ applicationGoVersion string
+ applicationGoArch string
+)
+
+const (
+ applicationName = "trickster"
+ applicationVersion = "1.0.0-beta10"
+)
+
+func main() {
+
+ var err error
+
+ runtime.ApplicationName = applicationName
+ runtime.ApplicationVersion = applicationVersion
+
+ err = config.Load(runtime.ApplicationName, runtime.ApplicationVersion, os.Args[1:])
+ if err != nil {
+ printVersion()
+ fmt.Println("Could not load configuration:", err.Error())
+ os.Exit(1)
+ }
+
+ if config.Flags.PrintVersion {
+ printVersion()
+ os.Exit(0)
+ }
+
+ log.Init()
+ defer log.Logger.Close()
+ log.Info("application start up",
+ log.Pairs{
+ "name": runtime.ApplicationName,
+ "version": runtime.ApplicationVersion,
+ "goVersion": applicationGoVersion,
+ "goArch": applicationGoArch,
+ "commitID": applicationGitCommitID,
+ "buildTime": applicationBuildTime,
+ "logLevel": config.Logging.LogLevel,
+ },
+ )
+
+ for _, w := range config.LoaderWarnings {
+ log.Warn(w, log.Pairs{})
+ }
+
+ metrics.Init()
+ cr.LoadCachesFromConfig()
+ th.RegisterPingHandler()
+ th.RegisterConfigHandler()
+ err = rr.RegisterProxyRoutes()
+ if err != nil {
+ log.Fatal(1, "route registration failed", log.Pairs{"detail": err.Error()})
+ }
+
+ if config.Frontend.TLSListenPort < 1 && config.Frontend.ListenPort < 1 {
+ log.Fatal(1, "no http or https listeners configured", log.Pairs{})
+ }
+
+ wg := sync.WaitGroup{}
+ var l net.Listener
+
+ // if TLS port is configured and at least one origin is mapped to a good tls config,
+ // then set up the tls server listener instance
+ if config.Frontend.ServeTLS && config.Frontend.TLSListenPort > 0 {
+ wg.Add(1)
+ go func() {
+ tlsConfig, err := config.Config.TLSCertConfig()
+ if err == nil {
+ l, err = proxy.NewListener(
+ config.Frontend.TLSListenAddress,
+ config.Frontend.TLSListenPort,
+ config.Frontend.ConnectionsLimit,
+ tlsConfig)
+ if err == nil {
+ err = http.Serve(l, handlers.CompressHandler(routing.TLSRouter))
+ }
+ }
+ log.Error("exiting", log.Pairs{"err": err})
+ wg.Done()
+ }()
+ }
+
+ // if the plaintext HTTP port is configured, then set up the http listener instance
+ if config.Frontend.ListenPort > 0 {
+ wg.Add(1)
+ go func() {
+ l, err := proxy.NewListener(config.Frontend.ListenAddress, config.Frontend.ListenPort,
+ config.Frontend.ConnectionsLimit, nil)
+
+ if err == nil {
+ err = http.Serve(l, handlers.CompressHandler(routing.Router))
+ }
+ log.Error("exiting", log.Pairs{"err": err})
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func printVersion() {
+	fmt.Println(runtime.ApplicationName, runtime.ApplicationVersion, applicationBuildTime, applicationGitCommitID, applicationGoVersion, applicationGoArch)
+}
diff --git a/conf/example.conf b/conf/example.conf
deleted file mode 100644
index 436d777b6..000000000
--- a/conf/example.conf
+++ /dev/null
@@ -1,122 +0,0 @@
-[main]
-# instance_id allows you to run multiple trickster processes on the same host and log to separate files
-# Useful for baremetal, not so much for elastic deployments, so only uncomment if you really need it
-#instance_id = 1
-
-# Configuration options for the Proxy Server
-[proxy_server]
-# listen_port defines the port on which Trickster's Proxy server listens.
-# since this is a proxy for Prometheus, we use 9090 by default, just like Prometheus does
-# listen_port = 9090
-# listen_address defines the ip on which Trickster's Proxy server listens.
-# empty by default, listening on all interfaces
-# listen_address =
-
-[cache]
-# cache_type defines what kind of cache Trickster uses
-# options are 'boltdb', 'filesystem', 'memory', and 'redis'.
-# The default is 'memory'.
-cache_type = 'memory'
-
-# record_ttl_secs defines the relative expiration of cached queries. default is 6 hours (21600 seconds)
-# record_ttl_secs = 21600
-
-# reap_sleep_ms defines how long the cache reaper waits between reap cycles. Default is 1000 (1s)
-# reap_sleep_ms = 1000
-
-# compression determines whether the cache should be compressed. default is true
-# compression = true
-
- ### Configuration options when using a Redis Cache
- # [cache.redis]
- # protocol defines the protocol for connecting to redis ('unix' or 'tcp') 'tcp' is default
- # protocol = 'tcp'
- # endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
- # default is 'redis:6379'
- # endpoint = 'redis:6379'
-
- ### Configuration options when using a Filesystem Cache
- # [cache.filesystem]
- # cache_path defines the directory location under which the Trickster cache will be maintained
- # default is '/tmp/trickster'
- # cache_path = '/tmp/trickster'
-
- # Configuration options when using a BoltDb Cache
- #[cache.boltdb]
-
- # filename defines the file where the Trickster cache will be maintained
- # default is 'trickster.db'
- # filename = 'trickster.db'
-
- # bucket defines the name of the BotlDb bucket (similar to a namespace) under which our key value store lives
- # default is 'trickster'
- # bucket = 'trickster'
-
-# Configuration options for mapping Origin(s)
-[origins]
- ### The default origin
- [origins.default]
-
- # origin_url defines the URL of the origin. Default is http://prometheus:9090
- origin_url = 'http://prometheus:9090'
-
- # timeout_secs defines how many seconds Trickster will wait before aborting and upstream http request. Default: 180s
- # timeout_secs = 180
-
- # api path defines the path of the Prometheus API (usually '/api/v1')
- api_path = '/api/v1'
-
- # ignore_no_cache_header disables a client's ability to send a no-cache to refresh a cached query. Default is false
- # ignore_no_cache_header = false
-
- # max_value_age_secs defines the maximum age of specific datapoints in seconds. Default is 86400 (24 hours)
- max_value_age_secs = 86400
-
- # fast_forward_disable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
- # fast_forward_disable = false
-
- # For multi-origin support, origins are named, and the name is the second word of the configuration section name.
- # In this example, an origin is named "foo". Clients can indicate this origin in their path (http://trickster.example.com:9090/foo/query_range?.....)
- # there are other ways for clients to indicate which origin to use in a multi-origin setup. See the documentation for more information
-
- # [origins.foo]
- # origin_url = 'http://prometheus-foo:9090'
- # api_path = '/api/v1'
- # default_step = 300
- # ignore_no_cache_header = false
- # max_value_age_secs = 86400
- # timeout_secs = 180
-
-# Configuration Options for Metrics Instrumentation
-[metrics]
-# listen_port defines the port that Trickster's metrics server listens on at /metrics
-listen_port = 8082
-# listen_address defines the ip that Trickster's metrics server listens on at /metrics
-# empty by default, listening on all interfaces
-# listen_address =
-
-# Configuration Options for Profiler
-[profiler]
-# enabled indicates whether to start the profiler server when Trickster starts up. Default: false
-# enabled = false
-# listen_port defines the port that Trickster's profiler server listens on at /debug/pprof. Default: 6060
-# listen_port = 6060
-
-# Configuration Options for Logging Instrumentation
-[logging]
-# log_level defines the verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'
-# default is info
-log_level = 'info'
-
-# log_file defines the file location to store logs. These will be auto-rolled and maintained for you.
-# not specifying a log_file (this is the default behavior) will print logs to STDOUT
-# log_file = '/some/path/to/trickster.log'
-
-# Configuration options for the TLS
-[tls]
-# enabled indecates whether to start Trickster's Proxy server using tls. Default: false
-# enabled = false
-# full_chain_cert_path defines the location of the concat file of the server certification and the intermediate certification for the tls endpoint.
-# full_chain_cert_path = ''
-# private_key_path defines the location of the private key file for the tls endpoint.
-# private_key_path = ''
diff --git a/config.go b/config.go
deleted file mode 100644
index 58cbf90e2..000000000
--- a/config.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import "github.com/BurntSushi/toml"
-
-// Config is the main configuration object
-type Config struct {
- Caching CachingConfig `toml:"cache"`
- DefaultOriginURL string // to capture a CLI origin url
- Logging LoggingConfig `toml:"logging"`
- Main GeneralConfig `toml:"main"`
- Metrics MetricsConfig `toml:"metrics"`
- Profiler ProfilerConfig `toml:"profiler"`
- Origins map[string]PrometheusOriginConfig `toml:"origins"`
- ProxyServer ProxyServerConfig `toml:"proxy_server"`
- TLS TLSConfig `toml:"tls"`
-}
-
-// GeneralConfig is a collection of general configuration values.
-type GeneralConfig struct {
- // InstanceID represents a unique ID for the current instance, when multiple instances on the same host
- InstanceID int `toml:"instance_id"`
- // Environment indicates the operating environment of the running instance (e.g., "dev", "stage", "prod")
- Environment string
- // ConfigFile represents the physical filepath to the Trickster Configuration
- ConfigFile string
- // Hostname is populated with the self-resolved Hostname where the instance is running
- Hostname string
-}
-
-// ProxyServerConfig is a collection of configurations for the main http listener for the application
-type ProxyServerConfig struct {
- // ListenAddress is IP address for the main http listener for the application
- ListenAddress string `toml:"listen_address"`
- // ListenPort is TCP Port for the main http listener for the application
- ListenPort int `toml:"listen_port"`
-}
-
-// CachingConfig is a collection of defining the Trickster Caching Behavior
-type CachingConfig struct {
- // CacheType represents the type of cache that we wish to use: "boltdb", "memory", "filesystem", or "redis"
- CacheType string `toml:"cache_type"`
- RecordTTLSecs int64 `toml:"record_ttl_secs"`
- Redis RedisCacheConfig `toml:"redis"`
- Filesystem FilesystemCacheConfig `toml:"filesystem"`
- ReapSleepMS int64 `toml:"reap_sleep_ms"`
- Compression bool `toml:"compression"`
- BoltDB BoltDBCacheConfig `toml:"boltdb"`
-}
-
-// RedisCacheConfig is a collection of Configurations for Connecting to Redis
-type RedisCacheConfig struct {
- // Protocol represents the connection method (e.g., "tcp", "unix", etc.)
- Protocol string `toml:"protocol"`
- // Endpoint represents FQDN:port or IPAddress:Port of the Redis server
- Endpoint string `toml:"endpoint"`
- // Password can be set when using password protected redis instance.
- Password string `toml:"password"`
-}
-
-// BoltDBCacheConfig is a collection of Configurations for storing cached data on the Filesystem
-type BoltDBCacheConfig struct {
- // Filename represents the filename (including path) of the BotlDB database
- Filename string `toml:"filename"`
- // Bucket represents the name of the bucket within BoltDB under which Trickster's keys will be stored.
- Bucket string `toml:"bucket"`
-}
-
-// FilesystemCacheConfig is a collection of Configurations for storing cached data on the Filesystem
-type FilesystemCacheConfig struct {
- // CachePath represents the path on disk where our cache will live
- CachePath string `toml:"cache_path"`
-}
-
-// PrometheusOriginConfig is a collection of configurations for prometheus origins proxied by Trickster
-// You can override these on a per-request basis with url-params
-type PrometheusOriginConfig struct {
- OriginURL string `toml:"origin_url"`
- APIPath string `toml:"api_path"`
- IgnoreNoCacheHeader bool `toml:"ignore_no_cache_header"`
- MaxValueAgeSecs int64 `toml:"max_value_age_secs"`
- FastForwardDisable bool `toml:"fast_forward_disable"`
- NoCacheLastDataSecs int64 `toml:"no_cache_last_data_secs"`
- TimeoutSecs int64 `toml:"timeout_secs"`
-}
-
-// MetricsConfig is a collection of Metrics Collection configurations
-type MetricsConfig struct {
- // ListenAddress is IP address from which the Application Metrics are available for pulling at /metrics
- ListenAddress string `toml:"listen_address"`
- // ListenPort is TCP Port from which the Application Metrics are available for pulling at /metrics
- ListenPort int `toml:"listen_port"`
-}
-
-// ProfilerConfig is a collection of pprof profiling configurations
-type ProfilerConfig struct {
- // Enabled specifies whether or not the pprof endpoint should be exposed
- Enabled bool `toml:"enabled"`
- // ListenPort is TCP Port from which the Profiler data is available at /debug/pprof
- ListenPort int `toml:"listen_port"`
-}
-
-// LoggingConfig is a collection of Logging configurations
-type LoggingConfig struct {
- // LogFile provides the filepath to the instances's logfile. Set as empty string to Log to Console
- LogFile string `toml:"log_file"`
- // LogLevel provides the most granular level (e.g., DEBUG, INFO, ERROR) to log
- LogLevel string `toml:"log_level"`
-}
-
-// TLSConfig is a collection of TLS configurations for the main http listenr for the application
-type TLSConfig struct {
- // Enabled specifies whether or not the tls endpoint should be exposed
- Enabled bool `toml:"enabled"`
- // FullChainCertPath specifies the path of the concat file of the server certification and the intermediate certification for the tls endpoint
- FullChainCertPath string `toml:"full_chain_cert_path"`
- // PrivateKeyPath specifies the path of the private key file for the tls endpoint
- PrivateKeyPath string `toml:"private_key_path"`
-}
-
-// NewConfig returns a Config initialized with default values.
-func NewConfig() *Config {
-
- defaultCachePath := "/tmp/trickster"
- defaultBoltDBFile := "trickster.db"
-
- return &Config{
- Caching: CachingConfig{
-
- CacheType: ctMemory,
- RecordTTLSecs: 21600,
-
- Redis: RedisCacheConfig{Protocol: "tcp", Endpoint: "redis:6379"},
- Filesystem: FilesystemCacheConfig{CachePath: defaultCachePath},
- BoltDB: BoltDBCacheConfig{Filename: defaultBoltDBFile, Bucket: "trickster"},
-
- ReapSleepMS: 1000,
- Compression: true,
- },
- Logging: LoggingConfig{
- LogFile: "",
- LogLevel: "INFO",
- },
- Main: GeneralConfig{
- ConfigFile: "/etc/trickster/trickster.conf",
- Hostname: "localhost.unknown",
- },
- Metrics: MetricsConfig{
- ListenPort: 8082,
- },
- Profiler: ProfilerConfig{
- ListenPort: 6060,
- Enabled: false,
- },
- Origins: map[string]PrometheusOriginConfig{
- "default": defaultOriginConfig(),
- },
- ProxyServer: ProxyServerConfig{
- ListenPort: 9090,
- },
- TLS: TLSConfig{
- Enabled: false,
- FullChainCertPath: "",
- PrivateKeyPath: "",
- },
- }
-}
-
-func defaultOriginConfig() PrometheusOriginConfig {
- return PrometheusOriginConfig{
- OriginURL: "http://prometheus:9090/",
- APIPath: prometheusAPIv1Path,
- IgnoreNoCacheHeader: true,
- MaxValueAgeSecs: 86400, // Keep datapoints up to 24 hours old
- TimeoutSecs: 180,
- }
-}
-
-// LoadFile loads application configuration from a TOML-formatted file.
-func (c *Config) LoadFile(path string) error {
- _, err := toml.DecodeFile(path, &c)
- return err
-}
diff --git a/deploy/Dockerfile b/deploy/Dockerfile
index 4180c76a0..0988d4be4 100644
--- a/deploy/Dockerfile
+++ b/deploy/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.11.5 as builder
+FROM golang:1.13.6 as builder
COPY . /go/src/github.com/Comcast/trickster
WORKDIR /go/src/github.com/Comcast/trickster
@@ -6,16 +6,15 @@ WORKDIR /go/src/github.com/Comcast/trickster
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 make build
-FROM alpine:3.9
+FROM alpine:3.11.2
LABEL maintainer "The Trickster Authors "
-COPY --from=builder /go/src/github.com/Comcast/trickster/trickster /usr/local/bin/trickster
-COPY conf/example.conf /etc/trickster/trickster.conf
+COPY --from=builder /go/src/github.com/Comcast/trickster/OPATH/trickster /usr/local/bin/trickster
+COPY cmd/trickster/conf/example.conf /etc/trickster/trickster.conf
RUN chown nobody /usr/local/bin/trickster
RUN chmod +x /usr/local/bin/trickster
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
-EXPOSE 9090 8082
USER nobody
ENTRYPOINT ["trickster"]
diff --git a/deploy/README.md b/deploy/README.md
index 081df0f76..b1bac1b18 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -12,21 +12,21 @@ If you are wanting to use Helm and kubernetes rbac use the following install ste
#### Bootstrap Local Kubernetes-Helm Dev
-- Install [Helm](helm.sh) **Client Version 2.8.2**
+- Install [Helm](helm.sh) **Client Version 2.9.1**
```
brew install kubernetes-helm
```
-- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) **client server 1.9.4, client version 1.9.4**
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) **server version 1.13.4, client version 1.13.4**
```
- curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.9.4/bin/darwin/amd64/kubectl
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/darwin/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
```
-- Install [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) **version 0.25.0**
+- Install [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) **version 0.35.0**
```
- curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.25.2/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
+ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.35.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
```
-
+
- Start minikube and enable RBAC `make start-minikube` or manually with `--extra-config=apiserver.Authorization.Mode=RBAC --kubernetes-version=v1.8.0`.
- Install Tiller `make bootstrap-peripherals`
- Wait until Tiller is running `kubectl get po --namespace trickster -w`
@@ -55,7 +55,7 @@ For pure kubernetes deployment use the `deploy/kube` directory.
```
brew cask install https://raw.githubusercontent.com/caskroom/homebrew-cask/903f1507e1aeea7fc826c6520a8403b4076ed6f4/Casks/minikube.rb
```
-
+
- Start minikube `make start-minikube` or manually with `minikube start`.
- Deploy all K8 artifacts `make bootstrap-trickster-dev`
diff --git a/deploy/helm/Makefile b/deploy/helm/Makefile
index 5b9371410..33ed58090 100644
--- a/deploy/helm/Makefile
+++ b/deploy/helm/Makefile
@@ -1,14 +1,14 @@
KUBE_DIR ?= kube-artifacts
-install-tiller-dev:
+install-tiller-dev:
kubectl config use-context minikube
-kubectl create ns trickster
kubectl config set-context minikube --namespace=trickster
-kubectl --namespace trickster create -f tiller/templates/serviceaccount.yaml
-kubectl --namespace trickster create -f tiller/templates/role.yaml
- -helm init --service-account tiller --tiller-image gcr.io/kubernetes-helm/tiller:v2.8.2 --tiller-namespace trickster
-
+ -helm init --service-account tiller --tiller-image gcr.io/kubernetes-helm/tiller:v2.9.1 --tiller-namespace trickster
+
bootstrap-peripherals: install-tiller-dev
kubectl config use-context minikube
-kubectl create -f $(KUBE_DIR)/compute-quota.yaml -n trickster
@@ -19,19 +19,17 @@ bootstrap-trickster-dev:
--install \
--namespace=trickster \
--tiller-namespace=trickster
-
-update-dev-chart: update-trickster
+
+update-dev-chart:
kubectl config use-context minikube
- helm upgrade dev trickster --namespace=trickster --tiller-namespace=trickster --reuse-values
+ helm upgrade dev trickster --namespace=trickster --tiller-namespace=trickster
start-minikube:
minikube start \
--memory 2048 \
- --cpus 2 \
- --extra-config=apiserver.Authorization.Mode=RBAC \
- --kubernetes-version=v1.9.4
+ --cpus 2
-kubectl config set-context minikube --namespace=trickster
-
+
delete:
kubectl config use-context minikube
helm delete --purge dev --tiller-namespace=trickster
diff --git a/deploy/helm/trickster/Chart.yaml b/deploy/helm/trickster/Chart.yaml
index d9bd91d3a..b1290af76 100644
--- a/deploy/helm/trickster/Chart.yaml
+++ b/deploy/helm/trickster/Chart.yaml
@@ -1,7 +1,8 @@
-name: trickster
-version: 1.1.5
-appVersion: 0.1.10
+apiVersion: v1
+appVersion: 1.0.9
description: Trickster is a reverse proxy cache for the Prometheus HTTP API that dramatically accelerates chart rendering times for any series queried from Prometheus.
+name: trickster
+version: 1.3.0
home: https://github.com/comcast/trickster
icon: https://github.com/Comcast/trickster/blob/master/docs/images/logos/trickster-horizontal-sm.png?raw=true
sources:
diff --git a/deploy/helm/trickster/README.md b/deploy/helm/trickster/README.md
index d2b30ccd4..1d32cd2ea 100644
--- a/deploy/helm/trickster/README.md
+++ b/deploy/helm/trickster/README.md
@@ -12,50 +12,76 @@ The following table lists the configurable parameters of the trickster chart and
Parameter | Description | Default
--- | --- | ---
-`config.originURL` | Default trickster originURL, references a source Prometheus instance | `http://prometheus:9090`
-`config.cache.type` | The cache_type to use. {boltdb, filesystem, memory, redis} | `memory`
-`config.cache.redis.protocol` | The protocol for connecting to redis ('unix' or 'tcp') | `tcp`
-`config.cache.redis.endpoint` | The fqdn+port or path to a unix socket file for connecting to redis | `redis:6379`
-`config.cache.filesystem.path` | The directory location under which the Trickster filesystem cache will be maintained | `/tmp/trickster`
-`config.cache.boltdb.file` | The filename of the BoltDB database | `db`
-`config.cache.boltdb.bucket` | The name of the BoltDB bucket | `trickster`
-`config.recordTTLSecs` | The relative expiration of cached queries. default is 6 hours (21600 seconds) | `21600`
-`config.defaultStep` | The step (in seconds) of a query_range request if one is not provided by the client. This helps to correct improperly formed client requests. | `300`
-`config.maxValueAgeSecs` | The maximum age of specific datapoints in seconds. Default is 86400 (24 hours). | `86400`
-`config.fastForwardDisable` | Whether to disable fastforwarding (partial step to get latest data). | `false`
-`config.logLevel` | The verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'. | `info`
-`name` | trickster container name | `trickster`
-`image.repository` | trickster container image repository | `tricksterio/trickster`
-`image.tag` | trickster container image tag | `0.1.7`
-`image.pullPolicy` | trickster container image pull policy | `IfNotPresent`
-`extraArgs` | Additional trickster container arguments | `{}`
-`ingress.enabled` | If true, trickster Ingress will be created | `false`
-`ingress.annotations` | trickster Ingress annotations | `{}`
-`ingress.extraLabels` | trickster Ingress additional labels | `{}`
-`ingress.hosts` | trickster Ingress hostnames | `[]`
-`ingress.tls` | trickster Ingress TLS configuration (YAML) | `[]`
-`nodeSelector` | node labels for trickster pod assignment | `{}`
-`tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
-`affinity` | pod affinity | `{}`
-`schedulerName` | trickster alternate scheduler name | `nil`
-`persistentVolume.enabled` | If true, trickster will create a Persistent Volume Claim | `true`
-`persistentVolume.accessModes` | trickster data Persistent Volume access modes | `[ReadWriteOnce]`
-`persistentVolume.annotations` | Annotations for trickster Persistent Volume Claim | `{}`
-`persistentVolume.existingClaim` | trickster data Persistent Volume existing claim name | `""`
-`persistentVolume.mountPath` | trickster data Persistent Volume mount root path | `/tmp/trickster`
-`persistentVolume.size` | trickster data Persistent Volume size | `15Gi`
-`persistentVolume.storageClass` | trickster data Persistent Volume Storage Class | `unset`
-`podAnnotations` | annotations to be added to trickster pods | `{}`
-`replicaCount` | desired number of trickster pods | `1`
-`statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false`
-`priorityClassName` | trickster priorityClassName | `nil`
-`resources` | trickster pod resource requests & limits | `{}`
-`securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for trickster containers | `{}`
-`service.annotations` | annotations for trickster service | `{}`
-`service.clusterIP` | internal trickster cluster service IP | `""`
-`service.externalIPs` | trickster service external IP addresses | `[]`
-`service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
-`service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
-`service.metricsPort` | trickster service port | `8080`
-`service.servicePort` | trickster service port | `9090`
-`service.type` | type of trickster service to create | `ClusterIP`
+`affinity` | Node/Pod affinities | `{}`
+`image.repository` | Image | `hub.docker.com/tricksterio/trickster`
+`image.tag` | Image tag | `1.0.1`
+`image.pullPolicy` | Image pull policy | `IfNotPresent`
+`ingress.enabled` | If true, Trickster Ingress will be created | `false`
+`ingress.annotations` | Annotations for Trickster Ingress | `{}`
+`ingress.fqdn` | Trickster Ingress fully-qualified domain name | `""`
+`ingress.tls` | TLS configuration for Trickster Ingress | `[]`
+`nodeSelector` | Node labels for pod assignment | `{}`
+`originURL` | Default trickster originURL, references a source Prometheus instance | `http://prometheus:9090`
+`caches.name` | Name of the cache to be defined | `default`
+`caches.cacheType` | The cache_type to use. {bbolt, badger, filesystem, memory, redis} | `memory`
+`caches.compression` | Boolean to compress the cache | `true`
+`caches.timeSeriesTTLSecs` | The relative expiration of cached timeseries | `21600`
+`caches.fastForwardTTLSecs` | The relative expiration of cached fast forward data | `15`
+`caches.objectTTLSecs` | The relative expiration of generically cached (non-timeseries) objects | `30`
+`caches.index.reapIntervalSecs` | How long the Cache Index reaper sleeps between reap cycles | `3`
+`caches.index.flushIntervalSecs` | How often the Cache Index saves its metadata to the cache from application memory | `5`
+`caches.index.maxSizeBytes` | How large the cache can grow in bytes before the Index evicts least-recently-accessed items | `536870912`
+`caches.index.maxSizeBackoffBytes` | How far below max_size_bytes the cache size must be to complete a byte-size-based eviction exercise | `16777216`
+`caches.index.maxSizeObjects` | How large the cache can grow in objects before the Index evicts least-recently-accessed items | `0`
+`caches.index.maxSizeBackoffObjects` | How far under max_size_objects the cache size must be to complete object-size-based eviction exercise | `100`
+`caches.redis.clientType` | redis architecture to use ('standard', 'cluster', or 'sentinel') | `standard`
+`caches.redis.protocol` | The protocol for connecting to redis ('unix' or 'tcp') | `tcp`
+`caches.redis.endpoint` | The fqdn+port or path to a unix socket file for connecting to redis | `redis:6379`
+`caches.redis.endpoints` | Used for Redis Cluster and Redis Sentinel to define a list of endpoints | `['redis:6379']`
+`caches.redis.password` | Password provides the redis password | `''`
+`caches.redis.sentinelMaster` | Should be set when using Redis Sentinel to indicate the Master Node | `''`
+`caches.redis.db` | The Database to be selected after connecting to the server | `"0"`
+`caches.redis.maxRetries` | The maximum number of retries before giving up on the command | `"0"`
+`caches.redis.minRetryBackoffMs` | The minimum backoff time between each retry | `"8"`
+`caches.redis.maxRetyBackoffMs` | The maximum backoff time between each retry | `"512"`
+`caches.redis.dialTimeoutMs` | The timeout for establishing new connections | `"5000"`
+`caches.redis.readTimeoutMs` | The timeout for socket reads. If reached, commands will fail with a timeout instead of blocking | `"3000"`
+`caches.redis.writeTimeoutMs` | The timeout for socket writes. If reached, commands will fail with a timeout instead of blocking | `"3000"`
+`caches.redis.poolSize` | The maximum number of socket connections | `"20"`
+`caches.redis.minIdleConns` | The minimum number of idle connections which is useful when establishing new connection is slow | `"0"`
+`caches.redis.maxConnAgeMs` | The connection age at which client retires (closes) the connection | `"0"`
+`caches.redis.poolTimeoutMs` | The amount of time client waits for connection if all connections are busy before returning an error | `"4000"`
+`caches.redis.idleTimeoutMs` | The amount of time after which client closes idle connections | `"300000"`
+`caches.redis.idleCheckFrequencyMs` | The frequency of idle checks made by idle connections reaper | `"60000"`
+`caches.filesystem.path` | The directory location under which the Trickster filesystem cache will be maintained | `/tmp/trickster`
+`caches.boltdb.file` | The filename of the BoltDB database | `trickster.db`
+`caches.boltdb.bucket` | The name of the BoltDB bucket | `trickster`
+`origins.name` | Identifies the name of the cache (configured above) that you want to use with this origin proxy. | `default`
+`origins.isDefault` | Describes whether this origin is the default origin considered when routing http requests | `true`
+`origins.type` | The origin type. Valid options are 'prometheus', 'influxdb', 'irondb' | `prometheus`
+`origins.scheme` | The scheme | `http`
+`origins.host` | The upstream origin by fqdn/IP and port | `'prometheus:9090'`
+`origins.pathPrefix` | Provides any path that is prefixed onto the front of the client's requested path | `''`
+`origins.timeoutSecs` | Defines how many seconds Trickster will wait before aborting an upstream http request | `"180"`
+`origins.keepAliveTimeoutSecs` | Defines how long Trickster will wait before closing a keep-alive connection due to inactivity | `"300"`
+`origins.maxIdleConns` | The maximum concurrent keep-alive connections Trickster may have opened to this origin | `"20"`
+`origins.apiPath` | The path of the Upstream Origin's API | `/api/v1`
+`origins.ignoreNoCacheHeader` | Disables a client's ability to send a no-cache to refresh a cached query | `false`
+`origins.timeseriesRetentionFactor` | The maximum number of recent timestamps to cache for a given query | `"1024"`
+`origins.timeseriesEvictionMethod` | The methodology used to determine which timestamps are removed once the timeseries_retention_factor limit is reached ('oldest', 'lru') | `"oldest"`
+`origins.fastForwardDisable` | When set to true, will turn off the 'fast forward' feature for any requests proxied to this origin | `false`
+`origins.backfillToleranceSecs` | Prevents new datapoints that fall within the tolerance window (relative to time.Now) from being cached | `"0"`
+`logLevel` | The verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'. | `info`
+`replicaCount` | Number of trickster replicas desired | `1`
+`resources` | Pod resource requests & limits | `{}`
+`service.annotations` | Annotations to be added to the Trickster Service | `{}`
+`service.clusterIP` | Cluster-internal IP address for Trickster Service | `""`
+`service.externalIPs` | List of external IP addresses at which the Trickster Service will be available | `[]`
+`service.loadBalancerIP` | External IP address to assign to Trickster Service | `""`
+`service.loadBalancerSourceRanges` | List of client IPs allowed to access Trickster Service | `[]`
+`service.metricsPort` | Port used for exporting Trickster metrics | `8080`
+`service.nodePort` | Port to expose Trickster Service on each node | ``
+`service.metricsNodePort` | Port to expose Trickster Service metrics on each node | ``
+`service.port` | Trickster's Service port | `9090`
+`service.type` | Trickster Service type | `ClusterIP`
+`tolerations` | Tolerations for pod assignment | `[]`
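
The table above replaces the old flat `config.*` parameters with list-style `caches` and `origins` entries. As a minimal sketch (not taken from this patch), a values override using those parameters might look like the following; every name and value shown is an assumption drawn from the defaults listed in the table and in `values.yaml` below.

```yaml
# Illustrative values override; keys mirror the parameters documented above.
caches:
  - name: default
    cacheType: memory            # one of bbolt, badger, filesystem, memory, redis
    index:
      reapIntervalSecs: "3"      # quoted to avoid scientific-notation conversion
      maxSizeBytes: "536870912"

origins:
  - name: default
    originType: prometheus
    originURL: http://prometheus:9090
    isDefault: true
    cacheName: default
    timeoutSecs: "180"
    timeseriesRetentionFactor: "1024"

logLevel: info
```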
diff --git a/deploy/helm/trickster/templates/configmap.yaml b/deploy/helm/trickster/templates/configmap.yaml
index cb2103114..75a74d51a 100644
--- a/deploy/helm/trickster/templates/configmap.yaml
+++ b/deploy/helm/trickster/templates/configmap.yaml
@@ -7,105 +7,363 @@ metadata:
data:
trickster.conf: |-
[main]
+ {{- if .Values.configHandlerPath }}
+ config_handler_path = {{ .Values.configHandlerPath | quote }}
+ {{- end }}
+ {{- if .Values.pingHandlerPath }}
+ ping_handler_path = {{ .Values.pingHandlerPath | quote }}
+ {{- end }}
- # instance_id allows you to run multiple trickster processes on the same host and log to separate files
- # Useful for baremetal, not so much for elastic deployments, so only uncomment if you really need it
- #instance_id = 1
+ [frontend]
+ listen_port = {{ .Values.service.port }}
+ {{- if .Values.frontend }}
+ {{- if .Values.frontend.listenAddress }}
+ listen_address = {{ .Values.frontend.listenAddress | quote }}
+ {{- end }}
+ {{- if .Values.frontend.tlsListenPort }}
+ tls_listen_port = {{ .Values.frontend.tlsListenPort }}
+ {{- end }}
+ {{- if .Values.frontend.tlsListenAddress }}
+ tls_listen_address = {{ .Values.frontend.tlsListenAddress | quote }}
+ {{- end }}
+ {{- if .Values.frontend.connectionsLimit }}
+ connections_limit = {{ .Values.frontend.connectionsLimit }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.caches }}
+ {{- if gt (len .Values.caches) 0 }}
- # Configuration options for the Proxy Server
- [proxy_server]
+ [caches]
+ {{- range .Values.caches }}
- # listen_port defines the port on which Trickster's Proxy server listens.
- # since this is a proxy for Prometheus, we use 9090 by default, just like Prometheus does
- listen_port = {{ .Values.service.servicePort }}
+ {{ printf "[caches.%s]" .name }}
+ {{- if .cacheType }}
+ cache_type = {{ .cacheType | quote }}
+ {{- end }}
+ {{- if and (ne .cacheType "redis") (ne .cacheType "badger") }}
+ {{- if .index }}
+ {{ printf "[caches.%s.index]" .name }}
+ {{- if .index.reapIntervalSecs }}
+ reap_interval_secs = {{ .index.reapIntervalSecs }}
+ {{- end }}
+ {{- if .index.flushIntervalSecs }}
+ flush_interval_secs = {{ .index.flushIntervalSecs }}
+ {{- end }}
+ {{- if .index.maxSizeBytes }}
+ max_size_bytes = {{ .index.maxSizeBytes }}
+ {{- end }}
+ {{- if .index.maxSizeBackoffBytes }}
+ max_size_backoff_bytes = {{ .index.maxSizeBackoffBytes }}
+ {{- end }}
+ {{- if .index.maxSizeObjects }}
+ max_size_objects = {{ .index.maxSizeObjects }}
+ {{- end }}
+ {{- if .index.maxSizeBackoffObjects }}
+ max_size_backoff_objects = {{ .index.maxSizeBackoffObjects }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if and (eq .cacheType "redis") ( .redis ) }}
- [cache]
- # cache_type defines what kind of cache Trickster uses
- # options are 'boltdb', 'filesystem', 'memory', and 'redis'. 'memory' is the default
- cache_type = {{ .Values.config.cache.type | quote }}
-
- # record_ttl_secs defines the relative expiration of cached queries. default is 6 hours (21600 seconds)
- record_ttl_secs = {{ .Values.config.recordTTLSecs }}
-
- {{- if eq .Values.config.cache.type "redis" }}
- # Configuration options when using a Redis Cache
- [cache.redis]
-
- # protocol defines the protocol for connecting to redis ('unix' or 'tcp') 'tcp' is default
- protocol = {{ .Values.config.cache.redis.protocol | quote }}
-
- # endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
- # default is 'redis:6379'
- endpoint = {{ .Values.config.cache.redis.endpoint | quote }}
-
- {{- else if eq .Values.config.cache.type "filesystem" }}
- # Configuration options when using a Filesystem Cache
- [cache.filesystem]
+ {{ printf "[caches.%s.redis]" .name }}
+ {{- if .redis.clientType }}
+ client_type = {{ .redis.clientType | quote }}
+ {{- end }}
+ {{- if .redis.protocol }}
+ protocol = {{ .redis.protocol | quote }}
+ {{- end }}
+ {{- if .redis.password }}
+ password = {{ .redis.password | quote }}
+ {{- end }}
+ {{- if or (eq .redis.clientType "cluster") (eq .redis.clientType "sentinel") }}
+ {{- if .redis.endpoints }}
+ endpoints = [ '{{- join "', '" .redis.endpoints }}' ]
+ {{- end }}
+ {{- if eq .redis.clientType "sentinel" }}
+ {{- if .redis.sentinelMaster }}
+ sentinel_master = {{ .redis.sentinelMaster | quote }}
+ {{- end }}
+ {{- end }}
+ {{- else }}
+ {{- if .redis.endpoint }}
+ endpoint = {{ .redis.endpoint | quote }}
+ {{- end }}
+ {{- end }}
+ {{- if .redis.db }}
+ db = {{ .redis.db }}
+ {{- end }}
+ {{- if .redis.maxRetries }}
+ max_retries = {{ .redis.maxRetries }}
+ {{- end }}
+ {{- if .redis.minRetryBackoffMs }}
+ min_retry_backoff_ms = {{ .redis.minRetryBackoffMs }}
+ {{- end }}
+ {{- if .redis.maxRetyBackoffMs }}
+ max_retry_backoff_ms = {{ .redis.maxRetyBackoffMs }}
+ {{- end }}
+ {{- if .redis.dialTimeoutMs }}
+ dial_timeout_ms = {{ .redis.dialTimeoutMs }}
+ {{- end }}
+ {{- if .redis.readTimeoutMs }}
+ read_timeout_ms = {{ .redis.readTimeoutMs }}
+ {{- end }}
+ {{- if .redis.writeTimeoutMs }}
+ write_timeout_ms = {{ .redis.writeTimeoutMs }}
+ {{- end }}
+ {{- if .redis.poolSize }}
+ pool_size = {{ .redis.poolSize }}
+ {{- end }}
+ {{- if .redis.minIdleConns }}
+ min_idle_conns = {{ .redis.minIdleConns }}
+ {{- end }}
+ {{- if .redis.maxConnAgeMs }}
+ max_conn_age_ms = {{ .redis.maxConnAgeMs }}
+ {{- end }}
+ {{- if .redis.poolTimeoutMs }}
+ pool_timeout_ms = {{ .redis.poolTimeoutMs }}
+ {{- end }}
+ {{- if .redis.idleTimeoutMs }}
+ idle_timeout_ms = {{ .redis.idleTimeoutMs }}
+ {{- end }}
+ {{- if .redis.idleCheckFrequencyMs }}
+ idle_check_frequency_ms = {{ .redis.idleCheckFrequencyMs }}
+ {{- end }}
+ {{- else if and (eq .cacheType "filesystem") ( .filesystem ) }}
- # cache_path defines the directory location under which the Trickster cache will be maintained
- # default is '/tmp/trickster'
- cache_path = {{ .Values.config.cache.filesystem.mountPath | quote }}
+ {{ printf "[caches.%s.filesystem]" .name }}
+ {{- if .filesystem.path }}
+ cache_path = {{ .filesystem.path | quote }}
+ {{- end }}
+ {{- else if and (eq .cacheType "bbolt") ( .bbolt ) }}
- {{- else if eq .Values.config.cache.type "boltdb" }}
- # Configuration options when using a BoltDb Cache
- [cache.boltdb]
- # filename defines the file where the Trickster cache will be maintained
- # default is 'trickster.db'
- filename = {{ .Values.config.cache.boltdb.file | quote }}
+ {{ printf "[caches.%s.bbolt]" .name }}
+ {{- if .bbolt.file }}
+ filename = {{ .bbolt.file | quote }}
+ {{- end }}
+ {{- if .bbolt.bucket }}
+ bucket = {{ .bbolt.bucket | quote }}
+ {{- end }}
+ {{- else if and (eq .cacheType "badger") ( .badger ) }}
- # bucket defines the name of the BotlDb bucket (similar to a namespace) under which our key value store lives
- # default is 'trickster'
- bucket = {{ .Values.config.cache.boltdb.bucket | quote }}
- {{- end }}
+ {{ printf "[caches.%s.badger]" .name }}
+ {{- if .badger.directory }}
+ directory = {{ .badger.directory | quote }}
+ {{- end }}
+ {{- if .badger.valueDirectory }}
+ value_directory = {{ .badger.valueDirectory | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.origins }}
+ {{- if gt (len .Values.origins) 0 }}
- # Configuration options for mapping Origin(s)
[origins]
+ {{- range .Values.origins }}
+ {{- $origin := .name }}
- # The default origin
- [origins.default]
+ {{ printf "[origins.%s]" .name }}
+ {{- if .originType }}
+ origin_type = {{ .originType | quote }}
+ {{- end }}
+ {{- if .originURL }}
+ origin_url = {{ .originURL | quote }}
+ {{- end }}
+ {{- if .isDefault }}
+ is_default = {{ .isDefault }}
+ {{- end }}
+ {{- if .cacheName }}
+ cache_name = {{ .cacheName | quote }}
+ {{- end }}
+ {{- if .cacheKeyPrefix }}
+ cache_key_prefix = {{ .cacheKeyPrefix | quote }}
+ {{- end }}
+ {{- if .negativeCacheName }}
+ negative_cache_name = {{ .negativeCacheName | quote }}
+ {{- end }}
+ {{- if .dearticulateUpstreamRanges }}
+ dearticulate_upstream_ranges = {{ .dearticulateUpstreamRanges }}
+ {{- end }}
+ {{- if .multipartRangesDisabled }}
+ multipart_ranges_disabled = {{ .multipartRangesDisabled }}
+ {{- end }}
+ {{- if .compressableTypes }}
+ compressable_types = [ '{{- join "', '" .compressableTypes }}' ]
+ {{- end }}
+ {{- if .timeoutSecs }}
+ timeout_secs = {{ .timeoutSecs }}
+ {{- end }}
+ {{- if .keepAliveTimeoutSecs }}
+ keep_alive_timeout_secs = {{ .keepAliveTimeoutSecs }}
+ {{- end }}
+ {{- if .maxIdleConns }}
+ max_idle_conns = {{ .maxIdleConns }}
+ {{- end }}
+ {{- if .backfillToleranceSecs }}
+ backfill_tolerance_secs = {{ .backfillToleranceSecs }}
+ {{- end }}
+ {{- if .timeseriesRetentionFactor }}
+ timeseries_retention_factor = {{ .timeseriesRetentionFactor }}
+ {{- end }}
+ {{- if .timeseriesTTLSecs }}
+ timeseries_ttl_secs = {{ .timeseriesTTLSecs }}
+ {{- end }}
+ {{- if .timeseriesEvictionMethod }}
+ timeseries_eviction_method = {{ .timeseriesEvictionMethod | quote}}
+ {{- end }}
+ {{- if .fastForwardDisable }}
+ fast_forward_disable = {{ .fastForwardDisable }}
+ {{- end }}
+ {{- if .fastforwardTTLSecs }}
+ fastforward_ttl_secs = {{ .fastforwardTTLSecs }}
+ {{- end }}
+ {{- if .maxTTLSecs }}
+ max_ttl_secs = {{ .maxTTLSecs }}
+ {{- end }}
+ {{- if .revalidationFactor }}
+ revalidation_factor = {{ .revalidationFactor }}
+ {{- end }}
+ {{- if .maxObjectByteSize }}
+ max_object_byte_size = {{ .maxObjectByteSize }}
+ {{- end }}
+ {{- if .hcUpstreamURL }}
+ health_check_upstream_url = {{ .hcUpstreamURL | quote }}
+ {{- end }}
+ {{- if .hcVerb }}
+ health_check_verb = {{ .hcVerb | quote }}
+ {{- end }}
+ {{- if .hcQuery }}
+ health_check_query = {{ .hcQuery | quote }}
+ {{- end }}
+ {{- if .hcHeaders }}
- # origin_url defines the URL of the origin. Default is http://prometheus:9090
- origin_url = {{ .Values.config.originURL | quote }}
+ {{ printf "[origins.%s.paths.%s.health_check_headers]" $origin .name }}
+ {{- range $key, $val := .hcHeaders }}
+ {{ printf "'%s' = '%s'" $key $val }}
+ {{- end }}
+ {{- end }}
+ {{- if .tls }}
- # api path defines the path of the Prometheus API (usually '/api/v1')
- api_path = '/api/v1'
+ {{ printf "[origins.%s.tls]" .name }}
+ {{- if .tls.fullChainCertPath }}
+ full_chain_cert_path = {{ .tls.fullChainCertPath | quote }}
+ {{- end }}
+ {{- if .tls.privateKeyPath }}
+ private_key_path = {{ .tls.privateKeyPath | quote }}
+ {{- end }}
+ {{- if .tls.insecureSkipVerify }}
+ insecure_skip_verify = {{ .tls.insecureSkipVerify }}
+ {{- end }}
+ {{- if .tls.certificateAuthorityPaths }}
+ certificate_authority_paths = [ '{{- join "', '" .tls.certificateAuthorityPaths }}' ]
+ {{- end }}
+ {{- if .tls.clientCertPath }}
+ client_cert_path = {{ .tls.clientCertPath | quote }}
+ {{- end }}
+ {{- if .tls.clientKeyPath }}
+ client_key_path = {{ .tls.clientKeyPath | quote }}
+ {{- end }}
+ {{- end }}
+ {{- if (.paths) }}
+ {{- if (gt (len .paths) 0) }}
- # default_step defines the step (in seconds) of a query_range request if one is
- # not provided by the client. This helps to correct improperly formed client requests.
- default_step = {{ .Values.config.defaultStep }}
+ [paths]
+ {{- range .paths }}
- # ignore_no_cache_header disables a client's ability to send a no-cache to refresh a cached query. Default is false
- #ignore_no_cache_header = false
+ {{ printf "[origins.%s.paths.%s]" $origin .name }}
+ {{- if .path }}
+ path = {{ .path | quote }}
+ {{- end }}
+ {{- if .methods }}
+ methods = [ '{{- join "', '" .methods }}' ]
+ {{- end }}
+ {{- if .matchType }}
+ match_type = {{ .matchType | quote }}
+ {{- end }}
+ {{- if .handler }}
+ handler = {{ .handler | quote }}
+ {{- end }}
+ {{- if .responseCode }}
+ response_code = {{ .responseCode }}
+ {{- end }}
+ {{- if .responseBody }}
+ response_body = {{ .responseBody | quote }}
+ {{- end }}
+ {{- if .noMetrics }}
+ no_metrics = {{ .noMetrics }}
+ {{- end }}
+ {{- if .collapsedForwarding }}
+ collapsed_forwarding = {{ .collapsedForwarding | quote }}
+ {{- end }}
+ {{- if .cacheKeyParams }}
+ cache_key_params = [ '{{- join "', '" .cacheKeyParams }}' ]
+ {{- end }}
+ {{- if .cacheKeyFormFields }}
+ cache_key_form_fields = [ '{{- join "', '" .cacheKeyFormFields }}' ]
+ {{- end }}
+ {{- if .cacheKeyHeaders }}
+ cache_key_headers = [ '{{- join "', '" .cacheKeyHeaders }}' ]
+ {{- end }}
+ {{- if .responseHeaders }}
- # max_value_age_secs defines the maximum age of specific datapoints in seconds. Default is 86400 (24 hours)
- max_value_age_secs = {{ .Values.config.maxValueAgeSecs }}
+ {{ printf "[origins.%s.paths.%s.response_headers]" $origin .name }}
+ {{- range $key, $val := .responseHeaders }}
+ {{ printf "'%s' = '%s'" $key $val }}
+ {{- end }}
+ {{- end }}
+ {{- if .requestHeaders }}
- # fast_forward_disable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
- fast_forward_disable = {{ .Values.config.fastForwardDisable }}
+ {{ printf "[origins.%s.paths.%s.request_headers]" $origin .name }}
+ {{- range $key, $val := .requestHeaders }}
+ {{ printf "'%s' = '%s'" $key $val }}
+ {{- end }}
+ {{- end }}
+ {{- if .requestParams }}
- # For multi-origin support, origins are named, and the name is the second word of the configuration section name.
- # In this example, an origin is named "foo". Clients can indicate this origin in their path (http://trickster.example.com:9090/foo/query_range?.....)
- # there are other ways for clients to indicate which origin to use in a multi-origin setup. See the documentation for more information
- #[origins.foo]
- #origin_url = 'http://prometheus-foo:9090'
- #api_path = '/api/v1'
- #default_step = 300
- #ignore_no_cache_header = false
- #max_value_age_secs = 86400
+ {{ printf "[origins.%s.paths.%s.request_params]" $origin .name }}
+ {{- range $key, $val := .requestParams }}
+ {{ printf "'%s' = '%s'" $key $val }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if (.Values.negativeCaches) }}
+ {{- if gt (len .Values.negativeCaches) 0 }}
- # Configuration Options for Metrics Instrumentation
- [metrics]
+ [negative_caches]
+ {{- range .Values.negativeCaches }}
+
+ {{ printf "[negative_caches.%s]" .name }}
+ {{- range $key, $val := . }}
+ {{- if not (eq $key "name") }}
+ {{ printf "%s = %s" $key $val }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.service.metricsPort }}
- # listen_port defines the port that Trickster's metrics server listens on at /metrics
- listen_port = {{ .Values.service.metricsPort }}
+ [metrics]
+ listen_port = {{ .Values.service.metricsPort }}
+ {{- if .Values.metrics }}
+ {{- if .Values.metrics.listenAddress }}
+ listen_address = {{ .Values.metrics.listenAddress }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.logging }}
- # Configuration Options for Logging Instrumentation
[logging]
-
- # log_level defines the verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'
- # default is info
- log_level = {{ .Values.config.logLevel | quote }}
-
- # log_file defines the file location to store logs. These will be auto-rolled and maintained for you.
- # not specifying a log_file (this is the default behavior) will print logs to STDOUT
- #log_file = '/some/path/to/trickster.log'
+ {{- if .Values.logging.logLevel }}
+ log_level = {{ .Values.logging.logLevel | quote }}
+ {{- end }}
+ {{- if .Values.logging.logFile }}
+ log_file = {{ .Values.logging.logFile | quote }}
+ {{- end }}
+ {{- end }}
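
Since the template logic above branches on `cacheType`, a hedged example of values that would exercise the Redis Sentinel path is sketched below; the endpoint list and master name are placeholders, not values from this patch.

```yaml
# Illustrative values for the redis sentinel branch of the configmap template.
caches:
  - name: default
    cacheType: redis
    redis:
      clientType: sentinel
      protocol: tcp
      endpoints: [ "redis-node-0:26379", "redis-node-1:26379" ]  # placeholder endpoints
      sentinelMaster: mymaster                                   # placeholder master name
      db: "0"
```

With these values the template renders a `[caches.default.redis]` section containing `client_type`, `endpoints`, and `sentinel_master`, and skips the `[caches.default.index]` block, which applies only to non-redis, non-badger caches.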
diff --git a/deploy/helm/trickster/templates/deployment.yaml b/deploy/helm/trickster/templates/deployment.yaml
index 297257c72..604a873cc 100644
--- a/deploy/helm/trickster/templates/deployment.yaml
+++ b/deploy/helm/trickster/templates/deployment.yaml
@@ -25,7 +25,10 @@ spec:
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
labels:
- {{- include "trickster.labels" . | nindent 8 }}
+ app: {{ template "trickster.name" . }}
+ release: {{ .Release.Name }}
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
spec:
{{- if .Values.affinity }}
affinity:
@@ -70,11 +73,11 @@ spec:
protocol: TCP
livenessProbe:
httpGet:
- path: /ping
+ path: /trickster/ping
port: http
readinessProbe:
httpGet:
- path: /health
+ path: /trickster/ping
port: http
resources:
{{ toYaml .Values.resources | indent 12 }}
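
The liveness and readiness probes above now both target `/trickster/ping`. A minimal values sketch, using the `pingHandlerPath`/`configHandlerPath` keys documented in `values.yaml` below, keeps the configured ping path aligned with the probe path:

```yaml
# Illustrative values; the ping path matches the probe path in the deployment template.
pingHandlerPath: /trickster/ping
configHandlerPath: /trickster/config
```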
diff --git a/deploy/helm/trickster/templates/service.yaml b/deploy/helm/trickster/templates/service.yaml
index c0c256bc4..ef4708819 100644
--- a/deploy/helm/trickster/templates/service.yaml
+++ b/deploy/helm/trickster/templates/service.yaml
@@ -3,8 +3,11 @@ kind: Service
metadata:
{{- if .Values.service.annotations }}
annotations:
-{{ toYaml .Values.service.annotations | indent 4 }}
-{{- end }}
+ prometheus.io/scrape: {{ .Values.prometheusScrape | quote }}
+ prometheus.io/port: {{ .Values.service.metricsPort | quote }}
+ prometheus.io/path: /metrics
+ {{- end }}
+ name: {{ template "trickster.fullname" . }}
labels:
{{- include "trickster.labels" . | nindent 4 }}
{{- if .Values.service.labels }}
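
The service template above emits `prometheus.io/*` scrape annotations from `prometheusScrape` and `service.metricsPort`, but only when `service.annotations` is non-empty. Below is a values sketch that satisfies that gate; the extra annotation key is a placeholder, not part of this patch:

```yaml
# Illustrative values enabling the Prometheus scrape annotations.
prometheusScrape: true
service:
  port: 9090
  metricsPort: 8080
  annotations:
    example.com/owner: metrics-team   # placeholder entry so the annotations block renders
```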
diff --git a/deploy/helm/trickster/values.yaml b/deploy/helm/trickster/values.yaml
index ee338b309..60eb3d3f4 100644
--- a/deploy/helm/trickster/values.yaml
+++ b/deploy/helm/trickster/values.yaml
@@ -1,55 +1,387 @@
# Default values for trickster.
-## trickster container name
-##
-name: trickster
+# See our documentation at https://github.com/Comcast/trickster/docs/
+
+# Put ints in quotes to ensure they aren't converted to scientific notation.
+# See https://github.com/kubernetes/helm/issues/1707s
+
+# frontend:
+
+# # listenAddress defines the ip on which Trickster's Front-end HTTP Proxy server listens.
+# # empty by default, listening on all interfaces
+# listenAddress: ""
+
+# # tlsListenAddress defines the ip on which Trickster's Front-end TLS Proxy server listens.
+# # empty by default, listening on all interfaces
+# tlsListenAddress: ""
+
+# # tlsListenPort defines the port on which Trickster's Front-end TLS Proxy server listens.
+# # The default is 0, which means TLS is not used, even if certificates are configured below.
+# tlsListenPort: ""
+
+# # connectionsLimit defines the maximum number of concurrent connections
+# # Trickster's Proxy server may handle at any time. 0 (default) means unlimited.
+# connectionsLimit: "0"
+
+
+# Configuration options for mapping Origin(s)
+origins:
+ # example origin named default. default is always created with these settings unless a different origin is defined here.
+ - name: default
+
+ # originType identifies the origin type. Valid options are 'prometheus', 'influxdb', 'irondb', 'reverseproxycache' (or just 'rpc')
+ # origin_type is a required configuration value
+ originType: prometheus
+
+ # originURL provides the base upstream URL for all proxied requests to this origin
+ # it can be as simple as http://example.com or as complex as https://example.com:8443/path/prefix
+ # origin_url is a required configuration value
+ originURL: http://prometheus:9090
+
+# # isDefault describes whether this origin is the default origin considered when routing http requests
+# # it is false, by default; but if you only have a single origin configured, isDefault will be true unless explicitly set to false
+# isDefault: true
+
+# # cacheName identifies the name of the cache (configured above) to use with this origin. default is 'default'
+# cacheName: default
+
+# # cacheKeyPrefix defines the prefix this origin appends to cache keys. When using a shared cache like Redis, this can
+# # help partition multiple trickster instances that may have the same hostname or ip address (the default prefix)
+# # cacheKeyPrefix: 'example'
+
+# # negativeCacheName identifies the name of the negative cache (configured above) to be used with this origin. default is 'default'
+# negativeCacheName: 'default'
+
+# # dearticulateUpstreamRanges, when true, instructs Trickster to make multiple parallel requests to the origin for each
+# # range needed to fulfill the client request, rather than making a multipart range request. default is false
+# # dearticulateUpstreamRanges: false
+
+# # multipartRangesDisabled, when true, instructs Trickster to return the full object when the client provides
+# # a multipart range request. The default is false.
+# # multipartRangesDisabled = false
+
+# # compressableTypes defines the Content Types that will be compressed when stored in the Trickster cache
+# # reasonable defaults are set, so use this with care. To disable compression, set compressableTypes = []
+# # Default list is provided here:
+# # compressableTypes = [ 'text/javascript', 'text/css', 'text/plain', 'text/xml', 'text/json', 'application/json', 'application/javascript', 'application/xml' ]
+
+# # timeoutSecs defines how many seconds Trickster will wait before aborting an upstream http request.
+# timeoutSecs: "180"
+
+# # keepAliveTimeoutSecs defines how long Trickster will wait before closing a keep-alive connection due to inactivity
+# # if the origin's keep-alive timeout is shorter than Trickster's, the connection will be closed sooner.
+# keepAliveTimeoutSecs: "300"
+
+# # maxIdleConns set the maximum concurrent keep-alive connections Trickster may have opened to this origin
+# # additional requests will be queued.
+# maxIdleConns: "20"
+
+# # backfillToleranceSecs prevents new datapoints that fall within the tolerance window (relative to time.Now) from being cached
+# # Think of it as "never cache the newest N seconds of real-time data, because it may be preliminary and subject to updates"
+# backfillToleranceSecs: "0"
+
+# # timeseriesRetentionFactor defines the maximum number of recent timestamps to cache for a given query.
+# timeseriesRetentionFactor: "1024"
+
+# # timeseriesTTLSecs defines the relative expiration of cached timeseries. default is 6 hours (21600 seconds)
+# timeseriesTTLSecs: "21600"
+
+# # timeseriesEvictionMethod selects the metholodogy used to determine which timestamps are removed once
+# # the timeseriesEvictionMethod limit is reached. options are 'oldest' and 'lru'.
+# timeseriesEvictionMethod: "oldest"
+
+# # fastForwardDisable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
+# fastForwardDisable: true
+
+# # fastforwardTTLSecs defines the relative expiration of cached fast forward data. default is 15s
+# fastforwardTTLSecs: "15"
+
+# # maxTTLSecs defines the maximum allowed TTL for any object cached for this origin. default is 86400
+# maxTTLSecs: "86400"
+
+# # revalidationFactor is the multiplier for object lifetime expiration to determine cache object TTL; default is 2
+# # for example, if a revalidatable object has Cache-Control: max-age=300, we will cache for 10 minutes (300s * 2)
+# # so there is an opportunity to revalidate
+# revalidationFactor: "2"
+
+# # maxObjectByteSize defines the largest byte size an object may be before it is uncacheable due to size. default is 524288 (512k)
+# maxObjectByteSize: "524288"
+
+# # Each origin type implements its own defaults for hcUpstreamURL, hcVerb and hcQuery,
+# # which can be overridden per origin with the following configs. See https://github.com/Comcast/trickster/docs/health.md
+
+# # hcUpstreamURL is the URL Trickster will request against this origin to
+# # when a health check request is received by Trickster via http:///trickster//health
+# # this is the default value for prometheus:
+# hcUpstreamURL: /api/v1/query
+
+# # hcVerb is the HTTP Method Trickster will use when performing an upstream health check for this origin
+# # default is 'GET' for all origin types unless overridden per-origin here.
+# hcVerb: GET
+
+# # hcQuery is the query string Trickster will append to the URL when performing an upstream health check for this origin
+# # This value is the default for prometheus (again, see /docs/health.md)
+# hcQuery: query=up
+
+# # hcHeaders provides a list of HTTP Headers to add to Health Check HTTP Requests to this origin
+# hcHeaders:
+# Authorization: "Basic SomeHash"
+
+# # this section configures the frontend and backend TLS operation for the origin
+# tls:
+
+# # TLS Frontend Configs
+# # You can configure which certificate and key to use when this endpoint serves downstream clients over TLS
+# # Trickster will fail out at startup if the provided files do not exist, are unreadable, or in an invalid format
+# # These settings by default are '' (empty string), which disables this origin from being routed over the TLS port
+# fullChainCertPath: /path/to/your/cert.pem
+# privateKeyPath: /path/to/your/key.pem
+
+# # TLS Backend Configs
+# # These settings configure how Trickster will behave as a client when communicating with
+# # this origin over TLS
+
+# # if insecureSkipVerify is true, Trickster will trust the origin's certificate without any verification
+# insecureSkipVerify: true
+
+# # certificateAuthorityPaths provides a list of additional certificate authorities to be used to trust an upstream origin
+# # in addition to Operating System CA's. default is an empty list, which instructs Trickster to use only the OS list
+# certificateAuthorityPaths: [ '../../testdata/test.rootca.pem' ]
+
+# # clientCertPath provides the path to a client certificate for Trickster to use when authenticating with an upstream server
+# clientCertPath: /path/to/my/client/cert.pem
+
+# # clientKeyPath provides the path to a client key for Trickster to use when authenticating with an upstream server
+# clientKeyPath: /path/to/my/client/key.pem
+
+# # This section customizes the behavior of Trickster for specific paths. See https://github.com/Comcast/trickster/docs/paths.md
+# paths:
+# - name: example1
+# path: /api/v1/admin/
+# methods: [ '*' ] # HTTP methods to be routed with this path config. '*' for all methods.
+# matchType: prefix # match $path* (using 'exact' will match just $path)
+# handler: localresponse # don't actually proxy this request, respond immediately
+# responseCode: "401"
+# responseBody: "No soup for you!"
+# noMetrics: true # do not record metrics for requests to this path
+# responseHeaders:
+# Cache-Control: no-cache # attach these headers to the response down to the client
+# Content-Type: text/plain # + and - prefixes work as described below
+
+# - name: example2
+# path: /example/
+# methods: [ 'GET', 'POST' ]
+# collapsedForwarding: progressive # see /docs/collapsed_forwarding.md
+# matchType: prefix # this path is routed using prefix matching
+# handler: proxycache # this path is routed through the cache
+# cacheKeyParams: [ 'ex_param1', 'ex_param2' ] # the cache key will be hashed with these query parameters (GET)
+# cacheKeyFormFields: [ 'ex_param1', 'ex_param2' ] # or these form fields (POST)
+# cacheKeyHeaders: [ 'X-Example-Header' ] # and these request headers, when present in the incoming request
+# requestHeaders:
+# Authorization: 'custom auth header' # attach these request headers when proxying. the '+' in the header name
+# +Accept-Encoding: gzip # means append the value if the header exists, rather than replace
+# -Cookie: '' # while the '-' will remove the header
+# requestParams:
+# +authToken: 'SomeTokenHere' # manipulate request query parameters in the same way
+
+# # For multi-origin support, provide a unique name.
+# # Clients can indicate the desired origin in their path (http://trickster.example.com:9090/foo/api/v1/query_range?.....)
+# # You can also use host-header based routing. See the documentation at https://github.com/Comcast/trickster/docs/multi-origin.md) for more information
+# - name: foo
+# isDefault: false
+# originType: influxdb
+# originURL: http://influx-origin:8086
+# cacheName: bbolt_example
+# negativeCacheName: general
+# timeseriesRetentionFactor: "1024"
+# timeseriesEvictionMethod: oldest
+# timeoutSecs: "180"
+# backfillToleranceSecs: "180"
+
+# caches:
+# - name: default
+# # cacheType defines what kind of cache Trickster uses
+# # options are 'bbolt', 'badger', 'filesystem', 'memory', and 'redis'
+# cacheType: memory
+
+# ## Configuration options for the Cache Index
+# # The Cache Index handles key management and retention for bbolt, filesystem and memory
+# # Redis and BadgerDB handle those functions natively and do not use Trickster's Cache Index
+# index:
+
+# # reapIntervalSecs defines how long the Cache Index reaper sleeps between reap cycles. Default is 3 (3s)
+# #reapIntervalSecs: "3"
+
+# # flushIntervalSecs sets how often the Cache Index saves its metadata to the cache from application memory. Default is 5 (5s)
+# flushIntervalSecs: "5"
+
+# # maxSizeBytes indicates how large the cache can grow in bytes before the Index evicts least-recently-accessed items. default is 512MB
+# maxSizeBytes: "536870912"
+
+# # maxSizeBackoffBytes indicates how far below max_size_bytes the cache size must be to complete a byte-size-based eviction exercise. default is 16MB
+# maxSizeBackoffBytes: "16777216"
+
+# # maxSizeObjects indicates how large the cache can grow in objects before the Index evicts least-recently-accessed items. default is 0 (infinite)
+# maxSizeObjects: "0"
+
+# # maxSizeBackoffObjects indicates how far under maxSizeObjects the cache size must be to complete object-size-based eviction exercise. default is 100
+# maxSizeBackoffObjects: "100"
+
+# ## Configuration options when using a Redis Cache
+# redis:
+# # clientType indicates which kind of Redis client to use. Options are: 'standard', 'cluster' and 'sentinel'
+# clientType: standard
+
+# ## Supported by Redis (standard)
+# ## These configurations are ignored by Redis Sentinel and Redis Cluster
+# ###
+
+# # endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
+# endpoint: redis:6379
+
+# ## Supported by Redis Cluster and Redis Sentinel
+# ## These configurations are ignored by Redis (standard)
+# ###
+
+# # endpoints is used for Redis Cluster and Redis Sentinel to define a list of endpoints
+# endpoints: [ "redis:6379" ]
+
+# ## Supported by Redis Sentinel
+# ## These configurations are ignored by Redis (standard) and Redis Cluster
+# ###
+
+# ## sentinelMaster should be set when using Redis Sentinel to indicate the Master Node
+# sentinelMaster: ""
+
+# ## Supported by all Redis Client Types
+# ## See the go-redis documentation at https://github.com/go-redis/redis/blob/master/options.go
+# ## for more information on tuning these settings
+
+# # protocol defines the protocol for connecting to redis ('unix' or 'tcp') 'tcp' is default
+# protocol: tcp
+
+# # password provides the redis password
+# password: ""
+
+# # db is the Database to be selected after connecting to the server.
+# db: "0"
+
+# #maxRetries is the maximum number of retries before giving up on the command
+# maxRetries: "0"
+
+# #minRetryBackoffMs is the minimum backoff time between each retry
+# minRetryBackoffMs: "8"
+
+# #maxRetyBackoffMs is the maximum backoff time between each retry
+# maxRetyBackoffMs: "512"
+
+# #dialTimeoutMs is the timeout for establishing new connections
+# dialTimeoutMs: "5000"
+
+# #readTimeoutMs is the timeout for socket reads. If reached, commands will fail with a timeout instead of blocking.
+# readTimeoutMs: "3000"
+
+# #writeTimeoutMs is the timeout for socket writes. If reached, commands will fail with a timeout instead of blocking.
+# writeTimeoutMs: "3000"
+
+# #poolSize is the maximum number of socket connections.
+# poolSize: "20"
+
+# #minIdleConns is the minimum number of idle connections which is useful when establishing new connection is slow.
+# minIdleConns: "0"
+
+# #maxConnAgeMs is the connection age at which client retires (closes) the connection.
+# maxConnAgeMs: "0"
+
+# #poolTimeoutMs is the amount of time client waits for connection if all connections are busy before returning an error.
+# poolTimeoutMs: "4000"
+
+# #idleTimeoutMs is the amount of time after which client closes idle connections.
+# idleTimeoutMs: "300000"
+
+# #idleCheckFrequencyMs is the frequency of idle checks made by idle connections reaper.
+# idleCheckFrequencyMs: "60000"
+
+# ## Configuration options when using a Filesystem Cache
+# filesystem:
+# # path defines the directory location under which the Trickster cache will be maintained
+# path: /tmp/trickster
+
+# ## Configuration options when using a bbolt Cache
+# bbolt:
+# # file defines the file where the Trickster cache will be maintained
+# file: trickster.db
+
+# # bucket defines the name of the bbolt bucket (similar to a namespace) under which our key value store lives
+# bucket: trickster
+
+# ## Configuration options when using a Badger cache
+# badger:
+# # directory defines the directory location under which the Badger data will be maintained
+# directory: /tmp/trickster
+
+# # valueDirectory defines the directory location under which the Badger value log will be maintained
+# valueDirectory: /tmp/trickster
+
+# negativeCaches:
+# - name: default
+# #The 'default' negative cache config, mapped by all origins by default, is empty unless you populate it.
+# #Update it by uncommenting this section and adding entries here in the format of:
+# #code: "ttl_secs"
+
+# # Here's a pre-populated negative cache config ready to be uncommented and used in an origin config
+# # The 'general' negative cache config will cache common failure response codes for 3 seconds
+# - name: general
+# 400: "3"
+# 404: "3"
+# 500: "3"
+# 502: "3"
+
+# metrics:
+# # listenAddress defines the ip that Trickster's metrics server listens on at /metrics
+# # empty by default, listening on all interfaces
+# listenAddress: ""
+
+# # These are the default values for logging
+# logging:
+# logLevel: info
+# logFile: /some/path/to/trickster.log
+
+# # configHandlerPath provides the HTTP path to view a read-only printout of the running configuration
+# # which can be reached at http://your-trickster-endpoint:port/$config_handler_path
+# configHandlerPath: /trickster/config
+
+# # pingHandlerPath provides the HTTP path you will use to perform an uptime health check against Trickster
+# # which can be reached at http://your-trickster-endpoint:port/$ping_handler_path
+# pingHandlerPath: /trickster/ping
+
+profiler:
+ enabled: false
+ port: 6060
+
+prometheusScrape: false
+
+# Number of trickster replicas desired
+replicaCount: 1
## trickster container image
##
image:
repository: tricksterio/trickster
- tag: 0.1.7
+ tag: 1.0-beta
pullPolicy: IfNotPresent
-imagePullSecrets:
-# - name: "image-pull-secret"
-
-config:
- # Default trickster originURL, references a source Prometheus instance
- # Ref: https://github.com/Comcast/trickster/blob/master/docs/configuring.md
- originURL: http://prometheus:9090
- cache:
- type: memory
- redis:
- protocol: tcp
- endpoint: redis:6379
- filesystem:
- mountPath: /tmp/trickster
- boltdb:
- file: trickster.db
- bucket: trickster
- # Put ints in quotes to ensure they aren't converted to scientific notations.
- # See https://github.com/kubernetes/helm/issues/1707
- recordTTLSecs: "21600"
- defaultStep: "300"
- # 24h
- maxValueAgeSecs: "86400"
- fastForwardDisable: false
- logLevel: info
-
-## trickster priorityClassName
-##
-priorityClassName: ""
-
-## Additional trickster container arguments
-##
-extraArgs: {}
-
-## Additional trickster container environment variable
-## For instance to add a http_proxy
-##
-extraEnv: {}
+# Service resource for trickster deployment
+# Ref: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+ type: ClusterIP
+ port: 9090
+ # nodePort: 0
+ metricsPort: 8080
+ # metricsNodePort: 0
+# Ingress resource for trickster service
+# Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
## If true, trickster Ingress will be created
##
@@ -76,36 +408,9 @@ ingress:
## Secrets must be manually created in the namespace
##
tls: []
- # - secretName: trickster-tls
- # hosts:
- # - trickster.domain.com
-
-## trickster Deployment Strategy type
-# strategy:
-# type: Recreate
-
-## Node tolerations for trickster scheduling to nodes with taints
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-##
-tolerations: []
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
-## Node labels for trickster pod assignment
-## Ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
-nodeSelector: {}
-
-## Pod affinity
-##
-affinity: {}
-
-## Use an alternate scheduler, e.g. "stork".
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-# schedulerName:
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
persistentVolume:
## If true, trickster will create/use a Persistent Volume Claim
@@ -154,12 +459,12 @@ replicaCount: 1
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
- # limits:
- # cpu: 10m
- # memory: 32Mi
- # requests:
- # cpu: 10m
- # memory: 32Mi
+ #limits:
+ #cpu: 100m
+ #memory: 128Mi
+ #requests:
+ #cpu: 100m
+ #memory: 128Mi
## Security context to be added to trickster pods
##
diff --git a/deploy/kube/configmap.yaml b/deploy/kube/configmap.yaml
index 19cef9825..d6e1fc2e9 100644
--- a/deploy/kube/configmap.yaml
+++ b/deploy/kube/configmap.yaml
@@ -7,103 +7,437 @@ metadata:
data:
trickster-conf: |-
- [main]
+ #
+ # Trickster 1.0 Example Configuration File - Exhaustive
+ #
+ # This file contains descriptions and examples for all
+ # Trickster configuration options. More documentation is
+ # available at https://github.com/Comcast/trickster/docs/
+ #
+ # Optional configs are commented out, required configs are uncommented
+ # and set to common values that let you try it out with Prometheus
+ #
+ # Copyright 2018 Comcast Cable Communications Management, LLC
+ #
- # instance_id allows you to run multiple trickster processes on the same host and log to separate files
- # Useful for baremetal, not so much for elastic deployments, so only uncomment if you really need it
- #instance_id = 1
+ # [main]
- # Configuration options for the Proxy Server
- [proxy_server]
+ ## instance_id allows you to run multiple Trickster processes on the same host and log to separate files
+ ## Useful for baremetal, not so much for elastic deployments, so only uncomment if you really need it
+ ## default is 0, which means ignored
+ #instance_id = 0
- # listen_port defines the port on which Trickster's Proxy server listens.
- # since this is a proxy for Prometheus, we use 9090 by default, just like Prometheus does
- listen_port = 9090
+ ## config_handler_path provides the HTTP path to view a read-only printout of the running configuration
+ ## which can be reached at http://your-trickster-endpoint:port/$config_handler_path
+ ## default is '/trickster/config'
+ # config_handler_path = '/trickster/config'
- [cache]
- # cache_type defines what kind of cache Trickster uses
- # options are 'boltdb, 'filesystem', 'memory', and 'redis'. 'memory' is the default
- cache_type = 'memory'
+ ## ping_handler_path provides the HTTP path you will use to perform an uptime health check against Trickster
+ ## which can be reached at http://your-trickster-endpoint:port/$ping_handler_path
+ ## default is '/trickster/ping'
+ # ping_handler_path = '/trickster/ping'
- # record_ttl_secs defines the relative expiration of cached queries. default is 6 hours (21600 seconds)
- record_ttl_secs = 21600
- # Configuration options when using a Redis Cache
- #[cache.redis]
+ # Configuration options for the Trickster Frontend
+ [frontend]
- # protocol defines the protocol for connecting to redis ('unix' or 'tcp') 'tcp' is default
- #protocol = 'tcp'
+ # listen_port defines the port on which Trickster's Front-end HTTP Proxy server listens.
+ listen_port = 9090
- # endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
- # default is 'redis:6379'
- #endpoint = 'redis:6379'
+ ## listen_address defines the ip on which Trickster's Front-end HTTP Proxy server listens.
+ ## empty by default, listening on all interfaces
+ # listen_address = ''
- # Configuration options when using a Filesystem Cache
- #[cache.filesystem]
+ ## tls_listen_address defines the ip on which Trickster's Front-end TLS Proxy server listens.
+ ## empty by default, listening on all interfaces
+ # tls_listen_address = ''
- # cache_path defines the directory location under which the Trickster cache will be maintained
- # default is '/tmp/trickster'
- #cache_path = '/tmp/trickster'
- # Configuration options when using a BoltDb Cache
- #[cache.boltdb]
+ ## tls_listen_port defines the port on which Trickster's Front-end TLS Proxy server listens.
+ ## The default is 0, which means TLS is not used, even if certificates are configured below.
+ # tls_listen_port = 0
- # filename defines the file where the Trickster cache will be maintained
- # default is 'trickster.db'
+ ## connections_limit defines the maximum number of concurrent connections
+ ## Trickster's Proxy server may handle at any time.
+ ## 0 by default, unlimited.
+ # connections_limit = 0
+
+ # [caches]
+
+ # [caches.default]
+ ## cache_type defines what kind of cache Trickster uses
+ ## options are 'bbolt', 'badger', 'filesystem', 'memory', and 'redis'
+ ## The default is 'memory'.
+ # cache_type = 'memory'
+
+ ### Configuration options for the Cache Index
+ ## The Cache Index handles key management and retention for bbolt, filesystem and memory
+ ## Redis and BadgerDB handle those functions natively and do not use Trickster's Cache Index
+ # [caches.default.index]
+
+ ## reap_interval_secs defines how long the Cache Index reaper sleeps between reap cycles. Default is 3 (3s)
+ # reap_interval_secs = 3
+
+ ## flush_interval_secs sets how often the Cache Index saves its metadata to the cache from application memory. Default is 5 (5s)
+ # flush_interval_secs = 5
+
+ ## max_size_bytes indicates how large the cache can grow in bytes before the Index evicts least-recently-accessed items. default is 512MB
+ # max_size_bytes = 536870912
+
+ ## max_size_backoff_bytes indicates how far below max_size_bytes the cache size must be to complete a byte-size-based eviction exercise. default is 16MB
+ # max_size_backoff_bytes = 16777216
+
+ ## max_size_objects indicates how large the cache can grow in objects before the Index evicts least-recently-accessed items. default is 0 (infinite)
+ # max_size_objects = 0
+
+ ## max_size_backoff_objects indicates how far under max_size_objects the cache size must be to complete object-size-based eviction exercise. default is 100
+ # max_size_backoff_objects = 100
+
+ ### Configuration options when using a Redis Cache
+ # [caches.default.redis]
+
+ ## client_type indicates which kind of Redis client to use. Options are: 'standard', 'cluster' and 'sentinel'
+ ## default is 'standard'
+ # client_type = 'standard'
+
+ ### Supported by Redis (standard) #####################################
+ ### These configurations are ignored by Redis Sentinel and Redis Cluster
+ ###
+
+ ## endpoint defines the fqdn+port or path to a unix socket file for connecting to redis
+ ## default is 'redis:6379'
+ # endpoint = 'redis:6379'
+ #
+
+ ### Supported by Redis Cluster and Redis Sentinel #####################
+ ### These configurations are ignored by Redis (standard)
+ ###
+
+ ## endpoints is used for Redis Cluster and Redis Sentinel to define a list of endpoints
+ ## default is ['redis:6379']
+ # endpoints = ['redis:6379']
+ #
+
+ ### Supported by Redis Sentinel #######################################
+ ### These configurations are ignored by Redis (standard) and Redis Cluster
+ ###
+ ## sentinel_master should be set when using Redis Sentinel to indicate the Master Node
+ ## sentinel_master = ''
+ #
+
+ ### Supported by all Redis Client Types ###############################
+ ### See the go-redis documentation at https://github.com/go-redis/redis/blob/master/options.go
+ ### for more information on tuning these settings
+
+ ## protocol defines the protocol for connecting to redis ('unix' or 'tcp'). 'tcp' is default
+ # protocol = 'tcp'
+
+ ## password provides the redis password. default is empty string ''
+ # password = ''
+
+ ## db is the Database to be selected after connecting to the server. default is 0
+ # db = 0
+
+ ## max_retries is the maximum number of retries before giving up on the command
+ # max_retries = 0
+
+ ## min_retry_backoff_ms is the minimum backoff time between each retry
+ # min_retry_backoff_ms = 8
+
+ ## max_retry_backoff_ms is the maximum backoff time between each retry
+ # max_retry_backoff_ms = 512
+
+ ## dial_timeout_ms is the timeout for establishing new connections
+ # dial_timeout_ms = 5000
+
+ ## read_timeout_ms is the timeout for socket reads. If reached, commands will fail with a timeout instead of blocking.
+ # read_timeout_ms = 3000
+
+ ## write_timeout_ms is the timeout for socket writes. If reached, commands will fail with a timeout instead of blocking.
+ # write_timeout_ms = 3000
+
+ ## pool_size is the maximum number of socket connections.
+ # pool_size = 20
+
+ ## min_idle_conns is the minimum number of idle connections which is useful when establishing new connection is slow.
+ # min_idle_conns = 0
+
+ ## max_conn_age_ms is the connection age at which client retires (closes) the connection.
+ # max_conn_age_ms = 0
+
+ ## pool_timeout_ms is the amount of time client waits for connection if all connections are busy before returning an error.
+ # pool_timeout_ms = 4000
+
+ ## idle_timeout_ms is the amount of time after which client closes idle connections.
+ # idle_timeout_ms = 300000
+
+ ## idle_check_frequency_ms is the frequency of idle checks made by idle connections reaper.
+ # idle_check_frequency_ms = 60000
+
+
+ ### Configuration options when using a Filesystem Cache ###############
+ # [caches.default.filesystem]
+ ## cache_path defines the directory location under which the Trickster cache will be maintained
+ ## default is '/tmp/trickster'
+ # cache_path = '/tmp/trickster'
+
+ ### Configuration options when using a bbolt Cache ####################
+ # [caches.default.bbolt]
+
+ ## filename defines the file where the Trickster cache will be maintained
+ ## default is 'trickster.db'
# filename = 'trickster.db'
- # bucket defines the name of the BotlDb bucket (similar to a namespace) under which our key value store lives
- # default is 'trickster'
+ ## bucket defines the name of the bbolt bucket (similar to a namespace) under which our key value store lives
+ ## default is 'trickster'
# bucket = 'trickster'
+ ### Configuration options when using a Badger cache ###################
+ # [caches.default.badger]
+ ## directory defines the directory location under which the Badger data will be maintained
+ ## default is '/tmp/trickster'
+ # directory = '/tmp/trickster'
+ ## value_directory defines the directory location under which the Badger value log will be maintained
+ ## default is '/tmp/trickster'
+ # value_directory = '/tmp/trickster'
+
+ ## Example of a second cache, sans comments, that origin configs below could use with: cache_name = 'bbolt_example'
+ #
+ # [caches.bbolt_example]
+ # cache_type = 'bbolt'
+ # compression = true
+
+ # [caches.bbolt_example.bbolt]
+ # filename = 'trickster.db'
+ # bucket = 'trickster'
+
+ # [caches.bbolt_example.index]
+ # reap_interval_secs = 3
+ # flush_interval_secs = 5
+ # max_size_bytes = 536870912
+ # max_size_backoff_bytes = 16777216
+
+ ## Negative Caching Configurations
+ ## A Negative Cache is a map of HTTP Status Codes that are cached for the specified duration,
+ ## used for temporarily caching failures (e.g., 404's for 10 seconds)
+ ##
+ ## By default, each Origin Configuration maps to the 'default' negative cache which you can
+ ## configure below, or can define your own negative caches, and specify them in your origin configs.
+ ## See /docs/negative-caching.md for more info.
+ ##
+
+ # [negative_caches]
+ # [negative_caches.default]
+ # # The 'default' negative cache config, mapped by all origins by default,
+ # # is empty unless you populate it. Update it by adding entries here in the format of:
+ # # code = ttl_secs
+
+ ## Here's a pre-populated negative cache config ready to be uncommented and used in an origin config
+ ## The 'general' negative cache config will cache common failure response codes for 3 seconds
+ # [negative_caches.general]
+ # 400 = 3
+ # 404 = 3
+ # 500 = 3
+ # 502 = 3
# Configuration options for mapping Origin(s)
[origins]
- # The default origin
+ # example origin named default. default is always created with these settings unless a different origin is defined here.
[origins.default]
- # origin_url defines the URL of the origin. Default is http://prometheus:9090
- origin_url = 'http://prometheus:9090'
+ # is_default describes whether this origin is the default origin considered when routing http requests
+ # it is false, by default; but if you only have a single origin configured, is_default will be true unless explicitly set to false
+ # is_default = true
+
+ # origin_type identifies the origin type.
+ # Valid options are: 'prometheus', 'influxdb', 'clickhouse', 'irondb', 'reverseproxycache' (or just 'rpc')
+ # origin_type is a required configuration value
+ origin_type = 'prometheus'
+
+ # cache_name identifies the name of the cache (configured above) that you want to use with this origin proxy. default is 'default'
+ # cache_name = 'default'
+
+ ## cache_key_prefix defines the prefix this origin appends to cache keys. When using a shared cache like Redis,
+ ## this can help partition multiple trickster instances that may have the same hostname or ip address (the default prefix)
+ # cache_key_prefix = 'example'
+
+ # negative_cache_name identifies the name of the negative cache (configured above) to be used with this origin. default is 'default'
+ # negative_cache_name = 'default'
+
+ # origin_url provides the base upstream URL for all proxied requests to this origin.
+ # it can be as simple as http://example.com or as complex as https://example.com:8443/path/prefix
+ # origin_url is a required configuration value
+ origin_url = 'http://prometheus:9090'
+
+ ## dearticulate_upstream_ranges, when true, instructs Trickster to make multiple parallel requests to the origin for each
+ ## range needed to fulfill the client request, rather than making a multipart range request. default is false
+ # dearticulate_upstream_ranges = false
+
+ ## multipart_ranges_disabled, when true, instructs Trickster to return the full object when the client provides
+ ## a multipart range request. The default is false.
+ # multipart_ranges_disabled = false
+
+ ## compressable_types defines the Content Types that will be compressed when stored in the Trickster cache
+ ## reasonable defaults are set, so use this with care. To disable compression, set compressable_types = []
+ ## Default list is provided here:
+ # compressable_types = [ 'text/javascript', 'text/css', 'text/plain', 'text/xml', 'text/json', 'application/json', 'application/javascript', 'application/xml' ]
+
+ ## timeout_secs defines how many seconds Trickster will wait before aborting an upstream http request. Default: 180s
+ # timeout_secs = 180
+
+ ## keep_alive_timeout_secs defines how long Trickster will wait before closing a keep-alive connection due to inactivity
+ ## if the origin's keep-alive timeout is shorter than Trickster's, the connection will be closed sooner. Default: 300
+ # keep_alive_timeout_secs = 300
+
+ ## max_idle_conns sets the maximum concurrent keep-alive connections Trickster may have opened to this origin
+ ## additional requests will be queued. Default: 20
+ # max_idle_conns = 20
+
+ ## backfill_tolerance_secs prevents new datapoints that fall within the tolerance window (relative to time.Now) from being cached
+ ## Think of it as "never cache the newest N seconds of real-time data, because it may be preliminary and subject to updates"
+ ## default is 0
+ # backfill_tolerance_secs = 0
+
+ ## timeseries_retention_factor defines the maximum number of recent timestamps to cache for a given query. Default is 1024
+ # timeseries_retention_factor = 1024
+
+ ## timeseries_ttl_secs defines the relative expiration of cached timeseries. default is 6 hours (21600 seconds)
+ # timeseries_ttl_secs = 21600
+
+ ## timeseries_eviction_method selects the methodology used to determine which timestamps are removed once
+ ## the timeseries_retention_factor limit is reached. options are 'oldest' and 'lru'. Default is 'oldest'
+ # timeseries_eviction_method = 'oldest'
+
+ ## fast_forward_disable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
+ # fast_forward_disable = false
+
+ ## fastforward_ttl_secs defines the relative expiration of cached fast forward data. default is 15s
+ # fastforward_ttl_secs = 15
+
+ ## max_ttl_secs defines the maximum allowed TTL for any object cached for this origin. default is 86400
+ # max_ttl_secs = 86400
+
+ ## revalidation_factor is the multiplier for object lifetime expiration to determine cache object TTL; default is 2
+ ## for example, if a revalidatable object has Cache-Control: max-age=300, we will cache for 10 minutes (300s * 2)
+ ## so there is an opportunity to revalidate
+ # revalidation_factor = 2
+
+ ## max_object_size_bytes defines the largest byte size an object may be before it is uncacheable due to size. default is 524288 (512k)
+ # max_object_size_bytes = 524288
+
+ ##
+ ## Each origin type implements its own defaults for health_check_upstream_url, health_check_verb and health_check_query,
+ ## which can be overridden per origin. See /docs/health.md for more information
+
+ ## health_check_upstream_url is the URL Trickster will request against this origin
+ ## when a health check request is received by Trickster via http:///trickster//health
+ ## this is the default value for prometheus:
+ # health_check_upstream_url = '/api/v1/query'
+
+ ## health_check_verb is the HTTP Method Trickster will use when performing an upstream health check for this origin
+ ## default is 'GET' for all origin types unless overridden per-origin here.
+ # health_check_verb = 'GET'
- # api path defines the path of the Prometheus API (usually '/api/v1')
- api_path = '/api/v1'
+ ## health_check_query is the query string Trickster will append when performing an upstream health check for this origin
+ ## This value is the default for prometheus (again, see /docs/health.md)
+ # health_check_query = 'query=up'
- # default_step defines the step (in seconds) of a query_range request if one is
- # not provided by the client. This helps to correct improperly formed client requests.
- default_step = 300
+ ## health_check_headers provides a list of HTTP Headers to add to Health Check HTTP Requests to this origin
+ # [origins.default.health_check_headers]
+ # Authorization = 'Basic SomeHash'
- # ignore_no_cache_header disables a client's ability to send a no-cache to refresh a cached query. Default is false
- #ignore_no_cache_header = false
+ ## [origins.ORIGIN_NAME.paths] section customizes the behavior of Trickster for specific paths. See /docs/paths.md for more info.
+ # [origins.default.paths]
+ # [origins.default.paths.example1]
+ # path = '/api/v1/admin/'
+ # methods = [ '*' ] # HTTP methods to be routed with this path config. '*' for all methods.
+ # match_type = 'prefix' # match $path* (using 'exact' will match just $path)
+ # handler = 'localresponse' # don't actually proxy this request, respond immediately
+ # response_code = 401
+ # response_body = 'No soup for you!'
+ # no_metrics = true # do not record metrics for requests to this path
+ # [origins.default.paths.example1.response_headers]
+ # 'Cache-Control' = 'no-cache' # attach these headers to the response down to the client
+ # 'Content-Type' = 'text/plain'
+
+ # [origins.default.paths.example2]
+ # path = '/example/'
+ # methods = [ 'GET', 'POST' ]
+ # collapsed_forwarding = 'progressive' # see /docs/collapsed_forwarding.md
+ # match_type = 'prefix' # this path is routed using prefix matching
+ # handler = 'proxycache' # this path is routed through the cache
+ # cache_key_params = [ 'ex_param1', 'ex_param2' ] # the cache key will be hashed with these query parameters (GET)
+ # cache_key_form_fields = [ 'ex_param1', 'ex_param2' ] # or these form fields (POST)
+ # cache_key_headers = [ 'X-Example-Header' ] # and these request headers, when present in the incoming request
+ # [origins.default.paths.example1.request_headers]
+ # 'Authorization' = 'custom proxy client auth header'
+ # '-Cookie' = '' # attach these request headers when proxying. the '+' in the header name
+ # '+Accept-Encoding' = 'gzip' # means append the value if the header exists, rather than replace
+ ## while the '-' will remove the header
+ # [origins.default.paths.example1.request_params]
+ # '+authToken' = 'SomeTokenHere' # manipulate request query parameters in the same way
- # max_value_age_secs defines the maximum age of specific datapoints in seconds. Default is 86400 (24 hours)
- max_value_age_secs = 86400
+ ## the [origins.ORIGIN_NAME.tls] section configures the frontend and backend TLS operation for the origin
+ # [origins.default.tls]
- # fast_forward_disable, when set to true, will turn off the 'fast forward' feature for any requests proxied to this origin
- #fast_forward_disable = false
+ ## TLS Frontend Configs
+ ## You can configure which certificate and key to use when this endpoint serves downstream clients over TLS
+ ## Trickster will fail out at startup if the provided files do not exist, are unreadable, or in an invalid format
+ ## These settings by default are '' (empty string), which disables this origin from being routed over the TLS port
+ # full_chain_cert_path = '/path/to/your/cert.pem'
+ # private_key_path = '/path/to/your/key.pem'
- # For multi-origin support, origins are named, and the name is the second word of the configuration section name.
- # In this example, an origin is named "foo". Clients can indicate this origin in their path (http://trickster.example.com:9090/foo/query_range?.....)
- # there are other ways for clients to indicate which origin to use in a multi-origin setup. See the documentation for more information
- #[origins.foo]
- #origin_url = 'http://prometheus-foo:9090'
- #api_path = '/api/v1'
- #default_step = 300
- #ignore_no_cache_header = false
- #max_value_age_secs = 86400
+ ## TLS Backend Configs
+ ## These settings configure how Trickster will behave as a client when communicating with
+ ## this origin over TLS
+
+ ## if insecure_skip_verify is true, Trickster will trust the origin's certificate without any verification
+ ## default is false
+ # insecure_skip_verify = false
+
+ ## certificate_authority_paths provides a list of additional certificate authorities to be used to trust an upstream origin
+ ## in addition to Operating System CA's. default is an empty list, which instructs Trickster to use only the OS list
+ # certificate_authority_paths = [ '../../testdata/test.rootca.pem' ]
+
+ ## client_cert_path provides the path to a client certificate for Trickster to use when authenticating with an upstream server
+ ## empty string '' by default
+ # client_cert_path = '/path/to/my/client/cert.pem'
+
+ ## client_key_path provides the path to a client key for Trickster to use when authenticating with an upstream server
+ ## empty string '' by default
+ # client_key_path = '/path/to/my/client/key.pem'
- # Configuration Options for Metrics Instrumentation
- [metrics]
+ ## For multi-origin support, origins are named, and the name is the second word of the configuration section name.
+ ## In this example, an origin is named "foo".
+ ## Clients can indicate this origin in their path (http://trickster.example.com:9090/foo/api/v1/query_range?.....)
+ ## there are other ways for clients to indicate which origin to use in a multi-origin setup. See the documentation for more information
- # listen_port defines the port that Trickster's metrics server listens on at /metrics
- listen_port = 8082
+ ## use quotes around FQDNs for host-based routing (see /docs/multi-origin.md).
+ # [origins.'foo.example.com']
+ # is_default = false
+ # origin_type = 'influxdb'
+ # origin_url = 'http://influx-origin:8086'
+ # cache_name = 'bbolt_example'
+ # negative_cache_name = 'general'
+ # timeseries_retention_factor = 1024
+ # timeseries_eviction_method = 'oldest'
+ # timeout_secs = 180
+ # backfill_tolerance_secs = 180
- # Configuration Options for Logging Instrumentation
- [logging]
+ ## Configuration Options for Metrics Instrumentation
+ # [metrics]
+ ## listen_port defines the port that Trickster's metrics server listens on at /metrics
+ # listen_port = 8082
+ ## listen_address defines the ip that Trickster's metrics server listens on at /metrics
+ ## empty by default, listening on all interfaces
+ # listen_address = ''
- # log_level defines the verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'
- # default is info
- log_level = 'info'
+ ## Configuration Options for Logging Instrumentation
+ # [logging]
+ ## log_level defines the verbosity of the logger. Possible values are 'debug', 'info', 'warn', 'error'
+ ## default is 'info'
+ # log_level = 'info'
- # log_file defines the file location to store logs. These will be auto-rolled and maintained for you.
- # not specifying a log_file (this is the default behavior) will print logs to STDOUT
- #log_file = '/some/path/to/trickster.log'
+ ## log_file defines the file location to store logs. These will be auto-rolled and maintained for you.
+ ## not specifying a log_file (this is the default behavior) will print logs to STDOUT
+ # log_file = '/some/path/to/trickster.log'
diff --git a/docs/caches.md b/docs/caches.md
index b2e21d1d8..a2a6a57d2 100644
--- a/docs/caches.md
+++ b/docs/caches.md
@@ -1,37 +1,44 @@
# Cache Types
-There are 3 cache types supported by Trickster
+There are several cache types supported by Trickster
-* In-Memory Cache (default)
-* Filesystem Cache
-* Redis Cache
+* In-Memory (default)
+* Filesystem
+* bbolt
+* BadgerDB
+* Redis (basic, cluster, and sentinel)
-The sample configuration ([conf/example.conf](../conf/example.conf)) demonstrates how to select and configure a particular cache type, as well as how to configure generic cache configurations such as Retention Policy.
+The sample configuration ([cmd/trickster/conf/example.conf](../cmd/trickster/conf/example.conf)) demonstrates how to select and configure a particular cache type, as well as how to configure generic cache configurations such as Retention Policy.
-## In-Memory Cache
+## In-Memory
In-Memory Cache is the default type that Trickster will implement if none of the other cache types are configured. The In-Memory cache utilizes a Golang [sync.Map](https://godoc.org/sync#Map) object for caching, which ensures atomic reads/writes against the cache with no possibility of data collisions. This option is good for both development environments and most smaller dashboard deployments.
When running Trickster in a Docker container, ensure your node hosting the container has enough memory available to accommodate the cache size of your footprint, or your container may be shut down by Docker with an Out of Memory error (#137). Similarly, when orchestrating with Kubernetes, set resource allocations accordingly.
-We are working on better profiling of Trickster's In-Memory Cache footprint and will provide some general sizing guidance on when it is best to select one of the other Cache Types in a future release.
-
-## Filesystem Cache
+## Filesystem
The Filesystem Cache is a popular option when you have larger dashboard setup (e.g., many different dashboards with many varying queries, Dashboard as a Service for several teams running their own Prometheus instances, etc.) that requires more storage space than you wish to accommodate in RAM. A Filesystem Cache configuration keeps the Trickster RAM footprint small, and is generally comparable in performance to In-Memory. Trickster performance can be degraded when using the Filesystem Cache if disk i/o becomes a bottleneck (e.g., many concurrent dashboard users).
The default Filesystem Cache path is `/tmp/trickster`. The sample configuration demonstrates how to specify a custom cache path. Ensure that the user account running Trickster has read/write access to the custom directory or the application will exit on startup upon testing filesystem access. All users generally have access to /tmp so there is no concern about permissions in the default case.
-## BoltDB Cache
+## bbolt
+
+The bbolt Cache is based on BoltDB, a popular key/value store created by [Ben Johnson](https://github.com/benbjohnson). [CoreOS's bbolt fork](https://github.com/etcd-io/bbolt) is the version implemented in Trickster. A bbolt store is a filesystem-based solution that stores the entire database in a single file. Trickster, by default, creates the database at `trickster.db` and uses a bucket name of 'trickster' for storing key/value data. See the example config file for details on customizing this aspect of your Trickster deployment. The same guidance about filesystem permissions described in the Filesystem Cache section above applies to a bbolt Cache.
+
+## BadgerDB
+
+[BadgerDB](https://github.com/dgraph-io/badger) works similarly to bbolt, in that it is a filesystem-based key/value datastore. BadgerDB provides its own native object lifecycle management (TTL) and other additional features that distinguish it from bbolt. See the configuration for more info on using BadgerDB with Trickster.
-The BoltDB Cache is a popular key/value store, created by [Ben Johnson](https://github.com/benbjohnson). [CoreOS's bbolt fork](https://github.com/coreos/bbolt) is the version implemented in Trickster. A BoltDB store is a filesystem-based solution that stores the entire database in a single file. Trickster, by default, creates the database at `trickster.db` and uses a bucket name of 'trickster' for storing key/value data. See the example config file for details on customizing this aspect of your Trickster deployment. The same guidance about filesystem permissions described in the Filesystem Cache section above apply to a BoltDB Cache.
+## Redis
-## Redis Cache
+Note: Trickster does not come with a Redis server. You must provide a pre-existing Redis endpoint for Trickster to use.
Redis is a good option for larger dashboard setups that also have heavy user traffic, where you might see degraded performance with a Filesystem Cache. This allows Trickster to scale better than a Filesystem Cache, but you will need to provide your own Redis instance at which to point your Trickster instance. The default Redis endpoint is `redis:6379`, and should work for most docker and kube deployments with containers or services named `redis`. The sample configuration demonstrates how to customize the Redis endpoint. In addition to supporting TCP endpoints, Trickster supports Unix sockets for Trickster and Redis running on the same VM or bare-metal host.
Ensure that your Redis instance is located close to your Trickster instance in order to minimize additional roundtrip latency.
+In addition to basic Redis, Trickster also supports Redis Cluster and Redis Sentinel. Refer to the sample configuration for customizing the Redis client type.
## Purging the Cache
@@ -39,17 +46,25 @@ Cache purges should not be necessary, but in the event that you wish to do so, t
A future release will provide a mechanism to fully purge the cache (regardless of the underlying cache type) without stopping a running Trickster instance.
-### In-Memory
+### Purging In-Memory Cache
Since this cache type runs inside the virtual memory allocated to the Trickster process, bouncing the Trickster process or container will effectively purge the cache.
-### Filesystem
+### Purging Filesystem Cache
To completely purge a Filesystem-based Cache, you will need to:
-* Docker/Kube: delete the Trickster container and run a new one
+* Docker/Kube: delete the Trickster container (or mounted volume) and run a new one
* Metal/VM: Stop the Trickster process and manually run `rm -rf /tmp/trickster` (or your custom-configured directory).
-### Redis Cache
+### Purging Redis Cache
Connect to your Redis instance and issue a FLUSH command. Note that if your Redis instance supports more applications than Trickster, a FLUSH will clear the cache for all dependent applications.
+
+### Purging bbolt Cache
+
+Stop the Trickster process and delete the configured bbolt file.
+
+### Purging BadgerDB Cache
+
+Stop the Trickster process and delete the configured BadgerDB path.
diff --git a/docs/clickhouse.md b/docs/clickhouse.md
new file mode 100644
index 000000000..5c1b3530f
--- /dev/null
+++ b/docs/clickhouse.md
@@ -0,0 +1,22 @@
+# ClickHouse Support
+
+Trickster 1.0 provides experimental support for accelerating ClickHouse queries that return time series data normally visualized on a dashboard. Acceleration works by using the Time Series Delta Proxy Cache to minimize the number and time range of queries to the upstream ClickHouse server.
+
+## Scope of Support
+
+Trickster is tested with the [ClickHouse DataSource Plugin for Grafana](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource) v1.9.3 by Vertamedia, and supports acceleration of queries constructed by this plugin using the plugin's built-in `$timeSeries` macro.
+
+Because ClickHouse does not provide a golang-based query parser, Trickster uses pre-compiled Regular Expression pattern matches on the incoming ClickHouse query to deconstruct its components, determine if it is cacheable and, if so, what elements are factored into the cache key derivation. We also determine what parts of the query are template-able (e.g., `time BETWEEN $time1 AND $time2`) based on the provided absolute values, in order to normalize the query before hashing the cache key.
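+
+For illustration only, here is a much-simplified sketch of that regex-and-token approach. The pattern below covers just the `BETWEEN toDateTime(...) AND toDateTime(...)` form shown in this document; Trickster's actual patterns are considerably more involved.
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// A simplified pattern for the BETWEEN clause form shown in this document.
+var betweenRe = regexp.MustCompile(`(?i)BETWEEN\s+toDateTime\((\d+)\)\s+AND\s+toDateTime\((\d+)\)`)
+
+func main() {
+	q := "SELECT t, cnt FROM exampledb.example_table WHERE time_col BETWEEN toDateTime(1574686300) AND toDateTime(1574689900)"
+
+	// Extract the absolute time range so it can be compared against what is already cached.
+	if m := betweenRe.FindStringSubmatch(q); m != nil {
+		fmt.Println("range:", m[1], "-", m[2])
+	}
+
+	// Normalize the query with tokens before deriving the cache key, so the same
+	// query issued over different time ranges hashes to the same key.
+	fmt.Println(betweenRe.ReplaceAllString(q, "BETWEEN toDateTime(<$$ts1>) AND toDateTime(<$$ts2>)"))
+}
+```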
+
+If you find query or response structures that are not yet supported, or that produce inconsistent or unexpected results, we'd love for you to report those. We also always welcome any contributions around this functionality. The regular expression patterns we currently use will likely grow in complexity as support for more query patterns is added. Thus, we may need to find a more robust query parsing solution, and welcome any assistance with that as well.
+
+Trickster currently supports the following query patterns (case-insensitive) in the JSON response format, which align with the output of the ClickHouse Data Source Plugin for Grafana:
+
+```sql
+SELECT (intDiv(toUInt32(time_col), 60) * 60) * 1000 AS t, countMerge(val_col) AS cnt, field1, field2
+FROM exampledb.example_table WHERE time_col BETWEEN toDateTime(1574686300) AND toDateTime(1574689900)
+ AND field1 > 0 AND field2 = 'some_value' GROUP BY t, field1, field2 ORDER BY t, field1, field2
+FORMAT JSON
+```
+
+In this format, the first column must be the datapoint's timestamp, the second column must be the datapoint's value, and all additional fields define the datapoint's metric name. The time column must be in the format of `(intDiv(toUInt32($time_col), $period) * $period) * 1000`, and the value column must be numeric (integer or floating point). The where clause must include `time_col > toDateTime($epoch)` or `time_col BETWEEN toDateTime($epoch1) AND toDateTime($epoch2)`. Subqueries and other modifications are compatible so long as the key components of the time series, mentioned here, can be extracted.
diff --git a/docs/collapsed-forwarding.md b/docs/collapsed-forwarding.md
new file mode 100644
index 000000000..a022d0f04
--- /dev/null
+++ b/docs/collapsed-forwarding.md
@@ -0,0 +1,65 @@
+# Collapsed Forwarding
+
+Collapsed Forwarding is a feature common among Reverse Proxy Cache solutions like Squid, Varnish and Apache Traffic Server. It works by ensuring only a single request to the upstream origin is performed for any object on a cache miss or revalidation attempt, no matter how many users are requesting the object at the same time.
+
+Trickster has support for two types of Collapsed Forwarding: Basic (default) and Progressive
+
+## Basic Collapsed Forwarding
+
+Basic Collapsed Forwarding is the default functionality for Trickster, and works by waitlisting all requests for a cacheable object while a cache miss is being serviced for the object, and then serving the waitlisted requests once the cache has been populated.
+
+The feature is further detailed in the following diagram:
+
+
+
+## Progressive Collapsed Forwarding
+
+Progressive Collapsed Forwarding (PCF) is an improvement upon the basic version, in that it eliminates the waitlist and serves all simultaneous requests concurrently while the object is still downloading from the server, similar to Apache Traffic Server's "read-while-write" feature. This may be useful in low-latency applications such as DASH or HLS video delivery, since PCF minimizes Time to First Byte latency for extremely popular objects.
+
+The feature is further detailed in the following diagram:
+
+
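+To make the read-while-write idea concrete, here is a small, self-contained Go sketch. It is a toy model, not Trickster's implementation, and all names are illustrative: one writer appends chunks as they arrive while multiple readers stream whatever has been written so far.
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync"
+)
+
+// sharedObject is a toy model of PCF: a writer appends bytes as they arrive from
+// the origin, and readers stream whatever is already available instead of waiting
+// for the full download to complete.
+type sharedObject struct {
+	mu   sync.Mutex
+	cond *sync.Cond
+	buf  []byte
+	done bool
+}
+
+func (o *sharedObject) Write(p []byte) {
+	o.mu.Lock()
+	o.buf = append(o.buf, p...)
+	o.mu.Unlock()
+	o.cond.Broadcast() // wake readers waiting on new data
+}
+
+func (o *sharedObject) Close() {
+	o.mu.Lock()
+	o.done = true
+	o.mu.Unlock()
+	o.cond.Broadcast()
+}
+
+// Stream delivers the object to consume() as it is written, returning once the
+// writer has closed the object and everything has been consumed.
+func (o *sharedObject) Stream(consume func([]byte)) {
+	off := 0
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	for {
+		for off == len(o.buf) && !o.done {
+			o.cond.Wait()
+		}
+		if off == len(o.buf) { // closed and fully consumed
+			return
+		}
+		chunk := o.buf[off:]
+		off = len(o.buf)
+		o.mu.Unlock()
+		consume(chunk)
+		o.mu.Lock()
+	}
+}
+
+func main() {
+	obj := &sharedObject{}
+	obj.cond = sync.NewCond(&obj.mu)
+	var wg sync.WaitGroup
+	for i := 1; i <= 2; i++ {
+		wg.Add(1)
+		go func(id int) {
+			defer wg.Done()
+			obj.Stream(func(chunk []byte) { fmt.Printf("reader %d got %q\n", id, chunk) })
+		}(i)
+	}
+	obj.Write([]byte("first chunk "))
+	obj.Write([]byte("second chunk"))
+	obj.Close()
+	wg.Wait()
+}
+```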
+
+### PCF for Proxy-Only Requests
+
+Trickster provides a unique feature that implements PCF in Proxy-Only configurations, to bring the benefits of Collapsed Forwarding to HTTP Paths that are not configured to be routed through the Reverse Proxy Cache. (See [Paths](./paths.md) documentation for more info on routing).
+
+The feature is further detailed in the following diagram:
+
+
+
+## How to enable Progressive Collapsed Forwarding
+
+When configuring path configs as described in [Paths Documentation](./paths.md) you simply need to add `progressive_collapsed_forwarding = true` in any path config using the `proxy` or `proxycache` handlers.
+
+Example:
+
+```toml
+ [origins.test.paths]
+ [origins.test.paths.thing1]
+ path = '/test_path1/'
+ match_type = 'prefix'
+ handler = 'proxycache'
+ progressive_collapsed_forwarding = true
+
+ [origins.test.paths.thing2]
+ path = '/test_path2/'
+ match_type = 'prefix'
+ handler = 'proxy'
+ progressive_collapsed_forwarding = true
+```
+
+See the [example.conf](../cmd/trickster/conf/example.conf) for more configuration examples.
+
+## How to test Progressive Collapsed Forwarding
+
+An easy way to test PCF is to set up your favorite file server (Lighttpd, Nginx, Apache, etc.) to host a large file, turn on PCF in the Trickster path config for that path, and make simultaneous requests for the object.
+If the networking between your machine and Trickster has enough bandwidth, you should see both downloads streaming at the same rate as the origin request.
+
+Example:
+
+- Run a Lighttpd instance or docker container on your local machine and make a large file available to be served
+- Run Trickster locally
+- Make multiple curl requests of the same object
+
+You should see the origin request's speed limited by your disk I/O, and the speed between your machine and Trickster limited by memory/CPU.
diff --git a/docs/configuring.md b/docs/configuring.md
index 51f951a21..11e2e38b6 100644
--- a/docs/configuring.md
+++ b/docs/configuring.md
@@ -6,9 +6,11 @@ There are 3 ways to configure Trickster, listed here in the order of evaluation.
* Environment Variables
* Command Line Arguments
+Note that while the configuration file provides a very robust set of knobs you can adjust, the environment variable and CLI argument options support only basic use cases.
+
## Internal Defaults
-Internal Defaults are set for all configuration values, and are overridden by the configuration methods described below. All Internal Defaults are described in [conf/example.conf](../conf/example.conf) comments.
+Internal Defaults are set for all configuration values, and are overridden by the configuration methods described below. All Internal Defaults are described in [cmd/trickster/conf/example.conf](../cmd/trickster/conf/example.conf) comments.
## Configuration File
@@ -16,16 +18,17 @@ Trickster accepts a `-config /path/to/trickster.conf` command line argument to s
When a `-config` parameter is not provided, Trickster will check for the presence of a config file at `/etc/trickster/trickster.conf` and load it if present, or proceed with the Internal Defaults if not present.
-Refer to [conf/example.conf](../conf/example.conf) for full documentation on format of a configuration file.
+Refer to [cmd/trickster/conf/example.conf](../cmd/trickster/conf/example.conf) for full documentation on format of a configuration file.
## Environment Variables
Trickster will then check for and evaluate the following Environment Variables:
-* `TRK_ORIGIN=http://prometheus.example.com:9090` - The default origin to proxy Prometheus requests
+* `TRK_ORIGIN=http://prometheus.example.com:9090` - The default origin for proxying all http requests
+* `TRK_ORIGIN_TYPE=prometheus` - The type of [supported origin server](./supported-origin-types.md)
* `TRK_LOG_LEVEL=INFO` - Level of Logging that Trickster will output
* `TRK_PROXY_PORT=8000` -Listener port for the HTTP Proxy Endpoint
-* `TRK_METRICS_PORT=8001` - Listener port for the HTTP Metrics Endpoint
+* `TRK_METRICS_PORT=8001` - Listener port for the Metrics and pprof debugging HTTP Endpoint
## Command Line Arguments
@@ -33,6 +36,7 @@ Finally, Trickster will check for and evaluate the following Command Line Argume
* `-log-level INFO` - Level of Logging that Trickster will output
* `-config /path/to/trickster.conf` - See [Configuration File](#configuration-file) section above
-* `-origin http://prometheus.example.com:9090` - The default origin to proxy Prometheus requests
+* `-origin http://prometheus.example.com:9090` - The default origin for proxying all http requests
+* `-origin-type prometheus` - The type of [supported origin server](./supported-origin-types.md)
* `-proxy-port 8000` - Listener port for the HTTP Proxy Endpoint
-* `-metrics-port 8001` - Listener port for the HTTP Metrics Endpoint
+* `-metrics-port 8001` - Listener port for the Metrics and pprof debugging HTTP Endpoint
diff --git a/docs/developer/adding-new-config.md b/docs/developer/adding-new-config.md
new file mode 100644
index 000000000..8f90dd4c1
--- /dev/null
+++ b/docs/developer/adding-new-config.md
@@ -0,0 +1,34 @@
+# Adding a New Configuration Value
+
+Trickster configurations are defined in `./internal/config/` and are mapped to `toml` annotations.
+
+When adding a configuration value, there are several places to add references, which are described below.
+
+## Configuration Code
+
+Each new configuration value must be defined in the `config` package under an existing Configuration collection (Origins, Caches, Paths, etc.).
+
+Make sure the TOML annotation uses a `lowercase_no_spaces` naming convention, while the configuration member name itself should be `CamelCase`. Follow the existing configs for guidance.
+
+Once you have defined the configuration member, if it is part of a `CacheConfig`, `OriginConfig` or `PathConfig`, it must also be added to the configuration parser for the specific type of configuration. These methods iterate through known TOML annotations to survey which configs have been set. This allows Trickster to know whether a value holds its initialized default or was explicitly set by the operator.
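+
+For example, here is a minimal sketch of how a new value and its TOML annotation might look. The struct is a pared-down stand-in for the real one in `./internal/config`, and `FooTimeoutSecs` is an invented name used only for illustration.
+
+```go
+package config
+
+// OriginConfig here is a pared-down stand-in for the real struct in ./internal/config.
+type OriginConfig struct {
+	// FooTimeoutSecs is a hypothetical example member. Note the convention: the
+	// TOML annotation is lowercase_no_spaces while the Go field name is CamelCase.
+	FooTimeoutSecs int `toml:"foo_timeout_secs"`
+}
+```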
+
+## Feature Code
+
+Once you have defined your configuration value(s), you must put them to work by referencing them elsewhere in the Trickster code, where they determine or customize the application's functionality. Exactly where this happens in the code depends upon the context and reach of your new configuration, and what features its state affects. Consult with a project maintainer if you have any questions.
+
+## Tests
+
+All new values that you add should have accompanying unit tests to ensure the modifications the value makes to the application in the feature code work as designed. Unit Tests should include verification of: proper parsing of the configuration value from test config files (in ./testdata), correct feature functionality enable/disable based on the configuration value, correct feature implementation, and coverage of all executable lines of code. Unit Tests will span the `config` package and any package(s) wherein the configuration value is used by the application.
+
+## Documentation
+
+The feature should be documented under `./docs` directory, in a suitable existing or new markdown file based on the nature of the feature. The documentation should show the key example configuration options and describe their expected results, and point to the example config file for more information.
+
+The example config file (./cmd/trickster/conf/example.conf) should be updated to include the exhaustive description and options for the configuration value(s).
+
+## Deployment
+
+The `./deploy/kube/configmap.yaml` must be updated to include the new configuration option(s). Generally this file contains a copy/paste of `./cmd/trickster/conf/example.conf`.
+
+The `./deploy/helm/trickster/values.yaml` file must be updated to mirror the configuration option(s) in the example.conf, and `./deploy/helm/trickster/templates/configmap.yaml` must be updated to map any new `yamlCaseValues` to their respective `toml_style_values` for config file generation via the template.
+
diff --git a/docs/developer/origin-extensibility.md b/docs/developer/origin-extensibility.md
new file mode 100644
index 000000000..7c3cc2203
--- /dev/null
+++ b/docs/developer/origin-extensibility.md
@@ -0,0 +1,116 @@
+# Extending Trickster to Support a New Origin Type
+
+Trickster 1.0 was written with extensibility in mind, and should be able to work with any time series database that has an HTTP-based API. In Trickster, we generically refer to our supported TSDB's as Origin Types. Some Origin Types are easier to implement and maintain than others, depending upon a host of factors that are covered later in this document. This document is meant to help anyone wishing to extend Trickster to support a new Origin Type, particularly in gauging the level of effort, understanding what is involved, and implementing the required interfaces and rules.
+
+## Qualifications
+
+Not every database server out there is a candidate for being fronted by Trickster. Trickster serves the specific purpose of accelerating the delivery of time series data sets, and will not benefit traditional relational databases, NoSQL, etc.
+
+As mentioned, the database must be able to be queried for and return time series data via HTTP. Some databases that are not specifically TSDB's actually do support querying for and returning data in a time series format, and Trickster will support those cases as detailed below, so long as they have an HTTP API.
+
+### Skills Needed
+
+In addition to these requirements in the technology, there are also skills qualifications to consider.
+
+Whether or not you've contributed to Open Source Software before, take a look at our Contributing guidelines so you know how the process works for the Trickster project. If you are unfamiliar with the Forking Workflow, read up on it so that you are able to contribute to the project through Pull Requests.
+
+Trickster is a 100% Go project, so you will need to have experience writing in Go, and in particular, data marshaling/unmarshaling and data set manipulation (sorting, merging, cropping, de-duplicating, etc.). You will need to have a good understanding of the prospective Origin Type's query language and response payload structure, so you can write the necessary parsing and modeling methods that allow Trickster to manipulate upstream HTTP requests and merge newly fetched data sets into the cache.
+
+While this might sound daunting, it is actually much easier than it appears on the surface. Since Trickster's DeltaProxyCache engine does a lot of the heavy lifting, you only have to write a series of interface functions before finding yourself near the finish line. And since a few Origin Types are already implemented, you can use their implementations for references, since the logic for your prospective Origin Type should be similar.
+
+## Interfaces
+
+Trickster provides 2 required interfaces for enabling a new Origin Type: the Proxy Client and the Time Series.
+
+### Proxy Client Interface
+
+The Proxy Client Interface ([code](https://github.com/Comcast/trickster/blob/next/internal/proxy/model/client.go)) is used by Trickster to manipulate HTTP requests and responses in order to accelerate the requests.
+
+For your Proxy Client Implementation, you will need to know these things about the Origin:
+
+- What URL paths and methods must be supported, and which engine each path should be routed through (Basic HTTP Proxy, Object Proxy Cache, or Time Series Delta Proxy Cache). The proxy engines will call your client implementation's interface exports in order to service user requests.
+
+- What data inputs the origin expects (Path, URL parameters, POST Data, HTTP Headers, cookies, etc.), and how to manipulate the query's time range when constructing those inputs to achieve a desired result.
+
+The Proxy Client Interface Methods you will need to implement are broken into several groups of functionality, as follows.
+
+#### Basic Getters
+
+- `Configuration` returns the \*config.OriginConfig object for the origin.
+
+- `Name` returns the configured name of the Origin Type instance.
+
+- `HTTPClient` returns the reusable \*http.Client object that communicates with the Origin.
+
+#### HTTP Request Routing and Handling
+
+- `RegisterRoutes` registers all of the HTTP Paths that will be used by the Origin Type and maps them to handlers written to service the various paths.
+
+- `HealthHandler` is a standard HTTP Handler that can verify and report back the health of the upstream origin and the proxy's connection to it. You will certainly create at least one other handler in your Origin Type package, but this is the only one required for conformance to the Proxy Client interface.
+
+#### Caching
+
+- `DeriveCacheKey` inspects the client request and returns the corresponding cache key
+
+#### Time Series Handling
+
+- `UnmarshalTimeseries` deserializes an HTTP Response time series payload into a Go struct
+
+- `MarshalTimeseries` serializes a time series struct into a byte slice.
+
+- `UnmarshalInstantaneous` deserializes an HTTP Response instantaneous payload into a Go struct. This may not be applicable to every potential Origin Type.
+
+- `ParseTimeRangeQuery` inspects the client request and returns a corresponding timeseries.TimeRangeQuery
+
+- `SetExtent` updates an upstream request's time range parameters as needed based on the delta gap analysis
+
+- `FastForwardURL` returns the URL to the origin to collect Fast Forward data points based on the provided HTTP Request
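+
+Taken together, a condensed sketch of the interface might look like the following. Method names mirror the list above, but the parameter and return types shown here are simplified placeholders; the authoritative definition is in the linked client.go.
+
+```go
+package origintype
+
+import (
+	"net/http"
+	"net/url"
+)
+
+// Placeholder types for illustration; Trickster's real equivalents live in
+// internal/config and internal/timeseries.
+type (
+	OriginConfig   struct{}
+	Timeseries     interface{}
+	TimeRangeQuery struct{}
+	Extent         struct{ Start, End int64 }
+)
+
+// Client condenses the Proxy Client methods described above.
+type Client interface {
+	// Basic getters
+	Configuration() *OriginConfig
+	Name() string
+	HTTPClient() *http.Client
+
+	// HTTP request routing and handling
+	RegisterRoutes(originName string, cfg *OriginConfig)
+	HealthHandler(w http.ResponseWriter, r *http.Request)
+
+	// Caching
+	DeriveCacheKey(r *http.Request) string
+
+	// Time series handling
+	UnmarshalTimeseries(data []byte) (Timeseries, error)
+	MarshalTimeseries(ts Timeseries) ([]byte, error)
+	UnmarshalInstantaneous(data []byte) (Timeseries, error)
+	ParseTimeRangeQuery(r *http.Request) (*TimeRangeQuery, error)
+	SetExtent(r *http.Request, e *Extent)
+	FastForwardURL(r *http.Request) (*url.URL, error)
+}
+```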
+
+### Time Series Interface
+
+The Time Series Interface ([code](https://github.com/Comcast/trickster/blob/next/internal/timeseries/timeseries.go)) is used by Trickster to manipulate Time Series documents in order to maintain the cache and construct downstream client request bodies.
+
+For your Time Series Implementation, you will need to know these things about the Origin:
+
+- The structure of the response payload and how that translates into Go structs. More often than not, a prospective Origin Type offers an importable model to assist with this.
+
+The Time Series Interface Methods you will need to implement are broken into several groups of functionality, as follows.
+
+#### Getters
+
+- `Extents` returns a list of the time ranges present in the cache
+- `Step` returns the Step (the duration between each timestamp in the series)
+- `SeriesCount` returns the number of series (e.g., graph lines) in the data set
+- `ValueCount` returns the total number of values across all series in the data set
+
+#### Setters
+
+- `SetExtents` sets the list of the time ranges present in the cache
+- `SetStep` sets the Step
+
+#### Data Set Manipulation
+
+- `Merge` merges a variadic list of time series into the base time series
+- `Sort` chronologically sorts the values in each series in the time series
+- `Copy` makes a new exact copy of the time series
+- `Crop` removes any values from the time series that are outside of the provided time range
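+
+As with the Client interface, the sketch below approximates the methods above in Go. The parameter and return types, and the shape of `Extent`, are assumptions; the linked timeseries package is the authoritative definition.
+
+```go
+package sketch
+
+import "time"
+
+// Extent is assumed here to be a contiguous start/end time range held in the cache.
+type Extent struct {
+	Start time.Time
+	End   time.Time
+}
+
+// Timeseries approximates the interface described above; exact signatures
+// in the Trickster source may differ.
+type Timeseries interface {
+	// Getters
+	Extents() []Extent
+	Step() time.Duration
+	SeriesCount() int
+	ValueCount() int
+
+	// Setters
+	SetExtents([]Extent)
+	SetStep(time.Duration)
+
+	// Data Set Manipulation
+	Merge(ts ...Timeseries)
+	Sort()
+	Copy() Timeseries
+	Crop(e Extent)
+}
+```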
+
+## Special Considerations
+
+### Query Language Complexity
+
+One of the main areas of consideration is the complexity of parsing and manipulating an inbound query. You will need to (1) determine if it is indeed a request for a timeseries; if so, (2) extract the requested time range and step duration for the query; and, in the event of a partial cache hit, (3) adjust the time range for the query to a provided range - all of which allows the DeltaProxyCache to fetch just the needed sections of data from the upstream origin. Requirements 1 and 2 are implemented in `ParseTimeRangeQuery`, while requirement 3 is implemented in `SetExtent`. The overall complexity of this process can significantly affect the level of effort required to implement a new Origin Type.
+
+In the example of Prometheus, the process is extremely simple: in the Prometheus HTTP API, time range queries have a separate HTTP endpoint path from instantaneous queries, and the time range is provided as query parameters separate from the query itself, so the range is easily modified without Trickster having any knowledge of the underlying query or needing to parse it at all.
+
+In the example of ClickHouse, the process is much harder: since the query language is a variant of standard SQL, the requested time ranges are embedded in the query itself behind the `WHERE` clause. In cases such as this, the Client must be able to interpret the time range and modify it with the time ranges provided by the DeltaProxyCache, either by (1) importing a database package that can deserialize the query, allow manipulation, and serialize the modified query for you; or (2) introducing new parsing and search/replace logic in your own package. With ClickHouse, since it is a C++ project and Trickster is written in Go, we could not import a package to handle this work, so we crafted a regular expression to match against inbound queries. This regex extracts the time range (and any ClickHouse time modifiers like `startOfMinute`), the step value, and any other areas that include the time range, and we then use simple built-in string functions to inject tokens in place of the specific ranges in the provided query. Then, when SetExtent is called, those tokens are search/replaced with the provided time values.
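+
+To make the token-injection approach concrete, here is a small, self-contained Go sketch of the idea. The regular expression, token names, and query shape are illustrative assumptions and not Trickster's actual ClickHouse implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// tsPattern captures the two absolute epoch values in a clause like
+// "WHERE t >= 1574699000 AND t <= 1574699900". The real patterns are more
+// involved; this only demonstrates the tokenization idea.
+var tsPattern = regexp.MustCompile(`>=\s*(\d+)\s+AND\s+\w+\s*<=\s*(\d+)`)
+
+// tokenize replaces the captured absolute values with placeholder tokens.
+func tokenize(query string) string {
+	m := tsPattern.FindStringSubmatch(query)
+	if m == nil {
+		return query
+	}
+	query = strings.Replace(query, m[1], "<$TS1$>", 1)
+	query = strings.Replace(query, m[2], "<$TS2$>", 1)
+	return query
+}
+
+// setExtent swaps the tokens for the time range requested by the
+// DeltaProxyCache, analogous to the Client's SetExtent method.
+func setExtent(templated string, start, end int64) string {
+	templated = strings.Replace(templated, "<$TS1$>", fmt.Sprintf("%d", start), 1)
+	return strings.Replace(templated, "<$TS2$>", fmt.Sprintf("%d", end), 1)
+}
+
+func main() {
+	q := "SELECT t, v FROM metrics WHERE t >= 1574699000 AND t <= 1574699900"
+	tpl := tokenize(q)
+	fmt.Println(tpl)                                     // query with <$TS1$>/<$TS2$> tokens
+	fmt.Println(setExtent(tpl, 1574699300, 1574699600)) // query with the adjusted range
+}
+```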
+
+### Data Model Considerations
+
+Once you have the Client Interface implementation down and can interact with the upstream HTTP API, you will turn your attention to managing the response payload data and what to do with it, and that happens in your Timeseries Interface implementation. And like the Client interface, it will come with its own unique challenges.
+
+The main consideration here is the format of the output and what challenges are presented by it. For example, does the payload include any required metadata (e.g., a count of total rows returned) that you will need to synthesize within your Timeseries after a `Merge`? Going back to the ClickHouse example, since it is a columnar database that happens to have time aggregation functions, there are a million ways to formulate a query that yields time series results. That can have implications for the resulting dataset: which fields are the time and value fields, and what are the rest? Are all datapoints for all the series in a single large slice, or have they been segregated into their own slices? Is the timestamp in Epoch format, and if so, does it represent seconds or milliseconds? In order to support an upstream database, you may need to establish or adopt guidelines around these and other questions to ensure full compatibility. The ClickHouse plugin for Grafana requires that, for each datapoint in the response, the first field is the timestamp and the second field is the numeric value - so we adopt and document the same guideline to conform to existing norms.
+
+## Getting More Help
+
+On the Gophers Slack instance, you can find us on the #trickster channel for any help you may need.
diff --git a/docs/health.md b/docs/health.md
index 5cecd5b37..c86ac0510 100644
--- a/docs/health.md
+++ b/docs/health.md
@@ -1,15 +1,23 @@
# Health Checks
-## Ping Endpoint
-Trickster provides a `/ping` endpoint that returns a response of `200 OK` and the word `pong` if Trickster is up and running. The `/ping` endpoint does not check any proxy configurations or upstream origins.
+## Trickster Service Health - Ping Endpoint
-## Health Check Endpoint
-Trickster offers a `/health` endpoint for monitoring the health of the Trickster service and its upstream connection to the origin. To test the upstream origin, Trickster will make a request to its labels endpoint (`/label/__name__/values`).
+Trickster provides a `/trickster/ping` endpoint that returns a response of `200 OK` and the word `pong` if Trickster is up and running. The `/trickster/ping` endpoint does not check any proxy configurations or upstream origins. The path to the Ping endpoint is configurable; see the configuration documentation for more information.
-An HTTP response of `200 OK` indicates that the end-to-end request to the origin was successful.
+## Upstream Connection Health - Origin Health Endpoints
-In a multi-origin setup, requesting against `/health` will test the default origin. You can indicate a specific origin to test by crafting requests in the same way a normal multi-origin request is structured. For example, `/origin_moniker/health`. See [multi-origin.md](multi-origin.md) for more information.
+Trickster offers `health` endpoints for monitoring the health of the Trickster service with respect to its upstream connection to origin servers.
+
+Each configured origin's health check path is `/trickster/health/ORIGIN_NAME`. For example, if your origin is named `foo`, you can perform a health check of the upstream origin by requesting `/trickster/health/foo` on your Trickster endpoint.
+
+The behavior of a `health` request will vary based on the Origin Type, as each Origin Type implements a custom default health check behavior. For example, with Prometheus, Trickster makes a request to `/query?query=up` and (hopefully) receives a `200 OK`, while for InfluxDB the request is to `/ping`, which returns a `204 No Content`. You can customize the behavior in the Trickster configuration. See the [example.conf](../cmd/trickster/conf/example.conf) for guidance.
+
+The Origin-Specific default health check configurations should return a 200-range status code to indicate that the end-to-end health check to the origin was successful. Note that this behavior is not guaranteed when operating under user-provided health check configurations.
## Other Ways to Monitor Health
In addition to the out-of-the-box health checks to determine up-or-down status, you may want to setup alarms and thresholds based on the metrics instrumented by Trickster. See [metrics.md](metrics.md) for collecting performance metrics about Trickster.
+
+## Config Endpoint
+
+Trickster also provides a `/trickster/config` endpoint that returns the TOML output of the currently-running Trickster configuration. The TOML-formatted configuration will include all defaults populated, overlaid with any configuration file settings, command-line arguments and/or applicable environment variables. The path to the Config endpoint is configurable; see the configuration documentation for more information.
diff --git a/docs/images/basic-collapsed-forwarding.png b/docs/images/basic-collapsed-forwarding.png
new file mode 100644
index 000000000..c71e29da0
Binary files /dev/null and b/docs/images/basic-collapsed-forwarding.png differ
diff --git a/docs/images/deploy-multi-origin-1.png b/docs/images/deploy-multi-origin-1.png
new file mode 100644
index 000000000..9612ceb0d
Binary files /dev/null and b/docs/images/deploy-multi-origin-1.png differ
diff --git a/docs/images/deploy-multi-origin.png b/docs/images/deploy-multi-origin.png
deleted file mode 100644
index 1c02a280c..000000000
Binary files a/docs/images/deploy-multi-origin.png and /dev/null differ
diff --git a/docs/images/deploy-multi-trickster-1.png b/docs/images/deploy-multi-trickster-1.png
new file mode 100644
index 000000000..3a4e2e938
Binary files /dev/null and b/docs/images/deploy-multi-trickster-1.png differ
diff --git a/docs/images/deploy-multi-trickster.png b/docs/images/deploy-multi-trickster.png
deleted file mode 100644
index faf25fe59..000000000
Binary files a/docs/images/deploy-multi-trickster.png and /dev/null differ
diff --git a/docs/images/deploy-single-everything-1.png b/docs/images/deploy-single-everything-1.png
new file mode 100644
index 000000000..e430cc3e2
Binary files /dev/null and b/docs/images/deploy-single-everything-1.png differ
diff --git a/docs/images/deploy-single-everything.png b/docs/images/deploy-single-everything.png
deleted file mode 100644
index 8615153f1..000000000
Binary files a/docs/images/deploy-single-everything.png and /dev/null differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/data.plist b/docs/images/diagrams/trickster-docs-graphics.graffle/data.plist
new file mode 100644
index 000000000..2992eba12
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/data.plist differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image10.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image10.png
new file mode 100644
index 000000000..51248ddc7
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image10.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image19.pdf b/docs/images/diagrams/trickster-docs-graphics.graffle/image19.pdf
new file mode 100644
index 000000000..360c8a8e0
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image19.pdf differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image23.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image23.png
new file mode 100644
index 000000000..75d1d3d38
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image23.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image24.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image24.png
new file mode 100644
index 000000000..7c87b08f5
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image24.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image25.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image25.png
new file mode 100644
index 000000000..5cfc63edb
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image25.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image27.pdf b/docs/images/diagrams/trickster-docs-graphics.graffle/image27.pdf
new file mode 100644
index 000000000..101161a51
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image27.pdf differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image29.pdf b/docs/images/diagrams/trickster-docs-graphics.graffle/image29.pdf
new file mode 100644
index 000000000..20e4a1f1b
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image29.pdf differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image31.pdf b/docs/images/diagrams/trickster-docs-graphics.graffle/image31.pdf
new file mode 100644
index 000000000..76970e6c7
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image31.pdf differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image32.tiff b/docs/images/diagrams/trickster-docs-graphics.graffle/image32.tiff
new file mode 100644
index 000000000..5e0439283
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image32.tiff differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image33.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image33.png
new file mode 100644
index 000000000..2264d7909
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image33.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image34.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image34.png
new file mode 100644
index 000000000..2faa66750
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image34.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image35.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image35.png
new file mode 100644
index 000000000..c73d97283
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image35.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image36.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image36.png
new file mode 100644
index 000000000..2804fdd93
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image36.png differ
diff --git a/docs/images/diagrams/trickster-docs-graphics.graffle/image8.png b/docs/images/diagrams/trickster-docs-graphics.graffle/image8.png
new file mode 100644
index 000000000..1d18d1ac0
Binary files /dev/null and b/docs/images/diagrams/trickster-docs-graphics.graffle/image8.png differ
diff --git a/docs/images/external/clickhouse_logo.png b/docs/images/external/clickhouse_logo.png
new file mode 100644
index 000000000..0904a007b
Binary files /dev/null and b/docs/images/external/clickhouse_logo.png differ
diff --git a/docs/images/external/influx_logo_150.png b/docs/images/external/influx_logo_150.png
new file mode 100644
index 000000000..3c0f37eec
Binary files /dev/null and b/docs/images/external/influx_logo_150.png differ
diff --git a/docs/images/external/influx_logo_60.png b/docs/images/external/influx_logo_60.png
new file mode 100644
index 000000000..10dffdeb7
Binary files /dev/null and b/docs/images/external/influx_logo_60.png differ
diff --git a/docs/images/external/influx_logo_610.png b/docs/images/external/influx_logo_610.png
new file mode 100644
index 000000000..5cfc63edb
Binary files /dev/null and b/docs/images/external/influx_logo_610.png differ
diff --git a/docs/images/external/irondb_logo_150.png b/docs/images/external/irondb_logo_150.png
new file mode 100644
index 000000000..7fedb87db
Binary files /dev/null and b/docs/images/external/irondb_logo_150.png differ
diff --git a/docs/images/external/irondb_logo_60.png b/docs/images/external/irondb_logo_60.png
new file mode 100644
index 000000000..3e39cca0f
Binary files /dev/null and b/docs/images/external/irondb_logo_60.png differ
diff --git a/docs/images/external/prom_logo_150.png b/docs/images/external/prom_logo_150.png
new file mode 100644
index 000000000..b3c23d57c
Binary files /dev/null and b/docs/images/external/prom_logo_150.png differ
diff --git a/docs/images/external/prom_logo_60.png b/docs/images/external/prom_logo_60.png
new file mode 100644
index 000000000..4530f6cd2
Binary files /dev/null and b/docs/images/external/prom_logo_60.png differ
diff --git a/docs/images/external/prom_logo_800.png b/docs/images/external/prom_logo_800.png
new file mode 100644
index 000000000..2264d7909
Binary files /dev/null and b/docs/images/external/prom_logo_800.png differ
diff --git a/docs/images/high-level-1.png b/docs/images/high-level-1.png
new file mode 100644
index 000000000..d6666b653
Binary files /dev/null and b/docs/images/high-level-1.png differ
diff --git a/docs/images/high-level.png b/docs/images/high-level.png
deleted file mode 100644
index 940230ca2..000000000
Binary files a/docs/images/high-level.png and /dev/null differ
diff --git a/docs/images/partial-cache-hit.png b/docs/images/partial-cache-hit.png
index 8d380b3fe..fd6f14805 100644
Binary files a/docs/images/partial-cache-hit.png and b/docs/images/partial-cache-hit.png differ
diff --git a/docs/images/progressive-collapsed-forwarding-cache.png b/docs/images/progressive-collapsed-forwarding-cache.png
new file mode 100644
index 000000000..fff05b5d4
Binary files /dev/null and b/docs/images/progressive-collapsed-forwarding-cache.png differ
diff --git a/docs/images/progressive-collapsed-forwarding-proxy.png b/docs/images/progressive-collapsed-forwarding-proxy.png
new file mode 100644
index 000000000..24c25baf9
Binary files /dev/null and b/docs/images/progressive-collapsed-forwarding-proxy.png differ
diff --git a/docs/influxdb.md b/docs/influxdb.md
new file mode 100644
index 000000000..3e8b631a4
--- /dev/null
+++ b/docs/influxdb.md
@@ -0,0 +1,33 @@
+# InfluxDB Support
+
+Trickster 1.0 provides experimental support for accelerating InfluxDB queries that return time series data normally visualized on a dashboard. Acceleration works by using the Time Series Delta Proxy Cache to minimize the number and time range of queries to the upstream InfluxDB server.
+
+## Scope of Support
+
+Trickster is tested with the built-in [InfluxDB DataSource Plugin for Grafana](https://grafana.com/grafana/plugins/influxdb) v5.0.0.
+
+Trickster uses pre-compiled Regular Expression pattern matches on the incoming InfluxDB query to deconstruct its components, determine if it is cacheable and, if so, what elements are factored into the cache key derivation. We also determine what parts of the query are template-able (e.g., `time >= $ts1 AND <= $ts2`) based on the provided absolute values, in order to normalize the query before hashing the cache key.
+
+If you find query or response structures that are not yet supported, or that provide inconsistent or unexpected results, we'd love for you to report those. We also always welcome any contributions around this functionality. The regular expression patterns we currently use will likely grow in complexity as support for more query patterns is added. Thus, we may need to find a more robust query parsing solution, and welcome any assistance with that as well.
+
+Trickster currently supports the following InfluxDB query patterns (case-insensitive), which align with queries generated by the InfluxDB Data Source Plugin for Grafana:
+
+```sql
+SELECT field1 [, field2, field3...]
+FROM "exampledb"."example_table"
+ WHERE ("some_field" = "some_val")
+ AND $timeExpression
+GROUP BY time($duration) [, group2, group3...]
+```
+
+The `$timeExpression` section must be in the format of `time $ts1 [AND $ts2]`
+
+Example `$timeExpression` strings:
+
+* `time >= now()`
+* `time >= 1574699000000ms`
+* `time >= 1574699000000ms AND time <= 1574699900000ms`
+
+`$duration` must be a numeric value followed by a duration unit (e.g., `s` or `ms`), such as `60s`.
+
+The InfluxDB `epoch` HTTP request query parameter is currently required to be set to `ms`.
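+
+To make the normalization step described above concrete, here is a hedged Go sketch. The pattern, placeholder token, and hashing below are illustrative assumptions rather than Trickster's actual implementation, but they show how two requests that differ only in their absolute time values can derive the same cache key.
+
+```go
+package main
+
+import (
+	"crypto/md5"
+	"fmt"
+	"regexp"
+)
+
+// timeValue matches absolute epoch values (optionally unit-suffixed, e.g.
+// "1574699000000ms") so they can be templated out before key hashing.
+var timeValue = regexp.MustCompile(`\b\d{10,19}(ms|s|u|ns)?\b`)
+
+// normalize replaces absolute time values with a placeholder token.
+func normalize(query string) string {
+	return timeValue.ReplaceAllLiteralString(query, "{ts}")
+}
+
+// cacheKey derives an illustrative key from the normalized query; the real
+// derivation also factors in other request characteristics.
+func cacheKey(query string) string {
+	return fmt.Sprintf("%x", md5.Sum([]byte(normalize(query))))
+}
+
+func main() {
+	q1 := `SELECT mean("v") FROM "db"."cpu" WHERE time >= 1574699000000ms AND time <= 1574699900000ms GROUP BY time(60s)`
+	q2 := `SELECT mean("v") FROM "db"."cpu" WHERE time >= 1574785400000ms AND time <= 1574786300000ms GROUP BY time(60s)`
+	fmt.Println(normalize(q1))                // templated form used for hashing
+	fmt.Println(cacheKey(q1) == cacheKey(q2)) // true: same query shape -> same key
+}
+```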
diff --git a/docs/metrics.md b/docs/metrics.md
index b5eebdf27..fd08c0588 100644
--- a/docs/metrics.md
+++ b/docs/metrics.md
@@ -2,22 +2,119 @@
Trickster exposes a Prometheus /metrics endpoint with a customizable listener port number (default is 8082). For more information on customizing the metrics configuration, see [configuring.md](configuring.md).
-The following metrics are available for polling:
+---
-* `trickster_requests_total` (Counter) - The total number of requests Trickster has handled.
+The following metrics are available for polling with any Trickster configuration:
+
+* `trickster_frontend_requests_total` (Counter) - Count of front end requests handled by Trickster
+ * labels:
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `method` - the HTTP Method of the proxied request
+ * `http_status` - The HTTP response code provided by the origin
+ * `path` - the Path portion of the requested URL
+
+* `trickster_frontend_requests_duration_seconds` (Histogram) - Histogram of front end request durations handled by Trickster
+ * labels:
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `method` - the HTTP Method of the proxied request
+ * `http_status` - The HTTP response code provided by the origin
+ * `path` - the Path portion of the requested URL
+
+* `trickster_frontend_written_byte_total` (Counter) - Count of bytes written in front end requests handled by Trickster
+ * labels:
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `method` - the HTTP Method of the proxied request
+ * `http_status` - The HTTP response code provided by the origin
+ * `path` - the Path portion of the requested URL
+
+
+
+* `trickster_proxy_requests_total` (Counter) - The total number of requests Trickster has handled.
* labels:
- * `method` - 'query' or 'query_range'
- * `status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `method` - the HTTP Method of the proxied request
+ * `cache_status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `http_status` - The HTTP response code provided by the origin
+ * `path` - the Path portion of the requested URL
+* `trickster_proxy_points_total` (Counter) - The total number of data points Trickster has handled.
+ * labels:
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `cache_status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `path` - the Path portion of the requested URL
-* `trickster_points_total` (Counter) - The total number of data points Trickster has handled.
+* `trickster_proxy_request_duration_seconds` (Histogram) - Time required to proxy a given Prometheus query.
* labels:
- * `status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `origin_name` - the name of the configured origin handling the proxy request
+ * `origin_type` - the type of the configured origin handling the proxy request
+ * `method` - the HTTP Method of the proxied request
+ * `cache_status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `http_status` - The HTTP response code provided by the origin
+ * `path` - the Path portion of the requested URL
+
+* `trickster_proxy_max_connections` (Gauge) - Trickster max number of allowed concurrent connections
+
+* `trickster_proxy_active_connections` (Gauge) - Trickster number of concurrent connections
+* `trickster_proxy_requested_connections_total` (Counter) - Trickster total number of connections requested by clients.
-* `trickster_proxy_duration_seconds` (Histogram) - Time required to proxy a given Prometheus query.
+* `trickster_proxy_accepted_connections_total` (Counter) - Trickster total number of accepted client connections.
+
+* `trickster_proxy_closed_connections_total` (Counter) - Trickster total number of administratively closed client connections.
+
+* `trickster_proxy_failed_connections_total` (Counter) - Trickster total number of failed client connections.
+
+* `trickster_cache_operation_objects_total` (Counter) - The total number of objects upon which the Trickster cache has operated.
* labels:
- * `method` - 'query' or 'query_range'
- * `status` - 'hit', 'phit', (partial hit) 'kmiss', (key miss) 'rmiss' (range miss)
+ * `cache_name` - the name of the configured cache performing the operation
+ * `cache_type` - the type of the configured cache performing the operation
+ * `operation` - the name of the operation being performed (read, write, etc.)
+ * `status` - the result of the operation being performed
+
+
+* `trickster_cache_operation_bytes_total` (Counter) - The total number of bytes upon which the Trickster cache has operated.
+ * labels:
+ * `cache_name` - the name of the configured cache performing the operation
+ * `cache_type` - the type of the configured cache performing the operation
+ * `operation` - the name of the operation being performed (read, write, etc.)
+ * `status` - the result of the operation being performed
+
+---
+
+The following metrics are available only for Cache Types whose object lifecycle Trickster manages internally (Memory, Filesystem and bbolt):
+
+* `trickster_cache_events_total` (Counter) - The total number of events that change the Trickster cache, such as retention policy evictions.
+ * labels:
+ * `cache_name` - the name of the configured cache experiencing the event
+ * `cache_type` - the type of the configured cache experiencing the event
+ * `event` - the name of the event being performed
+ * `reason` - the reason the event occurred
+
+* `trickster_cache_usage_objects` (Gauge) - The current count of objects in the Trickster cache.
+ * labels:
+ * `cache_name` - the name of the configured cache
+ * `cache_type` - the type of the configured cache
+
+* `trickster_cache_usage_bytes` (Gauge) - The current count of bytes in the Trickster cache.
+ * labels:
+ * `cache_name` - the name of the configured cache
+ * `cache_type` - the type of the configured cache
+
+* `trickster_cache_max_usage_objects` (Gauge) - The maximum allowed size of the Trickster cache in objects.
+ * labels:
+ * `cache_name` - the name of the configured cache
+ * `cache_type` - the type of the configured cache
+
+* `trickster_cache_max_usage_bytes` (Gauge) - The maximum allowed size of the Trickster cache in bytes.
+ * labels:
+ * `cache_name` - the name of the configured cache
+ * `cache_type` - the type of the configured cache
+
+---
-In addition to these custom metrics, Trickster also exposes the standard Prometheus metrics that are part of the [client_golang](https://github.com/prometheus/client_golang) package, including memory and cpu utilization, etc.
+In addition to these custom metrics, Trickster also exposes the standard Prometheus metrics that are part of the [client_golang](https://github.com/prometheus/client_golang) metrics instrumentation package, including memory and cpu utilization, etc.
diff --git a/docs/multi-origin.md b/docs/multi-origin.md
index 03e3476d5..b07d4ce73 100644
--- a/docs/multi-origin.md
+++ b/docs/multi-origin.md
@@ -1,120 +1,111 @@
# Using Multiple-Origins with a single Trickster instance
-There are 3 ways to configure multi-origin support.
+Trickster supports proxying to multiple origins by examining the inbound request and using a multiplexer to direct the proxied request to the correct upstream origin, in the same way that web servers support virtual hosting. Multi-origin does _not_ equate to High Availability support; Trickster does not offer any kind of redundancy features. Using Multiple Origins simply means that a single Trickster instance can accelerate any number of unrelated upstream origins instead of requiring a Trickster instance per-origin.
+
+There are 2 ways to configure multi-origin support.
* HTTP Pathing
-* HTTP URL Parameters
* DNS Aliasing
## Basic Usage
-To utilize Multiple Origins, you must craft a Trickster configuration file to be read when Trickster starts up. There is a good example in [conf/example.conf](../conf/example.conf). The config file should be placed in `/etc/trickster/trickster.conf` unless you specify a different path when starting Trickster with the `-config` command line argument.
+To utilize Multiple Origins, you must craft a Trickster configuration file to be read when Trickster starts up - multi-origin is not supported with environment variables or command line arguments alone. The [example.conf](../cmd/trickster/conf/example.conf) provides good documentation and commented sections demonstrating multi-origin. The config file should be placed in `/etc/trickster/trickster.conf` unless you specify a different path when starting Trickster with the `-config` command line argument.
Each origin that your Trickster instance supports must be explicitly enumerated in the configuration file. Trickster does not support open proxying.
-Each origin is identified by an origin moniker, provided in the configuration section header for the origin ([origins.MONIKER]). For path and urlparam multi-origin configurations, the Moniker can be simple words. For DNS Aliasing, the origin moniker must match an FQDN that resolves to your Trickster instance.
+Each origin is identified by an Origin Name, provided in the configuration section header for the origin ([origins.NAME]). For path-based routing configurations, the Origin Name can be simple words. For DNS Aliasing, the Origin Name must match an FQDN that resolves to your Trickster instance. Also for DNS Aliasing, enclose the FQDN in quotes in the origin config section header (e.g., `[origins.'db.example.com']`).
+
+### Default Origin
+
+Whether proxying to one or more upstreams, Trickster has the concept of a "default" origin, which means it does not require a specific DNS hostname in the request, or a specific URL path, in order to proxy the request to a known origin. When a default origin is configured, if the inbound request does not match any mapped origins by path or FQDN, the request will automatically be mapped to the default origin. You are probably familiar with this behavior from when you first tried out Trickster using only command line arguments.
+
+Here's an example: if you have Trickster configured with an origin named `foo` that proxies to `http://foo/` and is configured as the default origin, then requesting `http://trickster/image.jpg` will initiate a proxy request to `http://foo/image.jpg`, without requiring the path to be prefixed with `/foo`. But requesting `http://trickster/foo/image.jpg` would also work.
-In all cases, if Trickster cannot identify a valid origin by the client-provided moniker, it will proxy the request to the default origin.
+The default origin can be configured by setting `is_default = true` for the origin you have elected to make the default. Having a default origin is optional. In a single-origin configuration, Trickster will automatically set the sole origin as `is_default = true` unless you explicitly set `is_default = false` in the configuration file. If you have multiple origins, and don't wish to have a default origin, you can just omit the value for all origins. If you set `is_default = true` for more than one origin, Trickster will exit with a fatal error on startup.
-### Path and URL Param Configurations
+### Path-based Routing Configurations
-In these modes, Trickster will use a single FQDN but still map to multiple upstream origins. This is the simplest setup and requires the least amount of work. The client will indicate which origin is desired in the URL Parameter or Path for the request.
+In this mode, Trickster will use a single FQDN but still map to multiple upstream origins. This is the simplest setup and requires the least amount of work. The client will indicate which origin is desired in the URL Path for the request.
-Example Path/URLParam Multi-Origin Configuration:
+Example Path-based Multi-Origin Configuration:
```
[origins]
- # default origin
- [origins.default]
+ # origin1 origin
+ [origins.origin1]
origin_url = 'http://prometheus.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_type = 'prometheus'
+ cache_name = 'default'
+ is_default = true
# "foo" origin
[origins.foo]
- origin_url = 'http://prometheus-foo.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_url = 'http://influxdb-foo.example.com:9090'
+ origin_type = 'influxdb'
+ cache_name = 'default'
# "bar" origin
[origins.bar]
origin_url = 'http://prometheus-bar.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_type = 'prometheus'
+ cache_name = 'default'
```
#### Using HTTP Path as the Multi-Origin Indicator
-The client prefixes the Trickster request path with the origin moniker.
+The client prefixes the Trickster request path with the Origin Name.
This is the recommended method for integrating multi-origin support into Grafana.
Example Client Request URLs:
-* To Request from Origin `foo`: http://trickster.example.com:9090/foo/query?query=xxx
-* To Request from Origin `bar`: http://trickster.example.com:9090/bar/query?query=xxx
+* To Request from Origin `foo`: http://trickster.example.com:9090/foo/query?query=xxx
+
+* To Request from Origin `bar`: http://trickster.example.com:9090/bar/query?query=xxx
-* To Request from Origin `default` (Method 1, no Moniker): http://trickster.example.com:9090/query?query=xxx
+* To Request from Origin `origin1` as default: http://trickster.example.com:9090/query?query=xxx
-* To Request from Origin `default` (Method 2, with Moniker): http://trickster.example.com:9090/default/query?query=xxx
+* To Request from Origin `origin1` (Method 2, with Origin Name): http://trickster.example.com:9090/origin1/query?query=xxx
* Configuring Grafana to request from origin `foo` via Trickster:
-#### Using urlparam as the Multi-Origin Indicator
-
-The client provides the origin moniker as an url param.
-
-Example Client Request URLs:
-
-* To Request from Origin `foo`: http://trickster.example.com:9090/query?origin=foo&query=xxx
-
-* To Request from Origin `bar`: http://trickster.example.com:9090/query?origin=bar&query=xxx
-
### DNS Alias Configuration
-In this mode, multiple DNS records point to a single Trickster instance. The FQDN used by the client to reach Trickster represents the Origin Moniker. Therefore, the entire FQDN must be part of the configuration section header. In this mode, the path and url params requested against Trickster are _not_ modified to specify an origin.
+In this mode, multiple DNS records point to a single Trickster instance. The FQDN used by the client to reach Trickster represents the Origin Name. Therefore, the entire FQDN must be part of the configuration section header. In this mode, the URL Path is _not_ considered during Origin Selection.
Example DNS-based Origin Configuration:
```
[origins]
- # default origin
- [origins.default]
+ # origin1 origin
+ [origins.origin1]
origin_url = 'http://prometheus.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_type = 'prometheus'
+ cache_name = 'default'
+ is_default = true
# "foo" origin
- [origins.trickster-foo.example.com]
+ [origins.'trickster-foo.example.com']
origin_url = 'http://prometheus-foo.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_type = 'prometheus'
+ cache_name = 'default'
# "bar" origin
- [origins.trickster-bar.example.com]
+ [origins.'trickster-bar.example.com']
origin_url = 'http://prometheus-bar.example.com:9090'
- api_path = '/api/v1'
- default_step = 300
- ignore_no_cache_header = false
- max_value_age_secs = 86400
+ origin_type = 'prometheus'
+ cache_name = 'default'
```
Example Client Request URLs:
-* To Request from Origin `foo`: http://trickster-foo.example.com:9090/query?query=xxx
-* To Request from Origin `bar`: http://trickster-bar.example.com:9090/query?query=xxx
+* To Request from Origin `foo`: http://trickster-foo.example.com:9090/query?query=xxx
+
+* To Request from Origin `bar`: http://trickster-bar.example.com:9090/query?query=xxx
+
+* To Request from Origin `origin1` as default: http://trickster.example.com:9090/query?query=xxx
-* To Request from Origin `default`: http://trickster.example.com:9090/query?query=xxx
+* To Request from Origin `origin1` (Method 2, via FQDN):
diff --git a/docs/negative-caching.md b/docs/negative-caching.md
new file mode 100644
index 000000000..51ff9cd7a
--- /dev/null
+++ b/docs/negative-caching.md
@@ -0,0 +1,22 @@
+# Negative Caching
+
+Negative Caching means caching undesired HTTP responses for a very short period of time, in order to prevent those responses from overwhelming a system that would otherwise scale normally while returning desired, cacheable HTTP responses. For example, Trickster can be configured to cache `404 Not Found` or `500 Internal Server Error` responses for a short period of time, to ensure that a thundering herd of HTTP requests for a non-existent object, or unexpected downtime of a critical service, does not create an i/o bottleneck in your application pipeline.
+
+Trickster supports negative caching of any status code >= 300 and < 600, on a per-Origin basis. In your Trickster configuration file, add the desired Negative Cache Map to the desired Origin config. The format of the Negative Cache Map is `status_code = ttl_in_secs` such as `404 = 30`. See the [example.conf](../cmd/trickster/conf/example.conf), or refer to the snippet below for more information.
+
+The Negative Cache Map must be an all-inclusive list of explicit status codes; there is currently no wildcard or status code range support for Negative Caching entries. By default, the Negative Cache Map is empty for all origin configs. The Negative Cache only applies to Cacheable Objects, and does not apply to Timeseries-Accelerated Requests via the Delta Proxy Cache engine, or to Proxy-Only configurations.
+
+For any response code handled by the Negative Cache, the response object's effective cache TTL is explicitly overridden to the value of that code's Negative Cache TTL, regardless of any response headers provided by the Origin concerning cacheability. All response headers are left intact and unmodified by Trickster's Negative Cache, such that Negative Caching is transparent to the client. Trickster currently does not insert any response headers or information indicating to downstream clients that the response was served from the Negative Cache.
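+
+A minimal Go sketch of that override behavior, using hypothetical names (this is not Trickster's internal code):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// effectiveTTL returns the TTL to cache a response under, letting the
+// configured negative cache map override the origin's cache headers for
+// any matching status code. Names here are illustrative, not Trickster's.
+func effectiveTTL(status int, headerTTL time.Duration, negative map[int]time.Duration) time.Duration {
+	if ttl, ok := negative[status]; ok {
+		return ttl
+	}
+	return headerTTL
+}
+
+func main() {
+	negative := map[int]time.Duration{404: 10 * time.Second, 500: 10 * time.Second}
+	fmt.Println(effectiveTTL(404, 0, negative))               // 10s, regardless of response headers
+	fmt.Println(effectiveTTL(200, 300*time.Second, negative)) // 5m0s, from the origin's headers
+}
+```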
+
+## Example Negative Caching Config
+
+```toml
+[origins]
+
+ [origins.default]
+ origin_type = 'rpc'
+
+ [origins.default.negative_cache]
+ 404 = 10 # Cache 404's for 10 seconds
+ 500 = 10 # Cache 500's for 10 seconds
+```
diff --git a/docs/new-changed-1.0.md b/docs/new-changed-1.0.md
new file mode 100644
index 000000000..af0ef4e26
--- /dev/null
+++ b/docs/new-changed-1.0.md
@@ -0,0 +1,102 @@
+# Trickster 1.0
+
+## What's Improved
+
+1.0 is a major improvement over 0.1.x, with thousands of lines of code for new features, bug fixes, and optimizations. Here's the quick rundown of what's new and improved:
+
+- Cache management is improved, with enhancements like a configurable max cache size and better metrics.
+- Configuration now allows per-origin cache provider selection.
+- Customizable HTTP Path Behaviors
+- Built-in TLS Support
+- The Time Series Delta Proxy is overhauled to be more efficient and performant.
+- Support for [negative caching](./negative-caching.md)
+- We now support Redis Cluster and Redis Sentinel (see [example.conf](../cmd/trickster/conf/example.conf))
+- We've added a Prometheus data simulator for more robust unit testing. Any other project that queries Prometheus may use it too as a standalone binary or as a package import for tests. See the [docs](./promsim.md) for more info.
+- For Gophers: we've refactored the project into packages with a much more cohesive structure, so it's much easier for you to contribute.
+- Also: The Cache Provider and Origin Proxy are exposed as Interfaces for easy extensibility.
+- Experimental Support For:
+ - [InfluxDB](./influxdb.md)
+ - [ClickHouse](./clickhouse.md)
+ - Circonus IRONdb
+ - Generic HTTP Reverse Proxy Cache
+
+And so much more! See the main [README](../readme.md) for more info.
+
+## Status
+
+We are currently in the beta phase of Trickster 1.0. We expect to release the 1.0 Release Candidate build by November 20, 2019, and have the 1.0 GA release by December 1, 2019.
+
+## How to Try Trickster 1.0
+
+The Docker image is available at `tricksterio/trickster:1.0-beta`, or see the Releases for downloadable binaries. We will push to this label each time a new beta release is ready, so you will need to `docker pull` to update to the latest beta as they are released. Additionally, we push to a monotonically incrementing beta label (e.g., `tricksterio/trickster:1.0-beta1`) to distinguish between beta builds.
+
+We'd love your help testing Trickster 1.0, as well as contributing any improvements or bug reports and fixes. Thank you!
+
+## Breaking Changes from 0.1.x
+
+### Prometheus Proxy as the Default Is Removed
+
+Since Trickster 1.0 supports multiple Origin Types (instead of just Prometheus), the Prometheus-specific default operating configuration has been removed from the application code. The `example.conf` will, for now, continue to function as the example Prometheus configuration.
+
+This means you can't simply run `trickster` and have a functioning proxy to `prometheus:9090` as you could in 0.1.x. Instead, Trickster will fail out with an error that you have not defined any Origins.
+
+This also means that with Trickster 1.0, you _must_ provide an `origin_type` for each Origin, so Trickster knows how to proxy requests to it.
+
+So in 1.0, you can run `trickster -origin_type prometheus -origin_url=http://prometheus:9090` or `trickster -config /path/to/example.conf` to achieve the same result as running `trickster` with no arguments in 0.1.x.
+
+See the section below on migrating a 0.1.x configuration for more information.
+
+### Ping, Config, and Upstream Health Check URL Endpoints
+
+In Trickster 1.0, non-proxied / administrative endpoints have been moved behind a `/trickster` root path, as follows:
+
+- The previous `/ping` path, for checking if Trickster is up, is now at `/trickster/ping`.
+
+- Origin-specific health check endpoints, previously routed via `/ORIGIN_NAME/health`, are now routed via `/trickster/health/ORIGIN_NAME`.
+
+- A new endpoint to expose the current running configuration is available at `/trickster/config`.
+
+### Origin Selection using Query Parameters
+
+In a multi-origin setup, Trickster 1.0 no longer supports the ability to select an Origin using Query Parameters. Trickster 1.0 continues to support Origin Selection via URL Path or Host Header as in 0.1.x.
+
+### Configuration Settings
+
+#### ignore_caching_headers / ignore_no_cache_header
+
+The `ignore_caching_headers` and `ignore_no_cache_header` configuration parameters that evolved in 0.1.x and early 1.0 betas have been removed. Trickster 1.0's customizable Path Configurations capability allows for unlimited paths to be defined and managed, including header manipulation; this subsumes the functionality of these configurations.
+
+#### api_path
+
+The `api_path` configuration parameter in 0.1.x that defaulted to `/api/v1/` has been removed. Trickster 1.0's customizable Path Configurations capability allows for unlimited paths to be defined and managed; this subsumes the functionality of the `api_path`.
+
+#### timeseries_retention_factor
+
+A new setting called `timeseries_retention_factor` replaces `max_value_age_secs` from 0.1.x, which is removed.
+
+`max_value_age_secs` provided a maximum relative age on the timestamp of any value retained in Trickster's cache, on a per-origin basis. That methodology works really well for browsers with a dashboard time range set to the last 24 hours (the default for max_value_age_secs) or less. But if your dashboards are set to a 5-day view, Trickster 0.1.x will not cache the oldest 4 days of the data set, even though it is likely at a low-enough resolution to be ideal for caching. So each time your last-5-days dashboard reloads, 80% of the needed data is always requested from the origin server, instead of just 1%.
+
+Conversely, while causing some large-timerange-with-low-resolution datasets to be undercached, `max_value_age_secs` also caused small-timerange-with-high-resolution datasets to be overcached. Imagine you have on display 24x7x365 an auto-refreshing 30-minute dashboard on a large screen in the NOC. In that case, 24 hours' worth of data for each of the dashboard's queries, at the highest resolution of 15 seconds, is cached -- although most of it will never be read again once it turns 31 minutes old. So in 0.1.x, those data sets cache 10x more data than they will ever need to retrieve.
+
+Enter `timeseries_retention_factor`. It improves upon `max_value_age_secs` by considering the _number_ of recent elements retained in the cache, rather than the _age_ of the elements' timestamps, when exercising the retention policy. This allows for virtually any chronological data set to be cached, regardless of its resolution or age, instead of just relatively recent datasets. This means Trickster 1.0 will perform flawlessly for the 5-day example, and keep the cache nice and lean in the 30-minute example, too. The eviction methodology of `timeseries_retention_factor` is controlled by an additional new setting called `timeseries_eviction_method` that allows you to choose between a performant methodology (`oldest`) that evicts chronologically oldest datapoints during eviction, or a more compute-intensive eviction methodology (`lru`) that evicts least-recently-used items, regardless of chronology. While the `lru` methodology will run hotter, it could result in a slightly better cache hit rate depending upon your specific use case. See the [retention documentation](./retention.md) for more info.
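+
+For illustration, the conceptual difference between the two retention approaches can be sketched as follows (the names, structures, and retention logic are simplified assumptions, not Trickster's implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+type point struct {
+	ts    time.Time
+	value float64
+}
+
+// retainByAge drops any point older than maxAge - the 0.1.x
+// max_value_age_secs approach.
+func retainByAge(pts []point, now time.Time, maxAge time.Duration) []point {
+	out := pts[:0]
+	for _, p := range pts {
+		if now.Sub(p.ts) <= maxAge {
+			out = append(out, p)
+		}
+	}
+	return out
+}
+
+// retainByCount keeps only the newest max points regardless of their age -
+// the 1.0 timeseries_retention_factor approach (with the 'oldest' method).
+func retainByCount(pts []point, max int) []point {
+	if len(pts) <= max {
+		return pts
+	}
+	return pts[len(pts)-max:] // assumes pts is sorted chronologically
+}
+
+func main() {
+	now := time.Now()
+	var pts []point
+	for i := 0; i < 2000; i++ { // 2000 points, one per minute, oldest first
+		pts = append(pts, point{ts: now.Add(-time.Duration(2000-i) * time.Minute)})
+	}
+	fmt.Println(len(retainByAge(append([]point(nil), pts...), now, 24*time.Hour))) // 1440: age-bounded
+	fmt.Println(len(retainByCount(pts, 1024)))                                     // 1024: count-bounded
+}
+```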
+
+### Config File
+
+Trickster 1.0 is incompatible with a 0.1.x config file. However, it can be made compatible with a few quick migration steps (your mileage may vary):
+
+- Make a backup of your config file.
+- Tab-indent the entire `[cache]` configuration block.
+- Search/Replace `[cache` with `[caches.default` (no trailing square bracket).
+- Unless you are using Redis, copy/paste the `[caches.default.index]` section from the [example.conf](../cmd/trickster/conf/example.conf) into your new config file under `[caches.default]`, as in the example.
+- Add a line with `[caches]` (unindented) immediately above the line with `[caches.default]`
+- Under each of your `[origins.NAME]` configurations, add the following lines
+
+```toml
+ cache_name = 'default'
+ origin_type = 'prometheus'
+```
+
+- Search and replace `boltdb` with `bbolt`
+- Examine each `max_value_age_secs` setting in your config and convert to a `timeseries_retention_factor` setting as per the above section. The recommended value for `timeseries_retention_factor` is `1024`.
+
+- For more information, refer to the [example.conf](../cmd/trickster/conf/example.conf), which is well-documented.
diff --git a/docs/paths.md b/docs/paths.md
new file mode 100644
index 000000000..0ea211989
--- /dev/null
+++ b/docs/paths.md
@@ -0,0 +1,223 @@
+# Customizing HTTP Path Behavior
+
+Trickster supports, via configuration, customizing the upstream request and downstream response behavior on a per-Path, per-Origin basis, by providing a `paths` configuration section for each origin configuration. Here are the basic capabilities for customizing Path behavior:
+
+- Modify client request headers prior to contacting the origin while proxying
+- Modify origin response headers prior to processing the response object in Trickster and delivering to the client
+- Modify the response code and body
+- Limit the scope of a path by HTTP Method
+- Select the HTTP Handler for the path (`proxy`, `proxycache` or a published origin-type-specific handler)
+- Select which HTTP Headers, URL Parameters and other client request characteristics will be used to derive the Cache Key under which Trickster stores the object.
+- Disable Metrics Reporting for the path
+
+## Path Matching Scope
+
+Paths are matchable as `exact` or `prefix`
+
+The default match is `exact`, meaning the client's requested URL Path must be an exact match to the configured path in order to match and be handled by a given Path Config. For example a request to `/foo/bar` will not match an `exact` Path Config for `/foo`.
+
+A `prefix` match will match any client-requested path to the Path Config with the longest prefix match. A `prefix` match Path Config to `/foo` will match `/foo/bar` as well as `/foobar` and `/food`. A basic string match is used to evaluate the incoming URL path, so it is recommended to consider finishing paths with a trailing `/`, like `/foo/` in Path Configurations, if needed to avoid any unintentional matches.
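+
+For illustration only, the longest-prefix selection described above might be sketched like this in Go (this is not Trickster's actual routing code):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// bestMatch returns the configured path with the longest prefix match for
+// the requested URL path, or "" if nothing matches.
+func bestMatch(requested string, prefixes []string) string {
+	best := ""
+	for _, p := range prefixes {
+		if strings.HasPrefix(requested, p) && len(p) > len(best) {
+			best = p
+		}
+	}
+	return best
+}
+
+func main() {
+	prefixes := []string{"/", "/foo", "/foo/bar/"}
+	fmt.Println(bestMatch("/foo/bar/baz.jpg", prefixes)) // /foo/bar/
+	fmt.Println(bestMatch("/food", prefixes))            // /foo - why a trailing / may be safer
+	fmt.Println(bestMatch("/other", prefixes))           // /
+}
+```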
+
+### Method Matching Scope
+
+The `methods` section of a Path Config takes a string array of HTTP Methods that are routed through this Path Config. You can provide `[ '*' ]` to route all methods for this path.
+
+## Suggested Use Cases
+
+- Redirect a path by configuring Trickster to respond with a `302` response code and a `Location` header
+- Issue a blanket `401 Unauthorized` code and custom response body to all requests for a given path.
+- Adjust Cache Control headers in either direction
+- Affix an Authorization header to requests proxied out by Trickster.
+- Control which paths are cached by Trickster, and which ones are simply proxied.
+
+## Header and Query Parameter Behavior
+
+### Basics
+
+You can specify request query parameters, as well as request and response headers, to be Set, Appended or Removed.
+
+#### Setting
+
+To Set a header or parameter means to insert if non-existent, or fully replace if pre-existing. To set a header, provide the header name and value you wish to set in the Path Config `request_params`, `request_headers` or `response_headers` sections, in the format of `'Header-or-Parameter-Name' = 'Value'`.
+
+As an example, if the client request provides a `Cache-Control: no-store` header, a Path Config with a header 'set' directive for `'Cache-Control' = 'no-transform'` will replace the `no-store` entirely with a `no-transform`; client requests that have no `Cache-Control` header that are routed through this Path will have the Trickster-configured header injected outright. The same logic applies to query parameters.
+
+#### Appending
+
+Appending means inserting the header or parameter if it doesn't exist, or appending the configured value(s) onto a pre-existing header or parameter with the given name. To indicate an append behavior (as opposed to set), prefix the header or parameter name with a '+' in the Path Config.
+
+Example: if the client request provides a `token=SomeHash` parameter and the Path Config includes the parameter `'+token' = 'ProxyHash'`, the effective parameter when forwarding the request to the origin will be `token=SomeHash&token=ProxyHash`.
+
+#### Removing
+
+Removing a header or parameter means to strip it from the HTTP Request or Response when present. To do so, prefix the header/parameter name with '-', for example, `-Cache-control: none`. When removing headers, a value is required in order to conform to the TOML specification; this value, however, is ineffectual. Note that there is currently no ability to remove a specific value from a specific header - only to remove the header entirely. Consider setting the header value outright as described above, to strip any unwanted values.
+
+#### Response Header Timing
+
+Response Header injections occur as the object is received from the origin and before Trickster handles the object, meaning any caching response headers injected by Trickster will also be used by Trickster immediately to handle caching policies internally. This allows users to override cache controls from upstream systems if necessary to alter the actual caching behavior inside of Trickster. For example, InfluxDB sends down a `Cache-Control: No-Cache` header, which is fine for the user's browser, but Trickster needs to ignore this header in order to accelerate InfluxDB; so the default Path Configs for InfluxDB actually remove this header.
+
+### Cache Key Components
+
+By default, Trickster will use the HTTP Method, URL Path and any Authorization header to derive its Cache Key. In a Path Config, you may specify any additional HTTP headers and URL Parameters to be used for cache key derivation, as well as information in the Request Body.
+
+#### Using Request Body Fields in Cache Key Hashing
+
+Trickster supports the parsing of the HTTP Request body for the purpose of deriving the Cache Key for a cacheable object. Note that body parsing requires reading the entire request body into memory and parsing it before operating on the object. This will result in slightly higher resource utilization and latency, depending upon the size of the client request body.
+
+Body parsing is supported when the request's HTTP method is `POST`, `PUT` or `PATCH`, and the request `Content-Type` is either `application/x-www-form-urlencoded`, `multipart/form-data`, or `application/json`.
+
+In a Path Config, provide the `cache_key_form_fields` setting with a list of form field names to include when hashing the cache key.
+
+Trickster supports parsing of the Request body as a JSON document, including documents that are multiple levels deep, using a basic pathing convention of forward slashes, to indicate the path to a field that should be included in the cache key. Take the following JSON document:
+
+```json
+{
+ "requestType": "query",
+ "query": {
+ "table": "movies",
+ "fields": "eidr,title",
+ "filter": "year=1979"
+ }
+}
+```
+
+To include the `requestType`, `table`, `fields`, and `filter` fields from this document when hashing the cache key, you can provide the following setting in a Path Configuration:
+
+`cache_key_form_fields = [ 'requestType', 'query/table', 'query/fields', 'query/filter' ]`
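+
+To make the forward-slash pathing convention concrete, here is a hedged Go sketch of extracting those fields from a decoded JSON body and folding them into a hashed key. The helper names and hashing scheme are illustrative assumptions, not Trickster's actual key derivation, which also factors in other request characteristics.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// lookup walks a decoded JSON document using a forward-slash path such as
+// "query/table" and returns the value found, if any.
+func lookup(doc map[string]interface{}, path string) (interface{}, bool) {
+	parts := strings.Split(path, "/")
+	var cur interface{} = doc
+	for _, p := range parts {
+		m, ok := cur.(map[string]interface{})
+		if !ok {
+			return nil, false
+		}
+		if cur, ok = m[p]; !ok {
+			return nil, false
+		}
+	}
+	return cur, true
+}
+
+func main() {
+	body := []byte(`{"requestType":"query","query":{"table":"movies","fields":"eidr,title","filter":"year=1979"}}`)
+	fields := []string{"requestType", "query/table", "query/fields", "query/filter"}
+
+	var doc map[string]interface{}
+	if err := json.Unmarshal(body, &doc); err != nil {
+		panic(err)
+	}
+
+	// Concatenate the selected field values and hash them into a cache key.
+	var b strings.Builder
+	for _, f := range fields {
+		if v, ok := lookup(doc, f); ok {
+			fmt.Fprintf(&b, "%s=%v;", f, v)
+		}
+	}
+	fmt.Printf("%x\n", sha256.Sum256([]byte(b.String())))
+}
+```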
+
+## Example Reverse Proxy Cache Config with Path Customizations
+
+```toml
+[origins]
+
+ [origins.default]
+ origin_type = 'rpc'
+
+ [origins.default.paths]
+
+ # root path '/'. Paths must be uniquely named but the
+ # name is otherwise unimportant
+ [origins.default.paths.root]
+ path = '/' # each path must be unique for the origin
+ methods = [ '*' ] # All HTTP methods applicable to this config
+ match_type = 'prefix' # matches any path under '/'
+ handler = 'proxy' # proxy only, no caching (this is the default)
+
+ # modify the query parameters en route to the origin
+ [origins.default.paths.root.request_params]
+ 'authToken' = 'secret string'
+
+ # When a user requests a path matching this route, Trickster will
+ # inject these headers into the request before contacting the Origin
+ [origins.default.paths.root.request_headers]
+ 'Cache-Control' = 'No-Transform' # Due to hyphens, quote the key name
+
+ # inject these headers into the response from the Origin
+ # before replying to the client
+ [origins.default.paths.root.response_headers]
+ 'Expires' = '-1'
+
+ [origins.default.paths.images]
+ path = '/images/'
+ methods = [ 'GET', 'HEAD' ]
+ handler = 'proxycache' # Trickster will cache the images directory
+ match_type = 'prefix'
+
+ [origins.default.paths.images.response_headers]
+ 'Cache-Control' = 'max-age=2592000' # cache for 30 days
+
+ # but only cache this rotating image for 30 seconds
+ [origins.default.paths.images_rotating]
+ path = '/images/rotating.jpg'
+ methods = [ 'GET' ]
+ handler = 'proxycache'
+ match_type = 'exact'
+
+ [origins.default.paths.images_rotating.response_headers]
+ 'Cache-Control' = 'max-age=30'
+ '-Expires' = '' # remove the 'Expires' header; the value provided here is ignored
+
+ # redirect this sunsetted feature to a discontinued message
+ [origins.default.paths.redirect]
+ path = '/blog'
+ methods = [ '*' ]
+ handler = 'localresponse'
+ match_type = 'prefix'
+ response_code = 302
+
+ [origins.default.paths.redirect.response_headers]
+ Location = '/discontinued'
+
+ # cache this API endpoint, keying on the query parameter
+ [origins.default.paths.api]
+ path = '/api/'
+ methods = [ 'GET', 'HEAD' ]
+ handler = 'proxycache'
+ match_type = 'prefix'
+ cache_key_params = [ 'query' ]
+
+ # same API endpoint, different HTTP methods to route against
+ [origins.default.paths.api-deny]
+ path = '/api/'
+ methods = [ 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS', 'CONNECT' ]
+ handler = 'localresponse'
+ match_type = 'prefix'
+ response_code = 401
+ response_body = 'this is a read-only api endpoint'
+
+ # cache the query endpoint, permitting GET or POST
+ [origins.default.paths.api-query]
+ path = '/api/query/'
+ methods = [ 'GET', 'HEAD', 'POST' ]
+ handler = 'proxycache'
+ match_type = 'prefix'
+ cache_key_params = [ 'query' ] # for GET / HEAD
+ cache_key_form_fields = [ 'query' ] # for POST
+```
+
+## Modifying Behavior of Time Series Origin Types
+
+Each of the Time Series Origin Types supported in Trickster comes with its own custom handlers and pre-defined Path Configs that are registered with the HTTP Router when Trickster starts up.
+
+For example, when Trickster is configured to accelerate Prometheus, pre-defined Path Configs are registered to control how requests to `/api/v1/query` work differently from requests to `/api/v1/query_range`. Specifically, the `/api/v1/query` Path Config uses the `query` and `time` URL query parameters when creating the cache key, and is routed through the Object Proxy Cache; while the `/api/v1/query_range` Path Config uses the `query`, `start`, `end` and `step` parameters, and is routed through the Time Series Delta Proxy Cache.
+
+In the Trickster config file, you can add your own Path Configs to your time series origin, as well as override individual settings for any of the pre-defined Path Configs, and those custom settings will be applied at startup.
+
+To know what configs you'd like to add or modify, take a look at the Trickster source code and examine the pre-definitions for the selected Origin Type. Each supported Origin Type's handlers and default Path Configs can be viewed under `/internal/proxy/origins/<origin_type>/routes.go`. These files are in a standard format that is quite human-readable, even for a non-coder, so don't be too intimidated. If you can understand Path Configs as TOML, you can understand them as Go code.
+
+Examples of customizing Path Configs for Origin Types with Pre-Definitions:
+
+```toml
+[origins]
+
+ [origins.default]
+ origin_type = 'prometheus'
+
+ [origins.default.paths]
+
+ # route /api/v1/label* (including /labels/*)
+ # through Proxy instead of ProxyCache as pre-defined
+ [origins.default.paths.label]
+ path = '/api/v1/label'
+ methods = [ 'GET' ]
+ match_type = 'prefix'
+ handler = 'proxy'
+
+ # route fictional new /api/v1/coffee to ProxyCache
+ [origins.default.paths.series_range]
+ path = '/api/v1/coffee'
+ methods = [ 'GET' ]
+ match_type = 'prefix'
+ handler = 'proxycache'
+ cache_key_params = [ 'beans' ]
+
+ # block /api/v1/admin/ from being reachable via Trickster
+ [origins.default.paths.admin]
+ path = '/api/v1/admin/'
+ methods = [ 'GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS' ]
+ match_type = 'prefix'
+ handler = 'localresponse'
+ response_code = 401
+ response_body = 'No soup for you!'
+ no_metrics = true
+```
diff --git a/docs/placement.md b/docs/placement.md
index c5aa4d3c9..6cb4ac5ba 100644
--- a/docs/placement.md
+++ b/docs/placement.md
@@ -1,27 +1,29 @@
# Where to Place Trickster
-Depending upon the size of your existing or planned deployment, there are several placement configurations available. These designs are suggestions based on common usage, and you may find alternative or hybrid placement configurations that make the most sense for your situation, based on the activity of your Dashboard and Prometheus instance(s).
+Depending upon the size of your existing or planned deployment, there are several placement configurations available. These designs are suggestions based on common usage, and you may find alternative or hybrid placement configurations that make the most sense for your situation, based on the activity of your Dashboard and TSDB instance(s).
## Single "Everything"
-
+
-Single "Everything" is the most common placement model. In this configuration, you have one dashboard endpoint, one Trickster endpoint and one Prometheus endpoint. Behind each endpoint, you may have a single instance or a cluster. Each component is only aware of the other component's endpoint exposure and not the underlying configuration. This configuration represents a one-for-one-for-one deployment of your Dashboard, Prometheus, and Trickster endpoints.
+Single "Everything" is the most common placement model. In this configuration, you have one optional dashboard endpoint, one Trickster endpoint and one HTTP or TSDB endpoint. Behind each endpoint, you may have a single instance or a cluster. Each component is only aware of the other component's endpoint exposure and not the underlying configuration. This configuration represents a one-for-one-for-one deployment of your Dashboard, Origin, and Trickster endpoints.
-## Multi-Origin
+## Multiple Origins
-
+
-In a Multi-Origin placement, you have one dashboard endpoint, one Trickster endpoint, and multiple Prometheus endpoints. Trickster is aware of each Prometheus endpoint and treats them as unique databases to which it proxies and caches data independently of each other.
+In a Multi-Origin placement, you have one dashboard endpoint, one Trickster endpoint, and multiple TSDB and/or HTTP endpoints. Trickster is aware of each upstream endpoint and treats each as a unique origin to which it proxies and caches data independently from the others. Trickster selects the origin based on Host Header or URL Path from the client request.
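+
+A minimal sketch of such a configuration, with two independently cached origins (the names and URLs are placeholders; origin selection details are covered in the multi-origin document referenced below):
+
+```toml
+[origins]
+
+  [origins.ops]        # selected by Host header or URL path, per the multi-origin doc
+  origin_type = 'prometheus'
+  origin_url = 'http://prometheus-ops:9090'
+
+  [origins.assets]
+  origin_type = 'rpc'
+  origin_url = 'http://static-assets:8080'
+```
+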
-This is a good configuration to use when you have a single dashboard that displays data about multiple redundant clusters (each with its own Prometheus), or when you have a single dashboard representing information about many different kinds of systems. For example, if you operate a "Dashboard as a Service" solution under which many teams use your Dashboard system by designing their own dashboard screens and bringing their own databases, a single Trickster endpoint can be used to accelerate dashboards for all of your customers.
+This setup may benefit situations where you have one or more static file server origins serving HTML, CSS and JavaScript assets, and/or one or more API endpoints, all supporting a common platform.
-You will need to configure each Trickster-to-Prometheus mapping separately in your dashboard application as a separately named Prometheus data source. Refer to the [multi-origin](./multi-origin.md) documentation for configuring multi-origin support in Trickster and Grafana.
+For Time Series Dashboard acceleration, this is a good configuration to use when you have a single dashboard that displays data about multiple redundant clusters (each with its own TSDB), or when you have a single dashboard representing information about many different kinds of systems. For example, if you operate a "Dashboard as a Service" solution under which many teams use your Dashboard system by designing their own dashboard screens and bringing their own databases, a single Trickster endpoint can be used to accelerate dashboards for all of your customers.
+
+You will need to configure each Trickster-to-TSDB mapping separately in your dashboard application as a separately named TSDB data source. Refer to the [multi-origin](./multi-origin.md) documentation for configuring multi-origin support in Trickster and Grafana.
In this configuration, be aware that the default 'memory' cache may be underpowered depending on the number of customers, as well as the size and number of queries that need to be cached by each customer. Refer to the [caches](./caches.md) document to select and configure the caching layers as needed to meet your specific situation.
## Multi-Trickster
-
+
-In a Multi-Trickster configuration, you have one dashboard endpoint, multiple Trickster endpoints, and multiple Prometheus endpoints, with each Trickster Endpoint having a one-to-one mapping to a Prometheus Endpoint as a pair. This is a good design if Multi-Origin is not performant enough for the amount of activity associated with your solution (e.g., you need more Tricksters). If the Dashboard system owner is different from the Prometheus system owner, either party could own and operate the Trickster instance.
+In a Multi-Trickster configuration, you have one dashboard endpoint, multiple Trickster endpoints, and multiple TSDB or HTTP endpoints, with each Trickster Endpoint having a one-to-one mapping to a TSDB/HTTP Endpoint as a pair. This is a good design if Multi-Origin is not performant enough for the amount of activity associated with your solution (e.g., you need more Tricksters). If the Dashboard system owner is different from the TSDB system owner, either party could own and operate the Trickster instance.
diff --git a/docs/promsim.md b/docs/promsim.md
new file mode 100644
index 000000000..d5859032e
--- /dev/null
+++ b/docs/promsim.md
@@ -0,0 +1,102 @@
+# PromSim - a barebones Prometheus data simulator
+
+PromSim is a golang package available at `github.com/Comcast/trickster/pkg/promsim` that facilitates unit testing of components that are direct consumers of Prometheus JSON data. It works by simulating datasets, output in the Prometheus v1 HTTP API format, whose values are repeatably generated from the provided query and time range inputs. The data output by PromSim does not represent reality in any way, and is only useful for unit and integration testing, by providing a synthesized Prometheus environment that outputs meaningless data. None of PromSim's result sets are stored on or retrieved from disk; they are calculated just-in-time on every request, using simple mathematical computations. In Trickster, we use PromSim to conduct end-to-end testing of our DeltaProxyCache during unit testing, without requiring a real Prometheus server.
+
+## Supported Simulation Endpoints
+
+- `/query` (Instantaneous)
+- `/query_range` (Time Series)
+
+## Example Usage in Unit Testing
+
+PromSim uses only built-in Go packages, and should thus work out-of-the-box without any additional dependency concerns.
+
+```go
+package mypackage
+
+import (
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/Comcast/trickster/pkg/promsim"
+)
+
+func TestPromSim(t *testing.T) {
+
+ ts := promsim.NewTestServer()
+ client := &http.Client{}
+ const expected = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"random_label":"57","series_count":"1","series_id":"0"},"values":[[2,"93"]]}]}}`
+ resp, err := client.Get(ts.URL + `/api/v1/query_range?query=my_test_query{random_label="57",series_count="1"}&start=2&end=2&step=15`)
+ if err != nil {
+ t.Error(err)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(body) != expected {
+ t.Errorf("expected [%s] got [%s]", expected, string(body))
+ }
+}
+```
+
+## Behavior Modifiers
+
+PromSim's behavior can be modified in several ways, on a per-query basis, to produce a desired effect. This is done by providing specific query label values as part of your test queries. All modifier labels are optional, and can be used together in any combination without conflict. Providing the same modifier label more than once in a query will result in the last instance of the modifier being used when constructing the response values.
+
+### Series Count
+
+By default, PromSim will only return a single series in the result set. You can provide a label of `series_count` to indicate the exact number of series that should be returned.
+
+Example query that returns 3 series: `query=my_test_query{series_count="3"}&start=2&end=2&step=15`
+
+### Line Pattern
+
+By default, PromSim uses a "repeatable number generator" to output data. Under the hood, it works by re-seeding Go's `math/rand` generator with a hash of the provided query string and the timestamp for which a value is needed, and returning the first value from the generator after seeding.
+
+You can provide a `line_pattern` label to utilize other supported number generators. The options are `repeatable_random` (default, described above) and `usage_curve`.
+
+`usage_curve` will return numbers that follow a simulated usage curve pattern (rising in the afternoon, peaking in the evening, troughing overnight).
+
+Example using the usage_curve line pattern: `query=my_test_query{series_count="3",max_value="250",min_value="10",line_pattern="usage_curve"}&start=2&end=2&step=15`
+
+### Latency
+
+PromSim is capable of simulating latency by accepting 2 optional query labels: `latency_ms` and `range_latency_ms`. Both labels can be used in conjunction to produce a desired effect.
+
+#### Upfront Latency
+
+The `latency_ms` label introduces an upfront static processing latency of the provided duration (in milliseconds) on each HTTP response. This is useful in simulating round-trip wire latency.
+
+Example adding 300ms of upfront latency: `query=my_test_query{latency_ms="300"}&start=2&end=2&step=15`
+
+#### Range Latency
+
+The `range_latency_ms` label produces a per-unique-value latency effect. The result is that the response from PromSim will be delayed by a certain amount, depending upon the number of series, the size of the desired time range, and the step value. This is useful in simulating very broad label scopes that slow down query response times in the real world.
+
+Example adding 5ms of range latency: `query=my_test_query{range_latency_ms="5",series_count="2"}&start=0&end=1800&step=15`. In this example, 1.2s of total latency is introduced (120 datapoints x 2 series x 5ms) into the HTTP response.
+
+### Min and Max Values
+
+The `min_value` and `max_value` labels allow you to define the extent of possible values returned by PromSim in the result set, and are fairly straightforward. The default min and max values, when not customized, are 0 and 100, respectively.
+
+Example of min and max: `query=my_test_query{series_count="2",min_value="32",max_value="212"}&start=0&end=90&step=15`. In this case, the returned values will be between 32 and 212, rather than 0 and 100.
+
+### Status Code
+
+The `status_code` label will cause PromSim to return the provided status code instead of `200 OK`. This is useful for testing simulated failure cases such as invalid query parameters.
+
+Example query that returns 400 Bad Request: `query=my_test_query{status_code="400"}&start=2&end=2&step=15`
+
+### Invalid Response Body
+
+The `invalid_response_body` label, when provided and set to a value other than 0, will cause PromSim to return a response that cannot be deserialized into a Prometheus Matrix or Vector object, which is again useful for testing failure handling within your app.
+
+Example query that returns invalid response: `query=my_test_query{invalid_response_body="1"}&start=2&end=2&step=15`
+
+## Example Usage as Standalone App
+
+Trickster provides a sample standalone implementation of PromSim. This is useful for backing full simulation dashboards or running a local background app to query during development of a Prometheus consumer app. You can find it at `github.com/Comcast/trickster/cmd/promsim`, and, from that working directory, simply run `go run *.go [PORT]`. If a port number is not provided, it defaults to 9090, just like Prometheus.
diff --git a/docs/range_request.md b/docs/range_request.md
new file mode 100644
index 000000000..174e0a106
--- /dev/null
+++ b/docs/range_request.md
@@ -0,0 +1,19 @@
+# Range Request Support
+
+Trickster's HTTP Reverse Proxy Cache offers best-in-class acceleration and caching of Range Requests.
+
+Much like its Time Series Delta Proxy Cache, Trickster's Reverse Proxy Cache will determine what ranges are cached, and only request from the origin any uncached ranges needed to service the client request, reconstituting the ranges within the cache object. This ensures minimal response time in the event of a cache miss.
+
+In addition to supporting basic single-Range requests (`Range: bytes=0-5`) Trickster also supports Multipart Range Requests (`Range: bytes=0-5, 10-20`).
+
+In the event that an upstream does not support Multipart Range Requests, Trickster offers a unique feature called Upstream Range Dearticulation, which will separate any ranges needed from the origin into individual, parallel HTTP requests that are reconstituted by Trickster. This feature can be enabled for an origin by setting `dearticulate_upstream_ranges = true`, as in this example:
+
+```toml
+[origins]
+ [origins.default]
+ origin_type = 'reverseproxycache'
+ origin_url = 'http://example.com/'
+ dearticulate_upstream_ranges = true
+```
+
+In the event that downstream clients should not expect MultiPart Range Request support, Trickster offers a setting to fully disable support on a per-origin basis. Set `multipart_ranges_disabled = true` and Trickster will strip Range Request headers that include multiple Ranges, which will result in a 200 OK response with the full body.
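+
+A minimal sketch of that setting, following the same layout as the example above (the origin name and URL are placeholders):
+
+```toml
+[origins]
+  [origins.default]
+  origin_type = 'reverseproxycache'
+  origin_url = 'http://example.com/'
+  multipart_ranges_disabled = true # strip multi-part Range headers; such requests get a 200 OK full-body response
+```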
diff --git a/docs/retention.md b/docs/retention.md
new file mode 100644
index 000000000..7ccc5bdd9
--- /dev/null
+++ b/docs/retention.md
@@ -0,0 +1,45 @@
+# Trickster Caching Retention Policies
+
+## Basic HTTP Origins
+
+Trickster will respect HTTP 1.0, 1.1 and 2.0 caching directives from both the downstream client and the upstream origin when determining object cacheability and TTL. You can override the TTL by setting a custom `Cache-Control` header on a per-[Path Config](./paths.md) basis.
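+
+For example, a custom `Cache-Control` response header can be set for a specific path, as shown in this sketch (the origin and path names are placeholders; see the paths document for the full set of Path Config options):
+
+```toml
+[origins.default.paths.static]
+path = '/static/'
+methods = [ 'GET', 'HEAD' ]
+handler = 'proxycache'
+match_type = 'prefix'
+
+  [origins.default.paths.static.response_headers]
+  'Cache-Control' = 'max-age=86400' # override the TTL for objects under this path to 1 day
+```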
+
+### Cache Object Evictions
+
+If you use a Trickster-managed cache (Memory, Filesystem, bbolt), then a maximum cache size is maintained by Trickster. You can configure the maximum size in number of bytes, number of objects, or both. See the example configuration for more information.
+
+Once the cache has reached its configured maximum size of objects or bytes, Trickster will undergo an eviction routine that removes cache objects until the size has fallen below the configured maximums. Trickster-managed caches maintain a last access time for each cache object, and utilize a Least Recently Used (LRU) methodology when selecting objects for eviction.
+
+Caches whose object lifetimes are not managed internally by Trickster (Redis, BadgerDB) will use their own policies and methodologies for evicting cache records.
+
+## Time Series Origins
+
+For non-time series responses from a TSDB, Trickster will adhere to HTTP caching rules as directed by the downstream client and upstream origin.
+
+For time series data responses, Trickster will cache as follows:
+
+### TTL Settings
+
+TTL settings for each Origin configured in Trickster can be customized independently of each other, and separate TTL configurations are available for timeseries objects and fast forward data. See [cmd/trickster/conf/example.conf](../cmd/trickster/conf/example.conf) for more info on configuring default TTLs.
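+
+As a sketch only (the setting names `timeseries_ttl_secs` and `fastforward_ttl_secs` are assumed here; the example config linked above is authoritative):
+
+```toml
+[origins]
+  [origins.default]
+  origin_type = 'prometheus'
+  timeseries_ttl_secs = 21600  # assumed name: TTL for cached time series objects (6 hours)
+  fastforward_ttl_secs = 15    # assumed name: TTL for cached fast forward data (15 seconds)
+```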
+
+### Time Series Data Retention
+
+Separately from the TTL of a time series cache object, Trickster allows you to control the size of each timeseries object, represented as a maximum number of timestamps in the cache object, on a _per origin_ basis. This configuration is known as the `timeseries_retention_factor` (TRF), and has a default of 1024. Most dashboards for most users request and display approximately 300-to-400 timestamps, so the default TRF allows users to still recall recently-displayed data from the Trickster cache for a period of time after the data has aged off of real-time views.
+
+If you have users with a high-resolution dashboard configuration (e.g., a 24-hour view with a 1-minute step, amounting to 1440 data points per graph), then you may benefit from increasing the `timeseries_retention_factor` accordingly. If you use a managed cache (see [caches](./caches.md)) and increase the `timeseries_retention_factor`, the overall size of your cache will not change; the result will be fewer objects in cache, with the timeseries objects having a larger share of the overall cache size with more aged data.
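+
+For instance, the high-resolution dashboard described above could be accommodated with something like the following sketch (only settings named in this document are shown):
+
+```toml
+[origins]
+  [origins.default]
+  origin_type = 'prometheus'
+  timeseries_retention_factor = 1440 # retain up to 1440 timestamps per cached time series object
+```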
+
+#### Time Series Data Evictions
+
+Once the TRF is reached for a time series cache object, Trickster will undergo a timestamp eviction process for the record in question. Unlike the Cache Object Eviction, which removes an object from cache completely, TRF evictions examine the data set contained in a cache object and remove timestamped data in order to reduce the object size down to the TRF.
+
+Time Series Data Evictions apply to all cached time series data sets, regardless of whether or not the cache object lifecycle is managed by Trickster.
+
+Trickster provides two eviction methodologies (`timeseries_eviction_method`) for time series data: `oldest` (default) and `lru`, and the setting is configurable per-origin.
+
+When `timeseries_eviction_method` is set to `oldest`, Trickster maintains time series data by calculating the "oldest cacheable timestamp" value upon each request, using `time.Now().Add(step * timeseries_retention_factor * -1)`. Any queries for data older than the oldest cacheable timestamp are intelligently offloaded to the proxy since they will never be cached, and no data that is older than the oldest cacheable timestamp will be stored in the query's cache record.
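+
+A small Go sketch of that calculation (illustrative names only, not Trickster's internal identifiers):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// oldestCacheableTimestamp mirrors the formula above: with the 'oldest' eviction
+// method, any timestamp older than now - (step * timeseries_retention_factor)
+// will never be cached.
+func oldestCacheableTimestamp(step time.Duration, retentionFactor int) time.Time {
+	return time.Now().Add(-(step * time.Duration(retentionFactor)))
+}
+
+func main() {
+	// A 60s step with the default retention factor of 1024 keeps roughly
+	// the most recent ~17 hours of timestamps cacheable.
+	fmt.Println(oldestCacheableTimestamp(60*time.Second, 1024))
+}
+```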
+
+When `timeseries_eviction_method` is set to `lru`, Trickster will not calculate an oldest cacheable timestamp, but rather maintain a last-accessed time for _each timestamp_ in the cache object, and evict the Least-Recently-Used items in order to maintain the cache size.
+
+The advantage of the `oldest` methodology is better cache performance, at the cost of not caching very old data. Thus, Trickster will be more performant computationally while providing a slightly lower cache hit rate. The `lru` methodology, since it requires accessing the cache on _every request_ and maintaining access times for every timestamp, is computationally more expensive, but can achieve a higher cache hit rate since it permits caching data of any age, so long as it is accessed frequently enough to avoid eviction.
+
+Most users will find the `oldest` methodology to meet their needs, so it is recommended to use `lru` only if you have a specific use case (e.g., dashboards with data from a diverse set of time ranges, where caching only relatively young data does not suffice).
diff --git a/docs/roadmap.md b/docs/roadmap.md
index b659183c9..13a6abea3 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -3,33 +3,33 @@
Our roadmap for Trickster is largely focused on a 1.0 release, which will have a completely refactored codebase. Trickster 1.0 will be more efficient and easily extensible.
Trickster 1.0 will have the following enhancements:
-- The application is refactored into Packages to simplify reuse
-- Simplified hash collision prevention and pipelining (replacing channels with mutexes)
-- Upstream Proxy interface to facilitate support for additional TSDB types
-- Support for InfluxDB acceleration
-- Full compliance with HTTP 1.0/1.1 RFC's for Proxy/Caching
-- Simpler and more efficient Delta computations
-- Caches per-origin instead of per-process
-- Size-based cache quota
-- Distributed Tracing support
+- [x] The application is refactored into Packages to simplify reuse
+- [x] Simplified hash collision prevention and pipelining (replacing channels with mutexes)
+- [x] Upstream Proxy interface to facilitate support for additional TSDB types
+- [x] Support for InfluxDB acceleration
+- [x] Simpler and more efficient Delta computations
+- [x] Caches per-origin instead of per-process
+- [x] Size-based cache quota
+- [ ] Full compliance with HTTP 1.0/1.1 RFC's for Proxy/Caching
+- [ ] Distributed Tracing support
## Timeline
### Q1 2019 - Trickster 1.0 Beta Release
-We intend to provide a Trickster 1.0 Beta Release by the end of Q1 2019 that will include the majority of features listed above.
+We intend to provide a Trickster 1.0 Beta Release by the end of Q1 2019 that will include the majority of features listed above. Our progress is indicated above via the checkboxes.
-### Q2 2019 - Trickster 1.0 GA Release
+### Q4 2019 - Trickster 1.0 GA Release
-We hope to provdie a Trickster 1.0 GA Release in the first half of Q2 2019 that includes all of the features listed above.
+We hope to provide a Trickster 1.0 GA Release in the first half of Q4 2019 that includes all of the features listed above.
## How to Help
-You can help by contributing to Trickster 1.0 on the `next` branch, or trying it out in your environment. Docker images for the latest Trickster 1.0 Beta release will be published under the `next` tag.
+You can help by contributing to Trickster 1.0 on the `next` branch, or trying it out in your environment. Docker images for the latest Trickster 1.0 Beta release will be published under the `beta` tag.
By giving Trickster 1.0 Beta a spin, you can help us identify and fix defects more quickly. Be sure to file issues if you find something wrong, using the `1.0` label. If you can reliably reproduce the issue, provide detailed steps so that developers can more easily root-cause the issue.
-If you want to contribute to Trickster 1.0, take any of the issues labeled `1.0` that are not already assigned. Many of these have been outstanding for some time pending the Interface model, so now is great time to look at extending Trickster to work with your TSDB of choice.
+If you want to contribute to Trickster 1.0, take any of the issues labeled `1.0 Release` or `1.x Release` that are not already assigned. Many of these have been outstanding for some time, pending the Interface model, so now is a great time to look at extending Trickster to work with your TSDB of choice.
## Thank You
diff --git a/docs/supported-origin-types.md b/docs/supported-origin-types.md
new file mode 100644
index 000000000..ac6dc2228
--- /dev/null
+++ b/docs/supported-origin-types.md
@@ -0,0 +1,33 @@
+# Supported Origin Types
+
+Trickster currently supports the following Origin Types:
+
+### Generic HTTP Reverse Proxy Cache _(Currently Experimental)_
+
+Trickster operates as a fully-featured and highly-customizable reverse proxy cache, designed to accelerate and scale upstream endpoints like API services and other simple HTTP services. Specify `'reverseproxycache'` or just `'rpc'` as the Origin Type when configuring Trickster.
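+
+A minimal configuration sketch (the origin URL is a placeholder):
+
+```toml
+[origins]
+  [origins.default]
+  origin_type = 'rpc' # or 'reverseproxycache'
+  origin_url = 'http://example.com/'
+```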
+
+---
+
+## Time Series Databases
+
+### Prometheus
+
+Trickster fully supports the [Prometheus HTTP API (v1)](https://prometheus.io/docs/prometheus/latest/querying/api/). Specify `'prometheus'` as the Origin Type when configuring Trickster.
+
+### InfluxDB _(Currently Experimental)_
+
+Trickster 1.0 has experimental support for InfluxDB. Specify `'influxdb'` as the Origin Type when configuring Trickster.
+
+See the [InfluxDB Support Document](./influxdb.md) for more information.
+
+### ClickHouse _(Currently Experimental)_
+
+Trickster 1.0 has experimental support for ClickHouse. Specify `'clickhouse'` as the Origin Type when configuring Trickster.
+
+See the [ClickHouse Support Document](./clickhouse.md) for more information.
+
+### Circonus IRONdb _(Currently Experimental)_
+
+Experimental support has been included for the Circonus IRONdb time-series database. If Grafana is used for visualizations, the Circonus IRONdb data source plug-in for Grafana can be configured to use Trickster as its data source. All IRONdb data retrieval operations, including CAQL queries, are supported.
+
+When configuring an IRONdb origin, specify `'irondb'` as the origin type in the Trickster configuration. The `host` value can be set directly to the address and port of an IRONdb node, but it is recommended to use the Circonus API proxy service. When using the proxy service, set the `host` value to the address and port of the proxy service, and set the `api_path` value to `'irondb'`.
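+
+A sketch of the IRONdb settings described above (the address is a placeholder, and the exact key layout is best confirmed against the example config):
+
+```toml
+[origins]
+  [origins.default]
+  origin_type = 'irondb'
+  host = 'api-proxy.example.com:443' # address and port of the Circonus API proxy service
+  api_path = 'irondb'
+```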
diff --git a/docs/tls.md b/docs/tls.md
new file mode 100644
index 000000000..ebf6b62de
--- /dev/null
+++ b/docs/tls.md
@@ -0,0 +1,52 @@
+# TLS Support
+
+Trickster supports TLS on both the frontend server and backend clients.
+
+## Basics
+
+To enable the TLS server, you must specify the `tls_listen_port`, and optionally, the `tls_listen_address` in the `[proxy_server]` section of your config file. For example:
+
+```toml
+[proxy_server]
+
+listen_port = 9090
+tls_listen_port = 8443
+```
+
+Note that Trickster will only start listening on the TLS port if at least one origin has a valid certificate and key configured.
+
+Each origin section of a Trickster config file can be augmented with the optional `tls` section to modify TLS behavior for front-end and back-end requests. For example:
+
+```toml
+[origins]
+
+ [origins.example] # origin for example
+
+    [origins.example.tls] # TLS settings for origin named example
+ # front-end configs
+ full_chain_cert_path = '/path/to/my/cert.pem'
+ private_key_path = '/path/to/my/key.pem'
+ # back-end configs
+ insecure_skip_verify = true
+ certificate_authority_paths = [ '/path/to/ca1.pem', '/path/to/ca2.pem' ]
+ client_cert_path = '/path/to/client/cert.pem'
+ client_key_path = '/path/to/client/key.pem'
+```
+
+## Front-End
+
+Each origin can handle encryption with exactly 1 certificate and key pair, as configured in the TLS section of the origin config (demonstrated above).
+
+If the path to any configured Certificate or Key file is unreachable or unparsable, Trickster will exit upon startup with an error providing reasonable context.
+
+You may use the same TLS certificate and key for multiple origins, depending upon how your Trickster configurations are laid out. Any certificates configured by Trickster must match the hostname header of the inbound http request (exactly, or by wildcard interpolation), or clients will likely reject the certificate for security issues.
+
+## Back-End
+
+Each Trickster origin front-end configuration is paired with its own back-end http(s) client, which can be configured in the TLS section of the origin config, as demonstrated above.
+
+`insecure_skip_verify` will instruct the HTTP client to ignore hostname verification issues with the upstream origin's certificate and process the request anyway. This is analogous to `-k | --insecure` in curl.
+
+`certificate_authority_paths` will provide the HTTP client with a list of certificate authorities (used in addition to any OS-provided root CAs) to use when determining the trust of an upstream origin's TLS certificate. In all cases, the root CAs installed on the operating system on which Trickster is running are used for trust by the client.
+
+To use Mutual Authentication with an upstream origin server, configure Trickster with Client Certificates using the `client_cert_path` and `client_key_path` parameters, as shown above. You will likely need to also configure a custom CA in `certificate_authority_paths` to represent your certificate signer, unless it has been added to the underlying Operating System's CA list.
diff --git a/filesystem.go b/filesystem.go
deleted file mode 100644
index 3f9f41a7b..000000000
--- a/filesystem.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/go-kit/kit/log/level"
- "golang.org/x/sys/unix"
-)
-
-// FilesystemCache describes a Filesystem Cache
-type FilesystemCache struct {
- T *TricksterHandler
- Config FilesystemCacheConfig
- mutexes map[string]*sync.Mutex
- mapMutex sync.Mutex
-}
-
-// Connect instantiates the FilesystemCache mutex map and starts the Expired Entry Reaper goroutine
-func (c *FilesystemCache) Connect() error {
- level.Info(c.T.Logger).Log("event", "filesystem cache setup", "cachePath", c.Config.CachePath)
-
- if err := makeDirectory(c.Config.CachePath); err != nil {
- return err
- }
-
- c.mutexes = make(map[string]*sync.Mutex)
-
- go c.Reap()
- return nil
-}
-
-// Store places an object in the cache using the specified key and ttl
-func (c *FilesystemCache) Store(cacheKey string, data string, ttl int64) error {
- expFile, dataFile := c.getFileNames(cacheKey)
- expiration := []byte(strconv.FormatInt(time.Now().Unix()+ttl, 10))
-
- level.Debug(c.T.Logger).Log("event", "filesystem cache store", "key", cacheKey, "expFile", expFile, "dataFile", dataFile)
- mtx := c.getMutex(cacheKey)
- mtx.Lock()
- err1 := ioutil.WriteFile(dataFile, []byte(data), os.FileMode(0777))
- err2 := ioutil.WriteFile(expFile, expiration, os.FileMode(0777))
- mtx.Unlock()
-
- if err1 != nil {
- return err1
- } else if err2 != nil {
- return err2
- }
- return nil
-}
-
-// Retrieve looks for an object in cache and returns it (or an error if not found)
-func (c *FilesystemCache) Retrieve(cacheKey string) (string, error) {
- _, dataFile := c.getFileNames(cacheKey)
- level.Debug(c.T.Logger).Log("event", "filesystem cache retrieve", "key", cacheKey, "dataFile", dataFile)
-
- mtx := c.getMutex(cacheKey)
- mtx.Lock()
- content, err := ioutil.ReadFile(dataFile)
- mtx.Unlock()
- if err != nil {
- return "", fmt.Errorf("Value for key [%s] not in cache", cacheKey)
- }
-
- return string(content), nil
-}
-
-// Reap continually iterates through the cache to find expired elements and removes them
-func (c *FilesystemCache) Reap() {
- for {
- now := time.Now().Unix()
-
- files, err := ioutil.ReadDir(c.Config.CachePath)
- if err == nil {
- for _, file := range files {
- if strings.HasSuffix(file.Name(), ".expiration") {
- cacheKey := strings.Replace(file.Name(), ".expiration", "", 1)
- expFile, dataFile := c.getFileNames(cacheKey)
- mtx := c.getMutex(cacheKey)
- mtx.Lock()
- content, err := ioutil.ReadFile(expFile)
- if err == nil {
- expiration, err := strconv.ParseInt(string(content), 10, 64)
- if err != nil || expiration < now {
- level.Debug(c.T.Logger).Log("event", "filesystem cache reap", "key", cacheKey, "dataFile", dataFile)
-
- // Get a lock
- c.T.ChannelCreateMtx.Lock()
-
- // Delete the key
- os.Remove(expFile)
- os.Remove(dataFile)
-
- // Close out the channel if it exists
- if _, ok := c.T.ResponseChannels[cacheKey]; ok {
- close(c.T.ResponseChannels[cacheKey])
- delete(c.T.ResponseChannels, cacheKey)
- }
-
- // Unlock
- c.T.ChannelCreateMtx.Unlock()
- }
- }
- mtx.Unlock()
- }
- }
- }
-
- time.Sleep(time.Duration(c.T.Config.Caching.ReapSleepMS) * time.Millisecond)
- }
-}
-
-// Close is not used for FilesystemCache
-func (c *FilesystemCache) Close() error {
- return nil
-}
-
-func (c *FilesystemCache) getFileNames(cacheKey string) (string, string) {
- prefix := strings.Replace(c.Config.CachePath+"/"+cacheKey+".", "//", "/", 1)
- return prefix + "expiration", prefix + "data"
-}
-
-func (c *FilesystemCache) getMutex(cacheKey string) *sync.Mutex {
- var mtx *sync.Mutex
- var ok bool
- c.mapMutex.Lock()
- if mtx, ok = c.mutexes[cacheKey]; !ok {
- mtx = &sync.Mutex{}
- c.mutexes[cacheKey] = mtx
- }
- c.mapMutex.Unlock()
-
- return mtx
-}
-
-// writeable returns true if the path is writeable by the calling process.
-func writeable(path string) bool {
- return unix.Access(path, unix.W_OK) == nil
-}
-
-// makeDirectory creates a directory on the filesystem and exits the application in the event of a failure.
-func makeDirectory(path string) error {
- err := os.MkdirAll(path, 0755)
- if err != nil || !writeable(path) {
- return fmt.Errorf("[%s] directory is not writeable by the trickster: %v", path, err)
- }
-
- return nil
-}
diff --git a/filesystem_test.go b/filesystem_test.go
deleted file mode 100644
index 7a05cb4a4..000000000
--- a/filesystem_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "testing"
-
- "github.com/go-kit/kit/log"
-)
-
-func TestFilesystemCache_Connect(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- fc := FilesystemCache{T: &tr, Config: FilesystemCacheConfig{CachePath: "."}}
-
- // it should connect
- err := fc.Connect()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestFilesystemCache_Store(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- fc := FilesystemCache{T: &tr, Config: FilesystemCacheConfig{CachePath: "."}}
-
- err := fc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // it should store a value
- err = fc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestFilesystemCache_Retrieve(t *testing.T) {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1}}
- tr := TricksterHandler{Logger: log.NewNopLogger(), Config: &cfg}
- fc := FilesystemCache{T: &tr, Config: FilesystemCacheConfig{CachePath: "."}}
-
- err := fc.Connect()
- if err != nil {
- t.Error(err)
- }
- err = fc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-
- // it should retrieve a value
- data, err := fc.Retrieve("cacheKey")
- if err != nil {
- t.Error(err)
- }
- if data != "data" {
- t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
- }
-}
diff --git a/flags.go b/flags.go
deleted file mode 100644
index c313a81f5..000000000
--- a/flags.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
-)
-
-const (
- // Command-line flags
- cfConfig = "config"
- cfVersion = "version"
- cfLogLevel = "log-level"
- cfInstanceID = "instance-id"
- cfOrigin = "origin"
- cfProxyPort = "proxy-port"
- cfMetricsPort = "metrics-port"
- cfProfilerPort = "profiler-port"
-
- // Environment variables
- evOrigin = "TRK_ORIGIN"
- evProxyPort = "TRK_PROXY_PORT"
- evMetricsPort = "TRK_METRICS_PORT"
- evLogLevel = "TRK_LOG_LEVEL"
- evProfilerPort = "TRK_PROFILER_PORT"
-)
-
-// loadConfiguration reads the config path from Flags,
-// Loads the configs (w/ default values where missing)
-// and then evaluates any provided flags as overrides
-func loadConfiguration(c *Config, arguments []string) error {
- var path string
- var version bool
-
- f := flag.NewFlagSet(applicationName, -1)
- f.SetOutput(ioutil.Discard)
- f.StringVar(&path, cfConfig, "", "Supplies Path to Config File")
- f.BoolVar(&version, cfVersion, false, "Prints trickster version")
- f.Parse(arguments)
-
- // If the config file is not specified on the cmdline then try the default
- // location to load the config file. If the default config does not exist
- // then move on, no big deal.
- if path != "" {
- if err := c.LoadFile(path); err != nil {
- return err
- }
- } else {
- _, err := os.Open(c.Main.ConfigFile)
- if err == nil {
- if err := c.LoadFile(c.Main.ConfigFile); err != nil {
- return err
- }
- }
- }
-
- // Display version information then exit the program
- if version == true {
- fmt.Println(applicationVersion)
- os.Exit(3)
- }
-
- // Load from Environment Variables
- loadEnvVars(c)
-
- //Load from command line flags.
- loadFlags(c, arguments)
-
- return nil
-}
-
-func loadEnvVars(c *Config) {
- // Origin
- if x := os.Getenv(evOrigin); x != "" {
- c.DefaultOriginURL = x
- }
-
- // Proxy Port
- if x := os.Getenv(evProxyPort); x != "" {
- if y, err := strconv.ParseInt(x, 10, 64); err == nil {
- c.ProxyServer.ListenPort = int(y)
- }
- }
-
- // Metrics Port
- if x := os.Getenv(evMetricsPort); x != "" {
- if y, err := strconv.ParseInt(x, 10, 64); err == nil {
- c.Metrics.ListenPort = int(y)
- }
- }
-
- // ProfilerPort
- if x := os.Getenv(evProfilerPort); x != "" {
- if y, err := strconv.ParseInt(x, 10, 64); err == nil {
- c.Profiler.ListenPort = int(y)
- c.Profiler.Enabled = true
- }
- }
-
- // LogLevel
- if x := os.Getenv(evLogLevel); x != "" {
- c.Logging.LogLevel = x
- }
-
-}
-
-// loadFlags loads configuration from command line flags.
-func loadFlags(c *Config, arguments []string) {
- var path string
- var version bool
- var origin string
- var proxyListenPort int
- var metricsListenPort int
- var profilerListenPort int
-
- f := flag.NewFlagSet(applicationName, flag.ExitOnError)
- f.BoolVar(&version, cfVersion, true, "Prints Trickster version")
- f.StringVar(&c.Logging.LogLevel, cfLogLevel, c.Logging.LogLevel, "Level of Logging to use (debug, info, warn, error)")
- f.IntVar(&c.Main.InstanceID, cfInstanceID, 0, "Instance ID for when running multiple processes")
- f.StringVar(&origin, cfOrigin, "", "URL to the Prometheus Origin. Enter it like you would in grafana, e.g., http://prometheus:9090")
- f.IntVar(&proxyListenPort, cfProxyPort, 0, "Port that the Proxy server will listen on.")
- f.IntVar(&metricsListenPort, cfMetricsPort, 0, "Port that the /metrics endpoint will listen on.")
- f.IntVar(&profilerListenPort, cfProfilerPort, 0, "Port that the /debug/pprof endpoint will listen on.")
-
- // BEGIN IGNORED FLAGS
- f.StringVar(&path, cfConfig, "", "Path to Trickster Config File")
- // END IGNORED FLAGS
-
- f.Parse(arguments)
-
- if len(origin) > 0 {
- c.DefaultOriginURL = origin
- }
- if proxyListenPort > 0 {
- c.ProxyServer.ListenPort = proxyListenPort
- }
- if metricsListenPort > 0 {
- c.Metrics.ListenPort = metricsListenPort
- }
- if profilerListenPort > 0 {
- c.Profiler.ListenPort = profilerListenPort
- c.Profiler.Enabled = true
- }
-}
diff --git a/go.mod b/go.mod
index c6d09d8b4..5591772c0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,28 +1,44 @@
module github.com/Comcast/trickster
+replace gotest.tools => github.com/gotestyourself/gotest.tools v2.2.0+incompatible
+
require (
+ github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/BurntSushi/toml v0.3.1
+ github.com/alecthomas/gometalinter v3.0.0+incompatible // indirect
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
- github.com/alicebob/miniredis v0.0.0-20181205055656-cfad8aca71cc
- github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
- github.com/coreos/bbolt v1.3.0
- github.com/go-kit/kit v0.8.0
- github.com/go-logfmt/logfmt v0.4.0 // indirect
- github.com/go-redis/redis v6.14.2+incompatible
+ github.com/alicebob/miniredis v2.5.0+incompatible
+ github.com/boltdb/bolt v1.3.1 // indirect
+ github.com/coreos/bbolt v1.3.3
+ github.com/dgraph-io/badger v1.6.0
+ github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b // indirect
+ github.com/fatih/color v1.7.0 // indirect
+ github.com/go-kit/kit v0.9.0
+ github.com/go-logfmt/logfmt v0.5.0 // indirect
+ github.com/go-redis/redis v6.15.6+incompatible
github.com/go-stack/stack v1.8.0
- github.com/golang/protobuf v1.2.0 // indirect
- github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
+ github.com/gojp/goreportcard v0.0.0-20200102082842-9f8184163627 // indirect
+ github.com/golang/snappy v0.0.1
github.com/gomodule/redigo v2.0.0+incompatible // indirect
- github.com/gorilla/context v1.1.1 // indirect
- github.com/gorilla/handlers v1.4.0
- github.com/gorilla/mux v1.6.2
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/pkg/errors v0.8.0
- github.com/prometheus/client_golang v0.9.1
- github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
- github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
- github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a // indirect
- github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227 // indirect
- golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a
- gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3
+ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/gorilla/handlers v1.4.2
+ github.com/gorilla/mux v1.7.3
+ github.com/influxdata/influxdb v1.7.9
+ github.com/mattn/go-colorable v0.1.4 // indirect
+ github.com/mattn/go-isatty v0.0.10 // indirect
+ github.com/mattn/goveralls v0.0.4 // indirect
+ github.com/philhofer/fwd v1.0.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/prometheus/client_golang v1.3.0
+ github.com/prometheus/common v0.8.0
+ github.com/rakyll/gotest v0.0.0-20191108192113-45d501058f2a // indirect
+ github.com/tinylib/msgp v1.1.1
+ github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect
+ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa
+ golang.org/x/sys v0.0.0-20200116001909-b77594299b42
+ golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd // indirect
+ gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
+
+go 1.13
diff --git a/go.sum b/go.sum
index a8ab84058..bdc881cf8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,51 +1,226 @@
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/alecthomas/gometalinter v3.0.0+incompatible h1:e9Zfvfytsw/e6Kd/PYd75wggK+/kX5Xn8IYDUKyc5fU=
+github.com/alecthomas/gometalinter v3.0.0+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
-github.com/alicebob/miniredis v0.0.0-20181205055656-cfad8aca71cc h1:oajiik5ClkUDgM2JOF9hI7muk5/jI+FOCa7YDpFqmGY=
-github.com/alicebob/miniredis v0.0.0-20181205055656-cfad8aca71cc/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
+github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/coreos/bbolt v1.3.0 h1:HIgH5xUWXT914HCI671AxuTTqjj64UOFr7pHn48LUTI=
-github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=
+github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg=
+github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0=
-github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-redis/redis v6.15.5+incompatible h1:pLky8I0rgiblWfa8C1EV7fPEUv0aH6vKRaYHc/YRHVk=
+github.com/go-redis/redis v6.15.5+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg=
+github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gojp/goreportcard v0.0.0-20200102082842-9f8184163627 h1:g5bGbrCY46a0KjewTqkYB8PtV9r6TQPrX34CIy+6OdQ=
+github.com/gojp/goreportcard v0.0.0-20200102082842-9f8184163627/go.mod h1:/DA2Xpp+OaR3EHafQSnT9SKOfbG2NPQR/qp6Qr8AgIw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA=
-github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb v1.7.8 h1:oXd5TjXzU1b+xyFaH/8Ij+nCoUgyuO3ZDpgCuo62yg0=
+github.com/influxdata/influxdb v1.7.8/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb v1.7.9 h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=
+github.com/influxdata/influxdb v1.7.9/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/goveralls v0.0.4 h1:/mdWfiU2y8kZ48EtgByYev/XT3W4dkTuKLOJJsh/r+o=
+github.com/mattn/goveralls v0.0.4/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nicksnyder/go-i18n v2.0.3+incompatible h1:XCCaWsCoy4KlWkhOr+63dkv6oJmitJ573uJqDBAiFiQ=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227 h1:GRy+0tGtORsCA+CJUMfhLuN71eQ0LtsQRDBQKbzESdc=
-github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227/go.mod h1:fFiAh+CowNFr0NK5VASokuwKwkbacRmHsVA7Yb1Tqac=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.8.0 h1:bLkjvFe2ZRX1DpcgZcdf7j/+MnusEps5hktST/FHA34=
+github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/rakyll/gotest v0.0.0-20191108192113-45d501058f2a h1:/kX+lZpr87Pb0yJKyxW40ZZO6jl52jFMmoQ0YhfwGLM=
+github.com/rakyll/gotest v0.0.0-20191108192113-45d501058f2a/go.mod h1:jpFrc1UTqK0FtfF3doi3pEUBgWHYELkOPPECUlDsM2Q=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
+github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y=
+github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
+github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191021124707-24d2ffbea1e8 h1:L4W1teiyF4Jl6VuapLNV/LYho36udiBQsfbNu7eRMeo=
+golang.org/x/net v0.0.0-20191021124707-24d2ffbea1e8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 h1:AFxeG48hTWHhDTQDk/m2gorfVHUEa9vo3tp3D7TzwjI=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191020212454-3e7259c5e7c2 h1:nq114VpM8lsSlP+lyUbANecYHYiFcSNFtqcBlxRV+gA=
+golang.org/x/sys v0.0.0-20191020212454-3e7259c5e7c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM=
+golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd h1:Zc7EU2PqpsNeIfOoVA7hvQX4cS3YDJEs5KlfatT3hLo=
+golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780 h1:CEBpW6C191eozfEuWdUmIAHn7lwlLxJ7HVdr2e2Tsrw=
+gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/handlers.go b/handlers.go
deleted file mode 100644
index fb53cbc6c..000000000
--- a/handlers.go
+++ /dev/null
@@ -1,1208 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "crypto/md5"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "math"
- "net/http"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
- "github.com/golang/snappy"
- "github.com/gorilla/mux"
- "github.com/pkg/errors"
- "github.com/prometheus/common/model"
-)
-
-const (
- // Origin database types
- otPrometheus = "prometheus"
-
- // Common HTTP Header Values
- hvNoCache = "no-cache"
- hvApplicationJSON = "application/json"
-
- // Common HTTP Header Names
- hnCacheControl = "Cache-Control"
- hnAllowOrigin = "Access-Control-Allow-Origin"
- hnContentType = "Content-Type"
- hnAuthorization = "Authorization"
-
- // HTTP methods
- hmGet = "GET"
-
- // Prometheus response values
- rvSuccess = "success"
- rvMatrix = "matrix"
- rvVector = "vector"
-
- // Common URL parameter names
- upQuery = "query"
- upStart = "start"
- upEnd = "end"
- upStep = "step"
- upOriginFqdn = "origin_fqdn"
- upOriginPort = "origin_port"
- upTimeout = "timeout"
- upOrigin = "origin"
- upTime = "time"
-
- // Cache lookup results
- crKeyMiss = "kmiss"
- crRangeMiss = "rmiss"
- crHit = "hit"
- crPartialHit = "phit"
- crPurge = "purge"
-)
-
-// TricksterHandler contains the services the Handlers need to operate
-type TricksterHandler struct {
- Logger log.Logger
- Config *Config
- Metrics *ApplicationMetrics
- Cacher Cache
- ResponseChannels map[string]chan *ClientRequestContext
- ChannelCreateMtx sync.Mutex
-}
-
-// HTTP Handlers
-
-// pingHandler handles calls to /ping, which checks the health of the Trickster app, but not connectivity to upstream origins
-// it responds with 200 OK and "pong" so long as the HTTP Server is running and taking requests
-func (t *TricksterHandler) pingHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set(hnCacheControl, hvNoCache)
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("pong"))
-}
-
-// promHealthCheckHandler returns the health of Trickster
-// can't support multi-origin full proxy for path-based proxying
-func (t *TricksterHandler) promHealthCheckHandler(w http.ResponseWriter, r *http.Request) {
- level.Debug(t.Logger).Log(lfEvent, "promHealthCheckHandler", "path", r.URL.Path, "method", r.Method)
-
- // Check the labels path for Prometheus Origin Handler to satisfy health check
- path := prometheusAPIv1Path + mnLabels
-
- origin := t.getOrigin(r)
- originURL := origin.OriginURL + strings.Replace(path, "//", "/", 1)
- body, resp, _, err := t.getURL(origin, r.Method, originURL, r.URL.Query(), getProxyableClientHeaders(r))
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error fetching data from origin Prometheus", lfDetail, err.Error())
- w.WriteHeader(http.StatusBadGateway)
- return
- }
-
- for k, v := range resp.Header {
- w.Header().Set(k, strings.Join(v, ","))
- }
-
- w.WriteHeader(resp.StatusCode)
- w.Write(body)
-}
-
-// promFullProxyHandler handles calls to non-api paths for single-origin configurations and multi-origin via param or hostname
-// can't support multi-origin full proxy for path-based proxying
-func (t *TricksterHandler) promFullProxyHandler(w http.ResponseWriter, r *http.Request) {
- level.Debug(t.Logger).Log(lfEvent, "promFullProxyHandler", "path", r.URL.Path, "method", r.Method)
-
- path := r.URL.Path
- vars := mux.Vars(r)
-
- // clear out the origin moniker from the front of the API path
- if originName, ok := vars["originMoniker"]; ok {
- if strings.HasPrefix(path, "/"+originName) {
- path = strings.Replace(path, "/"+originName, "", 1)
- }
- }
-
- origin := t.getOrigin(r)
- originURL := origin.OriginURL + strings.Replace(path, "//", "/", 1)
- body, resp, _, err := t.getURL(origin, r.Method, originURL, r.URL.Query(), getProxyableClientHeaders(r))
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error fetching data from origin Prometheus", lfDetail, err.Error())
- w.WriteHeader(http.StatusBadGateway)
- return
- }
-
- for k, v := range resp.Header {
- w.Header().Set(k, strings.Join(v, ","))
- }
-
- writeResponse(w, body, resp)
-}
-
-// promQueryHandler handles calls to /query (for instantaneous values)
-func (t *TricksterHandler) promQueryHandler(w http.ResponseWriter, r *http.Request) {
- path := r.URL.Path
- vars := mux.Vars(r)
-
- // clear out the origin moniker from the front of the API path
- if originName, ok := vars["originMoniker"]; ok {
- if strings.HasPrefix(path, "/"+originName) {
- path = strings.Replace(path, "/"+originName, "", 1)
- }
- }
-
- originURL := t.getOrigin(r).OriginURL + strings.Replace(path, "//", "/", 1)
-
- // Get the params from the User request so we can inspect them and pass on to prometheus
- if err := r.ParseForm(); err != nil {
- level.Error(t.Logger).Log(lfEvent, "error parsing form", lfDetail, err.Error())
- w.WriteHeader(http.StatusBadRequest)
- return
- }
- params := r.Form
-
- body, resp, err := t.fetchPromQuery(originURL, params, r)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error fetching data from origin Prometheus", lfDetail, err.Error())
- w.WriteHeader(http.StatusBadGateway)
- return
- }
-
- writeResponse(w, body, resp)
-}
-
-// promQueryRangeHandler handles calls to /query_range (requests for timeseries values)
-func (t *TricksterHandler) promQueryRangeHandler(w http.ResponseWriter, r *http.Request) {
- ctx, err := t.buildRequestContext(w, r)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error building request context", lfDetail, err.Error())
- w.WriteHeader(http.StatusBadRequest)
- return
- }
-
- // This WaitGroup ensures that the server does not write the response until we are 100% done Trickstering the range request.
-	// The responders that fulfill client requests will mark the waitgroup done when the response is ready for delivery.
- ctx.WaitGroup.Add(1)
- if ctx.CacheLookupResult == crHit {
- t.respondToCacheHit(ctx)
- } else {
- t.queueRangeProxyRequest(ctx)
- }
-
- // Wait until the response is fulfilled before delivering.
- ctx.WaitGroup.Wait()
-}
-
-// End HTTP Handlers
-
-// Helper functions
-
-// defaultPrometheusMatrixEnvelope returns an empty envelope
-func defaultPrometheusMatrixEnvelope() PrometheusMatrixEnvelope {
- return PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: rvMatrix,
- Result: make([]*model.SampleStream, 0),
- },
- }
-}
-
-// getProxyableClientHeaders returns any pertinent http headers from the client that we should pass through to the Origin when proxying
-func getProxyableClientHeaders(r *http.Request) http.Header {
- headers := http.Header{}
-
- // pass through Authorization Header
- if authorization, ok := r.Header[hnAuthorization]; ok {
- headers.Add(hnAuthorization, strings.Join(authorization, " "))
- }
-
- return headers
-}
-
-// getOrigin determines the origin server to service the request based on the Host header and url params
-func (t *TricksterHandler) getOrigin(r *http.Request) PrometheusOriginConfig {
- var originName string
- var ok bool
-
- vars := mux.Vars(r)
-
- // Check for the Origin Name URL Path
- if originName, ok = vars["originMoniker"]; !ok {
-		// Check for the Origin Name URL Parameter (origin=)
- if on, ok := r.URL.Query()[upOrigin]; ok {
-			originName = on[0]
- } else {
- // Otherwise use the Host Header
- originName = r.Host
- }
- }
-
- // If we have matching origin in our Origins Map, return it.
- if p, ok := t.Config.Origins[originName]; ok {
- return p
- }
-
- // Otherwise, return the default origin if it is configured
- p, ok := t.Config.Origins["default"]
- if !ok {
- p = defaultOriginConfig()
- }
-
- if t.Config.DefaultOriginURL != "" {
- p.OriginURL = t.Config.DefaultOriginURL
- }
-
- return p
-}
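For example, with a hypothetical config defining Origins["foo"] and Origins["default"], a request to /foo/api/v1/query_range resolves to "foo" via the path moniker, a request carrying ?origin=foo resolves to "foo" via the URL parameter, and a request whose Host header matches no configured origin falls back to the "default" entry (or a generated default config, with its OriginURL overridden by DefaultOriginURL when that is set).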
-
-// setResponseHeaders adds any needed headers to the response object.
-// this should be called before the body is written
-func setResponseHeaders(w http.ResponseWriter, resp *http.Response) {
- // We're read only and a harmless API, so allow all CORS
- w.Header().Set(hnAllowOrigin, "*")
- // Set the Content-Type to what the response header is
- if contentType, ok := resp.Header["Content-Type"]; ok && len(contentType) > 0 {
- w.Header().Set(hnContentType, contentType[0])
- }
-}
-
-// getURL makes an HTTP request to the provided URL with the provided parameters and returns the response body, the response object, and the request duration
-func (t *TricksterHandler) getURL(o PrometheusOriginConfig, method string, uri string, params url.Values, headers http.Header) ([]byte, *http.Response, time.Duration, error) {
- if len(params) > 0 {
- uri += "?" + params.Encode()
- }
-
- parsedURL, err := url.Parse(uri)
- if err != nil {
- return nil, nil, 0, fmt.Errorf("error parsing URL %q: %v", uri, err)
- }
-
- startTime := time.Now()
- client := &http.Client{
- Timeout: time.Duration(o.TimeoutSecs * time.Second.Nanoseconds()),
- CheckRedirect: func(req *http.Request, via []*http.Request) error {
- return http.ErrUseLastResponse
- },
- }
-
- resp, err := client.Do(&http.Request{Method: method, URL: parsedURL})
- if err != nil {
- return nil, nil, 0, fmt.Errorf("error downloading URL %q: %v", uri, err)
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, nil, 0, fmt.Errorf("error reading body from HTTP response for URL %q: %v", uri, err)
- }
-
- if resp.StatusCode != http.StatusOK {
- // We don't want to return non-200 status codes as internal Go errors,
- // as we want to proxy those status codes all the way back to the user.
- level.Warn(t.Logger).Log(lfEvent, "error downloading URL", "url", uri, "status", resp.Status)
- return body, resp, 0, nil
- }
-
- duration := time.Since(startTime)
-
- level.Debug(t.Logger).Log(lfEvent, "prometheusOriginHttpRequest", "url", uri, "duration", duration)
-
- return body, resp, duration, nil
-}
-
-func (t *TricksterHandler) getVectorFromPrometheus(url string, params url.Values, r *http.Request) (PrometheusVectorEnvelope, []byte, *http.Response, error) {
- pe := PrometheusVectorEnvelope{}
-
- // Make the HTTP Request
- body, resp, err := t.fetchPromQuery(url, params, r)
- if err != nil {
- return pe, body, nil, fmt.Errorf("error fetching data from Prometheus: %v", err)
- }
- // Unmarshal the prometheus data into another PrometheusMatrixEnvelope
- err = json.Unmarshal(body, &pe)
- if err != nil {
- // If we get a scalar response, we just want to return the resp without an error
- // this will allow the upper layers to just use the raw response
- if pe.Data.ResultType != "scalar" {
- return pe, nil, nil, fmt.Errorf("Prometheus vector unmarshaling error for URL %q: %v", url, err)
- }
- }
-
- return pe, body, resp, nil
-}
-
-func (t *TricksterHandler) getMatrixFromPrometheus(url string, params url.Values, r *http.Request) (PrometheusMatrixEnvelope, []byte, *http.Response, time.Duration, error) {
- pe := PrometheusMatrixEnvelope{}
-
- // Make the HTTP Request - don't use fetchPromQuery here, that is for instantaneous only.
- body, resp, duration, err := t.getURL(t.getOrigin(r), r.Method, url, params, getProxyableClientHeaders(r))
- if err != nil {
- return pe, nil, nil, 0, err
- }
-
- if resp.StatusCode == http.StatusOK {
- // Unmarshal the prometheus data into another PrometheusMatrixEnvelope
- err := json.Unmarshal(body, &pe)
- if err != nil {
- return pe, nil, nil, 0, fmt.Errorf("Prometheus matrix unmarshaling error for URL %q: %v", url, err)
- }
- }
-
- return pe, body, resp, duration, nil
-}
-
-// fetchPromQuery checks for cached instantaneous value for the query and returns it if found,
-// otherwise proxies the request to the Prometheus origin and sets the cache with a low TTL
-// fetchPromQuery does not do any data marshalling
-func (t *TricksterHandler) fetchPromQuery(originURL string, params url.Values, r *http.Request) ([]byte, *http.Response, error) {
- var ttl int64 = 15
- var end int64
- var err error
-
- cacheKeyBase := originURL
- // if we have an authorization header, that should be part of the cache key to ensure only authorized users can access cached datasets
- if authorization, ok := r.Header[hnAuthorization]; ok {
- cacheKeyBase += strings.Join(authorization, " ")
- }
-
- if ts, ok := params[upTime]; ok {
- reqStart, err := parseTime(ts[0])
- if err != nil {
- return nil, nil, err
- }
- end = reqStart.Unix()
- if end <= (time.Now().Unix()-1800) && end%1800 == 0 {
-			// the Time param is exactly on a 30-minute boundary and not recent; this is unusual for random dashboard loads.
-			// It might be some kind of a daily or hourly rollup, so let's cache it longer than 15s
- ttl = 1800
- }
- end = (end / 15) * 15
-
- params.Set(upTime, strconv.Itoa(int(end)))
- }
-
- cacheKey := deriveCacheKey(cacheKeyBase, params)
-
- var body []byte
- resp := &http.Response{}
- var duration time.Duration
-
- cacheResult := crKeyMiss
-
- // check for it in the cache
- cachedBody, err := t.Cacher.Retrieve(cacheKey)
- if err != nil {
- // Cache Miss, we need to get it from prometheus
- body, resp, duration, err = t.getURL(t.getOrigin(r), r.Method, originURL, params, getProxyableClientHeaders(r))
- if err != nil {
- return nil, nil, err
- }
-
- t.Metrics.ProxyRequestDuration.WithLabelValues(originURL, otPrometheus, mnQuery, crKeyMiss, strconv.Itoa(resp.StatusCode)).Observe(duration.Seconds())
- t.Cacher.Store(cacheKey, string(body), ttl)
- } else {
- // Cache hit, return the data set
- body = []byte(cachedBody)
- cacheResult = crHit
- resp.StatusCode = http.StatusOK
- }
-
- t.Metrics.CacheRequestStatus.WithLabelValues(originURL, otPrometheus, mnQuery, cacheResult, strconv.Itoa(resp.StatusCode)).Inc()
-
- return body, resp, nil
-}
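A minimal sketch of the TTL selection above, using hypothetical timestamps; the helper name exampleInstantTTL is illustrative only and not part of the codebase:

// exampleInstantTTL is a hypothetical helper mirroring the normalization above: round the
// requested instant down to a 15s boundary, and cache old, 30-minute-aligned instants longer
// because they look like rollups rather than live dashboard loads.
func exampleInstantTTL(reqTime, now int64) (normalized, ttl int64) {
	ttl = 15
	if reqTime <= now-1800 && reqTime%1800 == 0 {
		ttl = 1800
	}
	return (reqTime / 15) * 15, ttl
}

// exampleInstantTTL(1435781475, 1500000000) -> (1435781475, 15)
// exampleInstantTTL(1435780800, 1500000000) -> (1435780800, 1800)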
-
-// buildRequestContext creates a ClientRequestContext based on the incoming client request
-func (t *TricksterHandler) buildRequestContext(w http.ResponseWriter, r *http.Request) (*ClientRequestContext, error) {
- var err error
-
- ctx := &ClientRequestContext{
- Request: r,
- Writer: w,
- Origin: t.getOrigin(r),
- Time: time.Now().Unix(),
- }
-
- ctx.Origin.OriginURL += strings.Replace(ctx.Origin.APIPath+"/", "//", "/", 1)
-
- // Get the params from the User request so we can inspect them and pass on to prometheus
- if err := r.ParseForm(); err != nil {
- return nil, errors.Wrap(err, "unable to parse form")
- }
- ctx.RequestParams = r.Form
-
- // Validate and parse the step value from the user request URL params.
- if len(ctx.RequestParams[upStep]) == 0 {
- return nil, fmt.Errorf("missing step parameter")
- }
- ctx.StepParam = ctx.RequestParams[upStep][0]
- step, err := parseDuration(ctx.StepParam)
- if err != nil {
- return nil, errors.Wrap(err, fmt.Sprintf("failed to parse parameter %q with value %q", upStep, ctx.StepParam))
- }
- if step <= 0 {
- return nil, fmt.Errorf("step parameter %v <= 0, has to be positive", step)
- }
- ctx.StepMS = int64(step.Seconds() * 1000)
-
- cacheKeyBase := ctx.Origin.OriginURL + ctx.StepParam
- // if we have an authorization header, that should be part of the cache key to ensure only authorized users can access cached datasets
- if authorization, ok := r.Header[hnAuthorization]; ok {
- cacheKeyBase += strings.Join(authorization, " ")
- }
-
- // Derive a hashed cacheKey for the query where we will get and set the result set
- // inclusion of the step ensures that datasets with different resolutions are not written to the same key.
- ctx.CacheKey = deriveCacheKey(cacheKeyBase, ctx.RequestParams)
-
- // We will look for a Cache-Control: No-Cache request header and,
- // if present, bypass the cache for a fresh full query from prometheus.
-	// Any user can trigger this with a hard reload (ctrl/cmd+shift+r) to clear out cache-related anomalies
- noCache := false
- if ctx.Origin.IgnoreNoCacheHeader == false && (strings.ToLower(r.Header.Get(hnCacheControl)) == hvNoCache) {
- noCache = true
- }
-
- // get the browser-requested start/end times, so we can determine what part of the range is not in the cache
- if len(ctx.RequestParams[upStart]) == 0 {
- return nil, fmt.Errorf("missing start time parameter")
- }
-
- reqStart, err := parseTime(ctx.RequestParams[upStart][0])
- if err != nil {
- return nil, errors.Wrap(err, fmt.Sprintf("failed to parse parameter %q with value %q", upStart, ctx.RequestParams[upStart][0]))
- }
-
- if len(ctx.RequestParams[upEnd]) == 0 {
- return nil, fmt.Errorf("missing end time parameter")
- }
-
- reqEnd, err := parseTime(ctx.RequestParams[upEnd][0])
- if err != nil {
- return nil, errors.Wrap(err, fmt.Sprintf("failed to parse parameter %q with value %q", upEnd, ctx.RequestParams[upEnd][0]))
- }
-
- ctx.RequestExtents.Start, ctx.RequestExtents.End, err = alignStepBoundaries(reqStart.Unix()*1000, reqEnd.Unix()*1000, ctx.StepMS, ctx.Time)
- if err != nil {
- return nil, errors.Wrap(err, "error aligning step boundary")
- }
- // setup some variables to determine and track the status of the query vs what's in the cache
- ctx.Matrix = defaultPrometheusMatrixEnvelope()
- ctx.CacheLookupResult = crKeyMiss
-
- // parameters for filling gap on the upper bound
- ctx.OriginUpperExtents.Start = ctx.RequestExtents.Start
- ctx.OriginUpperExtents.End = ctx.RequestExtents.End
-
-	// If the entire request is outside of MaxValueAgeSecs, then let's not look in the
-	// cache; we won't have it
- if (ctx.Time*1000 - ctx.RequestExtents.End) > ctx.Origin.MaxValueAgeSecs*1000 {
- ctx.CacheLookupResult = crRangeMiss
- return ctx, nil
- }
-
- // Get the cached result set if present
- cachedBody, err := t.Cacher.Retrieve(ctx.CacheKey)
-
- if err != nil || noCache {
- // Cache Miss, Get the whole blob from Prometheus.
- // Pass on the browser-requested start/end parameters to our Prom Query
- if noCache {
- ctx.CacheLookupResult = crPurge
- }
- } else {
-		// We had a cache key hit for the hashed query key, but we may not have all of the points requested by the browser
- // So we can have a Range Miss, Partial Hit, Full Hit when comparing cached range to what the client requested.
- // So let's find out what we are missing (if anything) and fetch what we don't have
-
-		// See if the cached data is compressed by checking whether the first character is "{", with which the uncompressed JSON would start
- // We do this instead of checking the Compression config bit because if someone turns compression on or off when using filesystem or redis cache,
- // we will have no idea if what is already in the cache was compressed or not based on previous settings
- cb := []byte(cachedBody)
- if cb[0] != 123 {
- // Not a JSON object, try decompressing
- level.Debug(t.Logger).Log("event", "Decompressing Cached Data", "cacheKey", ctx.CacheKey)
- cb, err = snappy.Decode(nil, cb)
- if err == nil {
- cachedBody = string(cb)
- }
- }
-
-		// Unmarshal the cache payload into a PrometheusMatrixEnvelope struct
- err = json.Unmarshal([]byte(cachedBody), &ctx.Matrix)
- // If there is an error unmarshaling the cache we should treat it as a cache miss
- // and re-fetch from origin
- if err != nil {
- ctx.CacheLookupResult = crRangeMiss
- return ctx, nil
- }
-
- // Get the Extents of the data in the cache
- ce := ctx.Matrix.getExtents()
-
- extent := "none"
-
- // Figure out our Deltas
- if ce.End == 0 || ce.Start == 0 {
- // Something went wrong fetching extents
- ctx.CacheLookupResult = crRangeMiss
- } else if ctx.RequestExtents.Start >= ce.Start && ctx.RequestExtents.End <= ce.End {
- // Full cache hit, no need to refresh dataset.
- // Everything we are requesting is already in cache
- ctx.CacheLookupResult = crHit
- ctx.OriginUpperExtents.Start = 0
- ctx.OriginUpperExtents.End = 0
- } else if ctx.RequestExtents.Start < ce.Start && ctx.RequestExtents.End > ce.End {
- // Partial Cache hit on both ends.
- ctx.CacheLookupResult = crPartialHit
- ctx.OriginUpperExtents.Start = ce.End + ctx.StepMS
- ctx.OriginUpperExtents.End = ctx.RequestExtents.End
- ctx.OriginLowerExtents.Start = ((ctx.RequestExtents.Start / ctx.StepMS) * ctx.StepMS)
- ctx.OriginLowerExtents.End = ce.Start
- extent = "both"
- } else if ctx.RequestExtents.Start > ce.End {
- // Range Miss on the Upper Extent of Cache. We will fill from where our cached data stops to the requested end
- ctx.CacheLookupResult = crRangeMiss
- ctx.OriginUpperExtents.Start = ce.End + ctx.StepMS
- extent = "upper"
- } else if ctx.RequestExtents.End > ce.End {
- // Partial Cache Hit, Missing the Upper Extent
- ctx.CacheLookupResult = crPartialHit
- ctx.OriginUpperExtents.Start = ce.End + ctx.StepMS
- extent = "upper"
- } else if ctx.RequestExtents.End < ce.Start {
- // Range Miss on the Lower Extent of Cache. We will fill from the requested start up to where our cached data stops
- ctx.CacheLookupResult = crRangeMiss
- ctx.OriginLowerExtents.Start = ((ctx.RequestExtents.Start / ctx.StepMS) * ctx.StepMS)
- ctx.OriginLowerExtents.End = ce.Start - ctx.StepMS
- ctx.OriginUpperExtents.Start = 0
- ctx.OriginUpperExtents.End = 0
- extent = "lower"
- } else if ctx.RequestExtents.Start < ce.Start {
- // Partial Cache Hit, Missing Lower Extent
- ctx.CacheLookupResult = crPartialHit
- ctx.OriginLowerExtents.Start = ((ctx.RequestExtents.Start / ctx.StepMS) * ctx.StepMS)
- ctx.OriginLowerExtents.End = ce.Start - ctx.StepMS
- ctx.OriginUpperExtents.Start = 0
- ctx.OriginUpperExtents.End = 0
-			extent = "lower"
- } else {
- panic(fmt.Sprintf("Reaching this final clause should be impossible. Yikes! reqStart=%d, reqEnd=%d, ce.Start=%d, ce.End=%d", ctx.RequestExtents.Start, ctx.RequestExtents.End, ce.Start, ce.End))
- }
-
- level.Debug(t.Logger).Log(lfEvent, "deltaRoutineCompleted", "CacheLookupResult", ctx.CacheLookupResult, lfCacheKey, ctx.CacheKey,
- "cacheStart", ce.Start, "cacheEnd", ce.End, "reqStart", ctx.RequestExtents.Start, "reqEnd", ctx.RequestExtents.End,
- "OriginLowerExtents.Start", ctx.OriginLowerExtents.Start, "OriginLowerExtents.End", ctx.OriginLowerExtents.End,
- "OriginUpperExtents.Start", ctx.OriginUpperExtents.Start, "OriginUpperExtents.End", ctx.OriginUpperExtents.End, "extent", extent)
- }
-
- return ctx, nil
-}
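As a worked example of the delta logic above (hypothetical millisecond extents): if the cache holds 1435781430000 through 1435781460000 and the client asks for 1435781445000 through 1435781490000 at a 15000ms step, the lookup is a partial hit; OriginUpperExtents becomes 1435781475000 through 1435781490000 (one step past the cached end, through the requested end), OriginLowerExtents stays empty, and only that upper slice is fetched from the origin before being merged with the cached matrix.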
-
-func (t *TricksterHandler) respondToCacheHit(ctx *ClientRequestContext) {
- defer ctx.WaitGroup.Done()
- t.Metrics.CacheRequestStatus.WithLabelValues(ctx.Origin.OriginURL, otPrometheus, mnQueryRange, ctx.CacheLookupResult, "200").Inc()
-
- // Do the extraction of the range the user requested from the fully cached dataset, if needed.
- ctx.Matrix.cropToRange(ctx.RequestExtents.Start, ctx.RequestExtents.End+ctx.StepMS)
-
- r := &http.Response{}
-
- // If Fast Forward is enabled and the request is a real-time request, go get that data
- if !ctx.Origin.FastForwardDisable && !(ctx.RequestExtents.End < (ctx.Time*1000)-ctx.StepMS) {
- // Query the latest points if Fast Forward is enabled
- queryURL := ctx.Origin.OriginURL + mnQuery
- originParams := url.Values{}
- // Add the prometheus query params from the user urlparams to the origin request
- passthroughParam(upQuery, ctx.RequestParams, originParams, nil)
- passthroughParam(upTimeout, ctx.RequestParams, originParams, nil)
- passthroughParam(upTime, ctx.RequestParams, originParams, nil)
- ffd, _, resp, err := t.getVectorFromPrometheus(queryURL, originParams, ctx.Request)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error fetching data from origin Prometheus", lfDetail, err.Error())
- ctx.Writer.WriteHeader(http.StatusBadGateway)
- return
- }
- r = resp
- if resp.StatusCode == http.StatusOK && ffd.Status == rvSuccess {
- ctx.Matrix = t.mergeVector(ctx.Matrix, ffd)
- }
- }
-
-	// Marshal the Envelope back to a JSON object for the User Response
- body, err := json.Marshal(ctx.Matrix)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "prometheus matrix marshaling error", lfDetail, err.Error())
- ctx.Writer.WriteHeader(http.StatusInternalServerError)
- return
- }
-
- writeResponse(ctx.Writer, body, r)
-}
-
-func writeResponse(w http.ResponseWriter, body []byte, resp *http.Response) {
- // Now we need to respond to the user request with the dataset
- setResponseHeaders(w, resp)
-
- if resp.StatusCode == 0 {
- resp.StatusCode = http.StatusOK
- }
-
- w.WriteHeader(resp.StatusCode)
- w.Write(body)
-}
-
-func (t *TricksterHandler) queueRangeProxyRequest(ctx *ClientRequestContext) {
- t.ChannelCreateMtx.Lock()
- ch, ok := t.ResponseChannels[ctx.CacheKey]
- if !ok {
- level.Info(t.Logger).Log(lfEvent, "starting originRangeProxyHandler", lfCacheKey, ctx.CacheKey)
- ch = make(chan *ClientRequestContext, 100)
- t.ResponseChannels[ctx.CacheKey] = ch
- go t.originRangeProxyHandler(ctx.CacheKey, ch)
- }
- t.ChannelCreateMtx.Unlock()
-
- ch <- ctx
-}
-
-func (t *TricksterHandler) originRangeProxyHandler(cacheKey string, originRangeRequests <-chan *ClientRequestContext) {
- // Close handler goroutine if its request channel is empty.
- go func() {
- for {
- time.Sleep(10 * time.Second)
-
- t.ChannelCreateMtx.Lock()
-
- if len(originRangeRequests) == 0 {
- if _, ok := t.ResponseChannels[cacheKey]; ok {
- close(t.ResponseChannels[cacheKey])
- delete(t.ResponseChannels, cacheKey)
- t.ChannelCreateMtx.Unlock()
- return
- }
- }
-
- t.ChannelCreateMtx.Unlock()
- }
- }()
-
- for r := range originRangeRequests {
- // get the cache data for this request again, in case anything about the record has changed
- // between the time we queued the request and the time it was consumed from the channel
- ctx, err := t.buildRequestContext(r.Writer, r.Request)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "error building request context", lfDetail, err.Error())
- r.Writer.WriteHeader(http.StatusBadRequest)
- r.WaitGroup.Done()
- continue
- }
-
- // The cache miss became a cache hit between the time it was queued and processed.
- if ctx.CacheLookupResult == crHit {
- level.Debug(t.Logger).Log(lfEvent, "delayedCacheHit", lfDetail, "cache was populated with needed data by another proxy request while this one was queued.")
-			// Lay the newly-retrieved data into the original origin range request so it can fully service the client
- r.Matrix = ctx.Matrix
- // And change the lookup result to a hit.
- r.CacheLookupResult = crHit
- // Respond with the modified original request object so the right WaitGroup is marked as Done()
- t.respondToCacheHit(r)
- } else {
-
-			// Now that we know whether we need to make any calls to the Origin, let's set those up
- upperDeltaData := PrometheusMatrixEnvelope{}
- lowerDeltaData := PrometheusMatrixEnvelope{}
- fastForwardData := PrometheusVectorEnvelope{}
-
- var wg sync.WaitGroup
-
- var m sync.Mutex // Protects originErr and resp below.
- var originErr error
- var errorBody []byte
- resp := &http.Response{}
-
- if ctx.OriginLowerExtents.Start > 0 && ctx.OriginLowerExtents.End > 0 {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- queryURL := ctx.Origin.OriginURL + mnQueryRange
- originParams := url.Values{}
- // Add the prometheus query params from the user urlparams to the origin request
- passthroughParam(upQuery, ctx.RequestParams, originParams, nil)
- passthroughParam(upTimeout, ctx.RequestParams, originParams, nil)
- originParams.Add(upStep, ctx.StepParam)
- originParams.Add(upStart, strconv.FormatInt(ctx.OriginLowerExtents.Start/1000, 10))
- originParams.Add(upEnd, strconv.FormatInt(ctx.OriginLowerExtents.End/1000, 10))
- ldd, b, r, duration, err := t.getMatrixFromPrometheus(queryURL, originParams, r.Request)
-
- if err != nil {
- m.Lock()
- originErr = err
- m.Unlock()
- return
- }
-
- m.Lock()
- if resp.StatusCode == 0 || r.StatusCode != http.StatusOK {
- if r.StatusCode != http.StatusOK {
- errorBody = b
- }
- resp = r
- }
- m.Unlock()
-
- if r.StatusCode == http.StatusOK && ldd.Status == rvSuccess {
- lowerDeltaData = ldd
- t.Metrics.ProxyRequestDuration.WithLabelValues(ctx.Origin.OriginURL, otPrometheus,
- mnQueryRange, ctx.CacheLookupResult, strconv.Itoa(r.StatusCode)).Observe(duration.Seconds())
- }
- }()
- }
-
- if ctx.OriginUpperExtents.Start > 0 && ctx.OriginUpperExtents.End > 0 {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- queryURL := ctx.Origin.OriginURL + mnQueryRange
- originParams := url.Values{}
- // Add the prometheus query params from the user urlparams to the origin request
- passthroughParam(upQuery, ctx.RequestParams, originParams, nil)
- passthroughParam(upTimeout, ctx.RequestParams, originParams, nil)
- originParams.Add(upStep, ctx.StepParam)
- originParams.Add(upStart, strconv.FormatInt(ctx.OriginUpperExtents.Start/1000, 10))
- originParams.Add(upEnd, strconv.FormatInt(ctx.OriginUpperExtents.End/1000, 10))
- udd, b, r, duration, err := t.getMatrixFromPrometheus(queryURL, originParams, r.Request)
-
- if err != nil {
- m.Lock()
- originErr = err
- m.Unlock()
- return
- }
-
- m.Lock()
- if resp.StatusCode == 0 || r.StatusCode != http.StatusOK {
- if r.StatusCode != http.StatusOK {
- errorBody = b
- }
- resp = r
- }
- m.Unlock()
-
- if r != nil && r.StatusCode == http.StatusOK && udd.Status == rvSuccess {
- upperDeltaData = udd
- t.Metrics.ProxyRequestDuration.WithLabelValues(ctx.Origin.OriginURL, otPrometheus,
- mnQueryRange, ctx.CacheLookupResult, strconv.Itoa(r.StatusCode)).Observe(duration.Seconds())
- }
- }()
- }
-
- if !ctx.Origin.FastForwardDisable && !(ctx.RequestExtents.End < ctx.Time*1000-ctx.StepMS) {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- // Query the latest points if Fast Forward is enabled
- queryURL := ctx.Origin.OriginURL + mnQuery
- originParams := url.Values{}
- // Add the prometheus query params from the user urlparams to the origin request
- passthroughParam(upQuery, ctx.RequestParams, originParams, nil)
- passthroughParam(upTimeout, ctx.RequestParams, originParams, nil)
- passthroughParam(upTime, ctx.RequestParams, originParams, nil)
- ffd, b, r, err := t.getVectorFromPrometheus(queryURL, originParams, r.Request)
-
- if err != nil {
- m.Lock()
- originErr = err
- m.Unlock()
- return
- }
-
- m.Lock()
- if resp.StatusCode == 0 || r.StatusCode != http.StatusOK {
- if r.StatusCode != http.StatusOK {
- errorBody = b
- }
- resp = r
- }
- m.Unlock()
-
- if r != nil && r.StatusCode == http.StatusOK && ffd.Status == rvSuccess {
- fastForwardData = ffd
- }
- }()
- }
-
- wg.Wait()
-
- if originErr != nil {
- level.Error(t.Logger).Log(lfEvent, "error fetching data from origin Prometheus", lfDetail, originErr.Error())
- r.Writer.WriteHeader(http.StatusBadGateway)
- r.WaitGroup.Done()
- continue
- }
-
- t.Metrics.CacheRequestStatus.WithLabelValues(ctx.Origin.OriginURL, otPrometheus, mnQueryRange, ctx.CacheLookupResult, strconv.Itoa(resp.StatusCode)).Inc()
-
- uncachedElementCnt := int64(0)
-
- if lowerDeltaData.Status == rvSuccess {
- uncachedElementCnt += lowerDeltaData.getValueCount()
- ctx.Matrix = t.mergeMatrix(ctx.Matrix, lowerDeltaData)
- }
-
- if upperDeltaData.Status == rvSuccess {
- uncachedElementCnt += upperDeltaData.getValueCount()
- ctx.Matrix = t.mergeMatrix(upperDeltaData, ctx.Matrix)
- }
-
- // If the request is entirely outside of the cache window, we don't want to cache it
- // otherwise we actually *clear* the cache of any data it has in it!
- skipCache := (ctx.Time*1000 - ctx.RequestExtents.End) > ctx.Origin.MaxValueAgeSecs*1000
-
- // If it's not a full cache hit, we want to write this back to the cache
- if ctx.CacheLookupResult != crHit && !skipCache {
- cacheMatrix := ctx.Matrix.copy()
-
- // Prune any old points based on retention policy
- cacheMatrix.cropToRange(int64(ctx.Time-ctx.Origin.MaxValueAgeSecs)*1000, 0)
-
- if ctx.Origin.NoCacheLastDataSecs != 0 {
- cacheMatrix.cropToRange(0, int64(ctx.Time-ctx.Origin.NoCacheLastDataSecs)*1000)
- }
-
- // Marshal the Envelope back to a json object for Cache Storage
- cacheBody, err := json.Marshal(cacheMatrix)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "prometheus matrix marshaling error", lfDetail, err.Error())
- r.Writer.WriteHeader(http.StatusInternalServerError)
- r.WaitGroup.Done()
- continue
- }
-
- if t.Config.Caching.Compression {
- level.Debug(t.Logger).Log("event", "Compressing Cached Data", "cacheKey", ctx.CacheKey)
- cacheBody = snappy.Encode(nil, cacheBody)
- }
-
- // Set the Cache Key with the merged dataset
- t.Cacher.Store(cacheKey, string(cacheBody), t.Config.Caching.RecordTTLSecs)
- level.Debug(t.Logger).Log(lfEvent, "setCacheRecord", lfCacheKey, cacheKey, "ttl", t.Config.Caching.RecordTTLSecs)
- }
-
-			// Do the extraction of the range the user requested, if needed.
-			// The only time it may not be needed is if the result was a Key Miss (so the dataset we have is exactly what the user asked for)
-			// We add one more step to the end of the request to ensure we catch the fast forward data
- if ctx.CacheLookupResult != crKeyMiss {
- ctx.Matrix.cropToRange(ctx.RequestExtents.Start, ctx.RequestExtents.End+ctx.StepMS)
- }
-
- allElementCnt := ctx.Matrix.getValueCount()
- cachedElementCnt := allElementCnt - uncachedElementCnt
-
- if uncachedElementCnt > 0 {
- t.Metrics.CacheRequestElements.WithLabelValues(ctx.Origin.OriginURL, otPrometheus, "uncached").Add(float64(uncachedElementCnt))
- }
-
- if cachedElementCnt > 0 {
- t.Metrics.CacheRequestElements.WithLabelValues(ctx.Origin.OriginURL, otPrometheus, "cached").Add(float64(cachedElementCnt))
- }
-
-			// Stitch in Fast Forward Data
- if fastForwardData.Status == rvSuccess {
- ctx.Matrix = t.mergeVector(ctx.Matrix, fastForwardData)
- }
-
-			// Marshal the Envelope back to a JSON object for the User Response
- body, err := json.Marshal(ctx.Matrix)
- if err != nil {
- level.Error(t.Logger).Log(lfEvent, "prometheus matrix marshaling error", lfDetail, err.Error())
- r.Writer.WriteHeader(http.StatusInternalServerError)
- r.WaitGroup.Done()
- continue
- }
-
- if resp.StatusCode != http.StatusOK {
- writeResponse(r.Writer, errorBody, resp)
- } else {
- writeResponse(r.Writer, body, resp)
- }
- r.WaitGroup.Done()
- }
- // Explicitly release the request context so that the underlying memory can be
- // freed before the next request is received via the channel, which overwrites "r".
- r = nil
- }
-}
-
-func alignStepBoundaries(start int64, end int64, stepMS int64, now int64) (int64, int64, error) {
- // Don't query beyond Time.Now() or charts will have weird data on the far right
- if end > now*1000 {
- end = now * 1000
- }
-
-	// In case the user had the start/end parameters reversed chronologically, let's return an error
- if start > end {
- return 0, 0, fmt.Errorf("start is after end")
- }
-
-	// Guard against a non-positive step, in case something inexplicably happened to the step param
- if stepMS <= 0 {
- return 0, 0, fmt.Errorf("step must be > 0")
- }
-
- // Align start/end to step boundaries
- start = (start / stepMS) * stepMS
- end = ((end / stepMS) * stepMS)
-
- return start, end, nil
-}
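A quick sketch of the boundary alignment with hypothetical inputs (a 15s step and a current time well past the requested range), assuming the surrounding package context:

start, end, _ := alignStepBoundaries(1435781430500, 1435781461200, 15000, 1435781470)
// start == 1435781430000, end == 1435781460000 -- both snapped down to the 15s grid,
// so slightly different wall-clock refreshes map onto the same cacheable point series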
-
-func (pe PrometheusMatrixEnvelope) getValueCount() int64 {
- i := int64(0)
- for j := range pe.Data.Result {
- i += int64(len(pe.Data.Result[j].Values))
- }
- return i
-}
-
-// mergeVector merges the passed PrometheusVectorEnvelope into the passed PrometheusMatrixEnvelope and returns the merged result
-func (t *TricksterHandler) mergeVector(pe PrometheusMatrixEnvelope, pv PrometheusVectorEnvelope) PrometheusMatrixEnvelope {
- if len(pv.Data.Result) == 0 {
- level.Debug(t.Logger).Log(lfEvent, "mergeVectorPrematureExit")
- return pe
- }
-
- for i := range pv.Data.Result {
- result2 := pv.Data.Result[i]
- for j := range pe.Data.Result {
- result1 := pe.Data.Result[j]
- if result2.Metric.Equal(result1.Metric) {
- if result2.Timestamp > result1.Values[len(result1.Values)-1].Timestamp {
- pe.Data.Result[j].Values = append(pe.Data.Result[j].Values, model.SamplePair{
- Timestamp: model.Time((int64(result2.Timestamp) / 1000) * 1000),
- Value: result2.Value,
- })
- }
- }
- }
- }
-
- return pe
-}
-
-// mergeMatrix merges the two passed PrometheusMatrixEnvelope objects and returns the merged result
-func (t *TricksterHandler) mergeMatrix(pe PrometheusMatrixEnvelope, pe2 PrometheusMatrixEnvelope) PrometheusMatrixEnvelope {
- if pe.Status != rvSuccess {
- pe = pe2
- return pe2
- } else if pe2.Status != rvSuccess {
- return pe
- }
-
- for i := range pe2.Data.Result {
- metricSetFound := false
- result2 := pe2.Data.Result[i]
- METRIC_MERGE:
- for j := range pe.Data.Result {
- result1 := pe.Data.Result[j]
- if result2.Metric.Equal(result1.Metric) {
- metricSetFound = true
- // Ensure that we don't duplicate datapoints or put points out-of-order
-				// This method assumes that `pe2` is "before" `pe`; we need to actually
- // check and enforce that assumption
- first := result1.Values[0]
- for x := len(result2.Values) - 1; x >= 0; x-- {
- v := result2.Values[x]
- if v.Timestamp < first.Timestamp {
- result1.Values = append(result2.Values[:x+1], result1.Values...)
- break METRIC_MERGE
- }
- }
- break METRIC_MERGE
- }
- }
-
- if !metricSetFound {
- level.Debug(t.Logger).Log(lfEvent, "MergeMatrixEnvelopeNewMetric", lfDetail, "Did not find mergeable metric set in cache", "metricFingerprint", result2.Metric.Fingerprint())
- // Couldn't find metrics with that name in the existing resultset, so this must
- // be new for this poll. That's fine, just add it outright instead of merging.
- pe.Data.Result = append(pe.Data.Result, result2)
- }
- }
-
- return pe
-}
-
-// cropToRange crops the datasets in a given PrometheusMatrixEnvelope down to the provided start and end times
-func (pe *PrometheusMatrixEnvelope) cropToRange(start int64, end int64) {
- seriesToRemove := make([]int, 0)
-
- // iterate through each metric series in the result
- for i := range pe.Data.Result {
- if start > 0 {
-			// First we determine the correct start index for each series in the Matrix
- // iterate through each value in the given metric series
- for j := range pe.Data.Result[i].Values {
- // If the timestamp for this data point is at or after the client requested start time,
- // update the slice and break the loop.
- ts := int64(pe.Data.Result[i].Values[j].Timestamp)
- if ts >= start {
- pe.Data.Result[i].Values = pe.Data.Result[i].Values[j:]
- break
- }
- }
-
- if len(pe.Data.Result[i].Values) == 0 || int64(pe.Data.Result[i].Values[len(pe.Data.Result[i].Values)-1].Timestamp) < start {
- seriesToRemove = append(seriesToRemove, i)
- }
- }
-
- if end > 0 {
- // Then we determine the correct end index for each series in the Matrix
- // iterate *backwards* through each value in the given metric series
- for j := len(pe.Data.Result[i].Values) - 1; j >= 0; j-- {
-				// If the timestamp for this data point is at or before the client requested end time,
- // update the offset and break.
- ts := int64(pe.Data.Result[i].Values[j].Timestamp)
- if ts <= end {
- pe.Data.Result[i].Values = pe.Data.Result[i].Values[:j+1]
- break
- }
- }
-
- if len(pe.Data.Result[i].Values) == 0 || int64(pe.Data.Result[i].Values[0].Timestamp) > end {
- if len(seriesToRemove) == 0 || seriesToRemove[len(seriesToRemove)-1] != i {
- seriesToRemove = append(seriesToRemove, i)
- }
- }
- }
- }
-
- for i := len(seriesToRemove) - 1; i >= 0; i-- {
- toRemove := seriesToRemove[i]
- pe.Data.Result = append(pe.Data.Result[:toRemove], pe.Data.Result[toRemove+1:]...)
- }
-}
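A small sketch of cropping with hypothetical data, using the envelope types defined above and prometheus/common/model, assuming the surrounding package context:

// a hypothetical three-point series
pe := PrometheusMatrixEnvelope{Data: PrometheusMatrixData{ResultType: rvMatrix,
	Result: []*model.SampleStream{{
		Metric: model.Metric{"__name__": "up"},
		Values: []model.SamplePair{
			{Timestamp: 1435781430000, Value: 1},
			{Timestamp: 1435781445000, Value: 1},
			{Timestamp: 1435781460000, Value: 1},
		},
	}},
}}
pe.cropToRange(1435781445000, 1435781460000)
// pe now holds only the points at 1435781445000 and 1435781460000; a series left with
// no points inside the range would be removed from the Result slice entirely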
-
-// getExtents returns the timestamps of the oldest and newest cached data points for the given query.
-func (pe PrometheusMatrixEnvelope) getExtents() MatrixExtents {
- r := pe.Data.Result
-
- var oldest int64
- var newest int64
-
- for series := range r {
- if len(r[series].Values) > 0 {
- // Update Oldest Value
- ts := int64(r[series].Values[0].Timestamp)
- if oldest == 0 || ts < oldest {
- oldest = ts
- }
-
- // Update Newest Value
- ts = int64(r[series].Values[len(r[series].Values)-1].Timestamp)
- if newest == 0 || ts > newest {
- newest = ts
- }
- }
- }
-
- return MatrixExtents{Start: oldest, End: newest}
-}
-
-// copy returns a deep copy of PrometheusMatrixEnvelope.
-func (pe PrometheusMatrixEnvelope) copy() PrometheusMatrixEnvelope {
- resPe := PrometheusMatrixEnvelope{
- Status: pe.Status,
- Data: PrometheusMatrixData{
- ResultType: pe.Data.ResultType,
- Result: make([]*model.SampleStream, len(pe.Data.Result)),
- },
- }
- for index := range pe.Data.Result {
-		resSampleStream := *pe.Data.Result[index]
-		resPe.Data.Result[index] = &resSampleStream
- }
- return resPe
-}
-
-// passthroughParam passes the parameter with paramName, if present in the requestParams, on to the proxyParams collection
-func passthroughParam(paramName string, requestParams url.Values, proxyParams url.Values, filterFunc func(string) string) {
- if value, ok := requestParams[paramName]; ok {
- if filterFunc != nil {
- value[0] = filterFunc(value[0])
- }
- proxyParams.Add(paramName, value[0])
- }
-}
-
-// md5sum returns the calculated hex string version of the md5 checksum for the input string
-func md5sum(input string) string {
- return fmt.Sprintf("%x", md5.Sum([]byte(input)))
-}
-
-// deriveCacheKey calculates a query-specific keyname based on the prometheus query in the user request
-func deriveCacheKey(prefix string, params url.Values) string {
- k := ""
- // if we have a prefix, set it up
- if len(prefix) > 0 {
- k = md5sum(prefix)
- }
-
- if query, ok := params[upQuery]; ok {
- k += "." + md5sum(query[0])
- }
-
- if t, ok := params[upTime]; ok {
- k += "." + md5sum(t[0])
- }
-
- return k
-}
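For illustration, with hypothetical inputs the derived key is simply the dot-joined md5 hex digests of the prefix, the query, and the time parameter:

// hypothetical origin prefix and request params
params := url.Values{"query": []string{"up"}, "time": []string{"1435781475"}}
key := deriveCacheKey("http://prom:9090/api/v1/15", params)
// key == md5sum("http://prom:9090/api/v1/15") + "." + md5sum("up") + "." + md5sum("1435781475")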
-
-var reRelativeTime = regexp.MustCompile(`([0-9]+)([mshdw])`)
-
-// parseTime converts a query time URL parameter to time.Time.
-// Copied from https://github.com/prometheus/prometheus/blob/v2.2.1/web/api/v1/api.go#L798-L807
-func parseTime(s string) (time.Time, error) {
- if t, err := strconv.ParseFloat(s, 64); err == nil {
- s, ns := math.Modf(t)
- return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
- }
- if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
- return t, nil
- }
- return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
-}
-
-// parseDuration converts a duration URL parameter to time.Duration.
-// Copied from https://github.com/prometheus/prometheus/blob/v2.2.1/web/api/v1/api.go#L809-L821
-func parseDuration(s string) (time.Duration, error) {
- if d, err := strconv.ParseFloat(s, 64); err == nil {
- ts := d * float64(time.Second)
- if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
- return 0, fmt.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
- }
- return time.Duration(ts), nil
- }
- if d, err := model.ParseDuration(s); err == nil {
- return time.Duration(d), nil
- }
- return 0, fmt.Errorf("cannot parse %q to a valid duration", s)
-}
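Both parsers accept either a bare numeric form or the richer Prometheus form; a few hypothetical inputs, assuming the surrounding package context:

t1, _ := parseTime("1435781475.781")           // unix seconds (with fraction)
t2, _ := parseTime("2015-07-01T20:11:15.781Z") // RFC3339Nano
d1, _ := parseDuration("15")                   // bare seconds -> 15s
d2, _ := parseDuration("5m")                   // Prometheus-style duration -> 5m0s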
diff --git a/handlers_test.go b/handlers_test.go
deleted file mode 100644
index 9ec2b5dda..000000000
--- a/handlers_test.go
+++ /dev/null
@@ -1,752 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/go-kit/kit/log"
- "github.com/prometheus/common/model"
-)
-
-const (
- nonexistantOrigin = "http://nonexistent-origin:54321"
- exampleQuery = "/api/v1/query?query=up&time=2015-07-01T20:11:15.781Z"
- exampleRangeQuery = "/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15"
- exampleRangeQuery_query = "up"
- exampleRangeQuery_start = "2015-07-01T20:10:30.781Z"
- exampleRangeQuery_end = "2015-07-01T20:11:00.781Z"
- exampleRangeQuery_step = "15"
-
- // this example should have 2 data points later than those in exampleRangeResponse
- exampleResponse = `{
- "status" : "success",
- "data" : {
- "resultType" : "vector",
- "result" : [
- {
- "metric" : {
- "__name__" : "up",
- "job" : "prometheus",
- "instance" : "localhost:9090"
- },
- "value": [ 1435781475.781, "1" ]
- },
- {
- "metric" : {
- "__name__" : "up",
- "job" : "node",
- "instance" : "localhost:9091"
- },
- "value" : [ 1435781475.781, "0" ]
- }
- ]
- }
-}`
-
- // this example should have 6 data points
- // NOTE: Times in this response should end with '.781' not '.000'. Had
- // to truncate due to how extents are measured in TricksterHandler.
- exampleRangeResponse = `{
- "status" : "success",
- "data" : {
- "resultType" : "matrix",
- "result" : [
- {
- "metric" : {
- "__name__" : "up",
- "job" : "prometheus",
- "instance" : "localhost:9090"
- },
- "values" : [
- [ 1435781430.000, "1" ],
- [ 1435781445.000, "1" ],
- [ 1435781460.000, "1" ]
- ]
- },
- {
- "metric" : {
- "__name__" : "up",
- "job" : "node",
- "instance" : "localhost:9091"
- },
- "values" : [
- [ 1435781430.000, "0" ],
- [ 1435781445.000, "0" ],
- [ 1435781460.000, "1" ]
- ]
- }
- ]
- }
-}`
-)
-
-func TestParseTime(t *testing.T) {
- fixtures := []struct {
- input string
- output string
- }{
- {"2018-04-07T05:08:53.200Z", "2018-04-07 05:08:53.2 +0000 UTC"},
- {"1523077733", "2018-04-07 05:08:53 +0000 UTC"},
- {"1523077733.2", "2018-04-07 05:08:53.200000047 +0000 UTC"},
- }
-
- for _, f := range fixtures {
- out, err := parseTime(f.input)
- if err != nil {
- t.Error(err)
- }
-
- outStr := out.UTC().String()
- if outStr != f.output {
- t.Errorf("Expected %s, got %s for input %s", f.output, outStr, f.input)
- }
- }
-}
-
-func newTestTricksterHandler(t *testing.T) (tr *TricksterHandler, close func(t *testing.T)) {
- conf := NewConfig()
-
- conf.Origins["default"] = PrometheusOriginConfig{
- OriginURL: nonexistantOrigin,
- APIPath: prometheusAPIv1Path,
- IgnoreNoCacheHeader: true,
- MaxValueAgeSecs: 86400,
- }
- tr = &TricksterHandler{
- ResponseChannels: make(map[string]chan *ClientRequestContext),
- Config: conf,
- Logger: log.NewNopLogger(),
- Metrics: NewApplicationMetrics(),
- }
-
- tr.Cacher = getCache(tr)
- if err := tr.Cacher.Connect(); err != nil {
- t.Fatal("Unable to connect to cache:", err)
- }
-
- return tr, func(t *testing.T) {
- tr.Metrics.Unregister()
- if err := tr.Cacher.Close(); err != nil {
- t.Fatal("Error closing cacher:", err)
- }
- }
-}
-
-func (t *TricksterHandler) setTestOrigin(originURL string) {
- conf := NewConfig()
- conf.Origins["default"] = PrometheusOriginConfig{
- OriginURL: originURL,
- APIPath: prometheusAPIv1Path,
- IgnoreNoCacheHeader: true,
- MaxValueAgeSecs: 86400,
- }
- t.Config = conf
-}
-
-func TestUnreachableOriginReturnsStatusBadGateway(t *testing.T) {
- tests := []struct {
- handler func(*TricksterHandler, http.ResponseWriter, *http.Request)
- path string
- }{
- {
- handler: (*TricksterHandler).promHealthCheckHandler,
- },
- {
- handler: (*TricksterHandler).promFullProxyHandler,
- },
- {
- handler: (*TricksterHandler).promQueryHandler,
- },
- {
- handler: (*TricksterHandler).promQueryRangeHandler,
- path: prometheusAPIv1Path + "query_range?start=100000000&end=200000000&step=15&query=up",
- },
- }
-
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
-
- for _, test := range tests {
- rr := httptest.NewRecorder()
- test.handler(tr, rr, httptest.NewRequest("GET", "http://trickster"+test.path, nil))
- if rr.Result().StatusCode != http.StatusBadGateway {
- t.Errorf("unexpected status code; want %d, got %d", http.StatusBadGateway, rr.Result().StatusCode)
- }
- }
-}
-
-func TestMissingRangeQueryParametersResultInStatusBadRequest(t *testing.T) {
- paramsTests := []string{
- "start=0&end=100000000&query=up",
- "end=100000000&step=15&query=up",
- "start=0&step=15&query=up",
- }
-
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
-
- for _, params := range paramsTests {
- rr := httptest.NewRecorder()
- tr.promQueryRangeHandler(rr, httptest.NewRequest("GET", "http://trickster"+prometheusAPIv1Path+"query_range?"+params, nil))
- if rr.Result().StatusCode != http.StatusBadRequest {
- t.Errorf("unexpected status code for params %q; want %d, got %d", params, http.StatusBadRequest, rr.Result().StatusCode)
- }
- }
-}
-
-func newTestServer(body string) *httptest.Server {
- handler := func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, body)
- }
- s := httptest.NewServer(http.HandlerFunc(handler))
- return s
-}
-
-func TestTricksterHandler_pingHandler(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer("{}")
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", es.URL, nil)
- tr.pingHandler(w, r)
-
- resp := w.Result()
-
- // it should return 200 OK
- if resp.StatusCode != 200 {
- t.Errorf("wanted 200 got %d.", resp.StatusCode)
- }
-
- bodyBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Error(err)
- }
-
- if string(bodyBytes) != "pong" {
- t.Errorf("wanted 'pong' got %s.", bodyBytes)
- }
-
-}
-
-func TestTricksterHandler_getOrigin(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
-
- // it should get test origin
- r := httptest.NewRequest("GET", nonexistantOrigin, nil)
- o := tr.getOrigin(r)
- if o.OriginURL != nonexistantOrigin {
- t.Errorf("wanted \"%s\" got \"%s\".", nonexistantOrigin, o.OriginURL)
- }
-}
-
-func TestTricksterHandler_promHealthCheckHandler(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer("{}")
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should proxy request
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", es.URL, nil)
- tr.promHealthCheckHandler(w, r)
-
- if w.Result().StatusCode != 200 {
- t.Errorf("wanted 200 got %d.", w.Result().StatusCode)
- }
-}
-
-func TestTricksterHandler_promFullProxyHandler(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer("{}")
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should proxy request
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", es.URL, nil)
- tr.promFullProxyHandler(w, r)
-
- if w.Result().StatusCode != 200 {
- t.Errorf("wanted 200 got %d.", w.Result().StatusCode)
- }
-}
-
-func TestTricksterHandler_promQueryHandler(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer("{}")
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should proxy request
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", es.URL, nil)
- tr.promQueryHandler(w, r)
-
- if w.Result().StatusCode != 200 {
- t.Errorf("wanted 200 got %d.", w.Result().StatusCode)
- }
-}
-
-func TestTricksterHandler_promQueryRangeHandler_cacheMiss(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer(exampleRangeResponse)
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should queue the proxy request
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", es.URL+exampleRangeQuery, nil)
- tr.promQueryRangeHandler(w, r)
-
- if w.Result().StatusCode != 200 {
- t.Errorf("wanted 200 got %d.", w.Result().StatusCode)
- }
-}
-
-func TestTricksterHandler_promQueryRangeHandler_cacheHit(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- es := newTestServer(exampleRangeResponse)
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // setup cache
- r := httptest.NewRequest("GET", es.URL+exampleRangeQuery, nil)
- tr.fetchPromQuery(es.URL+prometheusAPIv1Path+exampleRangeQuery_step, r.URL.Query(), r)
-
- // it should respond from cache
- w := httptest.NewRecorder()
- r = httptest.NewRequest("GET", es.URL+exampleRangeQuery, nil)
- tr.promQueryRangeHandler(w, r)
-
- resp := w.Result()
- defer resp.Body.Close()
-
- if resp.StatusCode != 200 {
- t.Errorf("wanted 200. got %d.", resp.StatusCode)
- }
-
- bodyBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Error(err)
- }
-
- fmt.Println(string(bodyBytes))
-
- pm := PrometheusMatrixEnvelope{}
- err = json.Unmarshal(bodyBytes, &pm)
- if err != nil {
- t.Error(err)
- }
-
- if pm.getValueCount() != 6 {
- t.Errorf("wanted 6 got %d.", pm.getValueCount())
- }
-}
-
-func TestTricksterHandler_getURL(t *testing.T) {
- tr, closeFn := newTestTricksterHandler(t)
- defer closeFn(t)
- body := "{}"
- es := newTestServer(body)
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should get from the echo server
- b, _, _, err := tr.getURL(tr.Config.Origins["default"], "GET", es.URL, url.Values{}, nil)
- if err != nil {
- t.Error(err)
- }
- if bytes.Compare(b, []byte(body)) != 0 {
- t.Errorf("wanted \"%s\" got \"%s\"", body, b)
- }
-}
-
-func TestTricksterHandler_getVectorFromPrometheus(t *testing.T) {
- tr, closeTr := newTestTricksterHandler(t)
- defer closeTr(t)
- es := newTestServer(exampleResponse)
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should get an empty vector envelope
- r := httptest.NewRequest("GET", es.URL+exampleQuery, nil)
- pe, _, _, err := tr.getVectorFromPrometheus(es.URL, r.URL.Query(), r)
- if err != nil {
- t.Error(err)
- }
- if pe.Status != "success" {
- t.Errorf("wanted \"success\" got \"%s\".", pe.Status)
- }
-}
-
-func TestTricksterHandler_getMatrixFromPrometheus(t *testing.T) {
- tr, closeTr := newTestTricksterHandler(t)
- defer closeTr(t)
- es := newTestServer(exampleRangeResponse)
- defer es.Close()
- tr.setTestOrigin(es.URL)
-
- // it should get an empty matrix envelope
- r := httptest.NewRequest("GET", es.URL+exampleRangeQuery, nil)
- pe, _, _, _, err := tr.getMatrixFromPrometheus(es.URL, r.URL.Query(), r)
- if err != nil {
- t.Error(err)
- }
- if pe.Status != "success" {
- t.Errorf("wanted \"success\" got \"%s\".", pe.Status)
- }
-}
-
-func TestTricksterHandler_respondToCacheHit(t *testing.T) {
- tr, closeTr := newTestTricksterHandler(t)
- defer closeTr(t)
-
- w := httptest.NewRecorder()
- r := httptest.NewRequest("GET", nonexistantOrigin+exampleRangeQuery, nil)
- ctx, err := tr.buildRequestContext(w, r)
- if err != nil {
- t.Error(err)
- }
-
- // it should update the response in ctx.Writer without failing
- ctx.WaitGroup.Add(1)
- tr.respondToCacheHit(ctx)
-}
-
-func TestPrometheusMatrixEnvelope_getValueCount(t *testing.T) {
- pm := PrometheusMatrixEnvelope{}
- err := json.Unmarshal([]byte(exampleRangeResponse), &pm)
- if err != nil {
- t.Error(err)
- }
-
- // it should count the values in the matrix
- if 6 != pm.getValueCount() {
- t.Errorf("wanted 6 got %d.", pm.getValueCount())
- }
-}
-
-func TestTricksterHandler_mergeVector(t *testing.T) {
- tr, closeTr := newTestTricksterHandler(t)
- defer closeTr(t)
-
- pm := PrometheusMatrixEnvelope{}
- err := json.Unmarshal([]byte(exampleRangeResponse), &pm)
- if err != nil {
- t.Error(err)
- }
-
- pv := PrometheusVectorEnvelope{}
- err = json.Unmarshal([]byte(exampleResponse), &pv)
- if err != nil {
- t.Error(err)
- }
-
- // it should merge the values from the vector into the matrix
- pe := tr.mergeVector(pm, pv)
-
- if 8 != pe.getValueCount() {
- t.Errorf("wanted 8 got %d.", pe.getValueCount())
- }
-}
-
-func TestTricksterHandler_mergeMatrix(t *testing.T) {
- tests := []struct {
- a, b, merged PrometheusMatrixEnvelope
- }{
- // Series that adhere to rule
- {
- a: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- b: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- model.SamplePair{5, 1.5},
- },
- },
- },
- },
- },
- merged: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- model.SamplePair{5, 1.5},
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- },
- // Empty second series
- {
- a: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- b: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{},
- },
- },
- },
- },
- merged: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- },
- // Series that have too many points in the second series
- {
- a: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- b: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- model.SamplePair{5, 1.5},
- model.SamplePair{10, 1.5},
- model.SamplePair{15, 1.5},
- },
- },
- },
- },
- },
- merged: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- model.SamplePair{5, 1.5},
- model.SamplePair{10, 1.5},
- },
- },
- },
- },
- },
- },
- // Series that don't adhere to rules
- {
- a: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- },
- },
- },
- },
- },
- b: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{2, 1.5},
- },
- },
- },
- },
- },
- merged: PrometheusMatrixEnvelope{
- Status: rvSuccess,
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1, 1.5},
- },
- },
- },
- },
- },
- },
- }
-
- tr, closeTr := newTestTricksterHandler(t)
- defer closeTr(t)
-
- for i, test := range tests {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- merged := tr.mergeMatrix(test.a, test.b)
- if !reflect.DeepEqual(merged, test.merged) {
- t.Fatalf("Mismatch\nactual=%v\nexpected=%v", merged, test.merged)
- }
- })
- }
-}
-
-func TestAlignStepBoundaries(t *testing.T) {
- tests := []struct {
- start, end, stepMS, now int64
- rangeStart, rangeEnd int64
- err bool
- }{
- // Basic test
- {
- 1, 100, 1, 1,
- 1, 100,
- false,
- },
- // Ensure that it aligns to the step interval
- {
- 1, 100, 10, 1,
- 0, 100,
- false,
- },
- // query with start after end, ensure that it returns an error
- {
- 100, 1, 10, 1,
- 0, 0,
- true,
- },
- // query with start and end after now
- {
- 2000, 3000, 10, 1,
- 0, 0,
- true,
- },
- // query with start and end after now, with start after end
- {
- 3000, 2000, 10, 1,
- 0, 0,
- true,
- },
- // query with no step
- {
- 1, 100, 0, 1,
- 0, 0,
- true,
- },
- // query with negative step
- {
- 1, 100, -10, 1,
- 0, 0,
- true,
- },
- }
-
- for i, test := range tests {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- s, e, err := alignStepBoundaries(test.start, test.end, test.stepMS, test.now)
- if hasErr := err != nil; hasErr != test.err {
- t.Fatalf("Mismatch in error: expected=%v actual=%v", test.err, hasErr)
- }
- if s != test.rangeStart {
- t.Fatalf("Mismatch in rangeStart: expected=%d actual=%d", test.rangeStart, s)
- }
- if e != test.rangeEnd {
- t.Fatalf("Mismatch in rangeStart: expected=%d actual=%d", test.rangeEnd, e)
- }
- })
- }
-}
diff --git a/internal/cache/badger/badger.go b/internal/cache/badger/badger.go
new file mode 100644
index 000000000..ff7efb68d
--- /dev/null
+++ b/internal/cache/badger/badger.go
@@ -0,0 +1,151 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package badger
+
+import (
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/dgraph-io/badger"
+)
+
+// Cache describes a Badger Cache
+type Cache struct {
+ Name string
+ Config *config.CachingConfig
+ dbh *badger.DB
+}
+
+// Configuration returns the Configuration for the Cache object
+func (c *Cache) Configuration() *config.CachingConfig {
+ return c.Config
+}
+
+// Connect opens the configured Badger key-value store
+func (c *Cache) Connect() error {
+ log.Info("badger cache setup", log.Pairs{"cacheDir": c.Config.Badger.Directory})
+
+ opts := badger.DefaultOptions(c.Config.Badger.Directory)
+ opts.ValueDir = c.Config.Badger.ValueDirectory
+
+ var err error
+ c.dbh, err = badger.Open(opts)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Store places the data into the Badger Cache using the provided Key and TTL
+func (c *Cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "set", "none", float64(len(data)))
+ log.Debug("badger cache store", log.Pairs{"key": cacheKey, "ttl": ttl})
+ return c.dbh.Update(func(txn *badger.Txn) error {
+ return txn.SetEntry(&badger.Entry{Key: []byte(cacheKey), Value: data, ExpiresAt: uint64(time.Now().Add(ttl).Unix())})
+ })
+}
+
+// Retrieve gets data from the Badger Cache using the provided Key.
+// Because Badger manages Object Expiration internally, allowExpired is not used.
+func (c *Cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error) {
+ var data []byte
+ err := c.dbh.View(func(txn *badger.Txn) error {
+ item, err := txn.Get([]byte(cacheKey))
+ if err != nil {
+ return err
+ }
+ data, err = item.ValueCopy(nil)
+ return err
+
+ })
+
+ if err == nil {
+ log.Debug("badger cache retrieve", log.Pairs{"key": cacheKey})
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "get", "hit", float64(len(data)))
+ return data, status.LookupStatusHit, nil
+ }
+
+ if err == badger.ErrKeyNotFound {
+ log.Debug("badger cache miss", log.Pairs{"key": cacheKey})
+ cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ return nil, status.LookupStatusKeyMiss, err
+ }
+
+ log.Debug("badger cache retrieve failed", log.Pairs{"key": cacheKey, "reason": err.Error()})
+ cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ return data, status.LookupStatusError, err
+}
+
+// Remove removes an object in cache, if present
+func (c *Cache) Remove(cacheKey string) {
+ log.Debug("badger cache remove", log.Pairs{"key": cacheKey})
+ c.dbh.Update(func(txn *badger.Txn) error {
+ return txn.Delete([]byte(cacheKey))
+ })
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+}
+
+// BulkRemove removes a list of objects from the cache. noLock is not used for Badger
+func (c *Cache) BulkRemove(cacheKeys []string, noLock bool) {
+ log.Debug("badger cache bulk remove", log.Pairs{})
+
+ c.dbh.Update(func(txn *badger.Txn) error {
+ for _, key := range cacheKeys {
+ if err := txn.Delete([]byte(key)); err != nil {
+ return err
+ }
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+ }
+ return nil
+ })
+}
+
+// Close closes the Badger Cache
+func (c *Cache) Close() error {
+ return c.dbh.Close()
+}
+
+// SetTTL updates the TTL for the provided cache object
+func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {
+ var data []byte
+ err := c.dbh.Update(func(txn *badger.Txn) error {
+ item, err := txn.Get([]byte(cacheKey))
+ if err != nil {
+ return nil
+ }
+ data, _ = item.ValueCopy(nil)
+ return txn.SetEntry(&badger.Entry{Key: []byte(cacheKey), Value: data, ExpiresAt: uint64(time.Now().Add(ttl).Unix())})
+ })
+ log.Debug("badger cache update-ttl", log.Pairs{"key": cacheKey, "ttl": ttl, "success": err == nil})
+ if err == nil {
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "update-ttl", "none", 0)
+ }
+}
+
+func (c *Cache) getExpires(cacheKey string) (int, error) {
+ var expires int
+ err := c.dbh.View(func(txn *badger.Txn) error {
+ item, err := txn.Get([]byte(cacheKey))
+ if err != nil {
+ return err
+ }
+ expires = int(item.ExpiresAt())
+ return nil
+ })
+ return expires, err
+}
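
For reference, a minimal usage sketch of the badger-backed cache added above, modeled on the test setup in badger_test.go below; the directory path, cache name, and key are illustrative placeholders, and metrics.Init() is assumed to have been called first to register the cache counters, as the tests do:

package main

import (
	"fmt"
	"time"

	"github.com/Comcast/trickster/internal/cache/badger"
	"github.com/Comcast/trickster/internal/config"
	"github.com/Comcast/trickster/internal/util/metrics"
)

func main() {
	metrics.Init() // register cache counters before any Store/Retrieve calls

	cfg := config.CachingConfig{
		CacheType: "badger",
		Badger:    config.BadgerCacheConfig{Directory: "/tmp/trickster-badger", ValueDirectory: "/tmp/trickster-badger"},
	}
	c := badger.Cache{Name: "example", Config: &cfg}

	if err := c.Connect(); err != nil {
		panic(err)
	}
	defer c.Close()

	// badger enforces the TTL itself via the entry's ExpiresAt field
	if err := c.Store("example.key", []byte("payload"), 60*time.Second); err != nil {
		panic(err)
	}

	data, lookup, err := c.Retrieve("example.key", false)
	fmt.Println(string(data), lookup, err)
}
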
diff --git a/internal/cache/badger/badger_test.go b/internal/cache/badger/badger_test.go
new file mode 100644
index 000000000..a3b574644
--- /dev/null
+++ b/internal/cache/badger/badger_test.go
@@ -0,0 +1,275 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package badger
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+const cacheType = "badger"
+const cacheKey = "cacheKey"
+
+func newCacheConfig(t *testing.T) config.CachingConfig {
+ dir, err := ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Fatalf("could not create temp directory (%s): %s", dir, err)
+ }
+ return config.CachingConfig{CacheType: cacheType, Badger: config.BadgerCacheConfig{Directory: dir, ValueDirectory: dir}}
+}
+
+func TestConfiguration(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ cfg := bc.Configuration()
+ if cfg.CacheType != cacheType {
+ t.Fatalf("expected %s got %s", cacheType, cfg.CacheType)
+ }
+}
+
+func TestBadgerCache_Connect(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ // it should connect
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+ bc.Close()
+}
+
+func TestBadgerCache_ConnectFailed(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ cacheConfig.Badger.Directory = "/root/trickster-test-noaccess"
+ os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ // it should fail to connect to an inaccessible directory
+ err := bc.Connect()
+ if err == nil {
+ t.Errorf("expected file access error for %s", cacheConfig.Badger.Directory)
+ bc.Close()
+ }
+}
+
+func TestBadgerCache_Store(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err := bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBadgerCache_Remove(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err := bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+
+ bc.Remove(cacheKey)
+
+ // it should be a cache miss
+ _, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func TestBadgerCache_BulkRemove(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err := bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ bc.BulkRemove([]string{""}, true)
+ bc.BulkRemove([]string{cacheKey}, true)
+
+ // it should be a cache miss
+ _, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+}
+
+func TestBadgerCache_Retrieve(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Badger.Directory)
+ bc := Cache{Config: &cacheConfig}
+
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should be a cache miss
+ _, ls, err := bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(5)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+
+ exp1, err := bc.getExpires(cacheKey)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // the object was stored with a 5-second TTL; extend it to 1 hour and
+ // verify via getExpires that the expiration moved forward accordingly
+ bc.SetTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ exp2, err := bc.getExpires(cacheKey)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // should be around 3595
+ diff := exp2 - exp1
+ const expected = 3590
+
+ if diff < expected {
+ t.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, exp2, exp1)
+ }
+
+ // try a non-existent cacheKey
+ ck2 := cacheKey + "xxxx"
+ bc.SetTTL(ck2, time.Duration(3600)*time.Second)
+
+ // it should be a cache miss
+ _, ls, err = bc.Retrieve(ck2, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", ck2)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ // it should also have no expiration
+ _, err = bc.getExpires(ck2)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", ck2)
+ }
+}
+
+func TestBadgerCache_Close(t *testing.T) {
+ dir, err := ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Fatalf("could not create temp directory (%s): %s", dir, err)
+ }
+ defer os.RemoveAll(dir)
+
+ cacheConfig := config.CachingConfig{CacheType: cacheType, Badger: config.BadgerCacheConfig{Directory: dir, ValueDirectory: dir}}
+ bc := Cache{Config: &cacheConfig}
+
+ if err := bc.Connect(); err != nil {
+ t.Error(err)
+ }
+
+ // it should close
+ if err := bc.Close(); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/internal/cache/bbolt/bbolt.go b/internal/cache/bbolt/bbolt.go
new file mode 100644
index 000000000..061a452a0
--- /dev/null
+++ b/internal/cache/bbolt/bbolt.go
@@ -0,0 +1,207 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package bbolt
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/coreos/bbolt"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/index"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/pkg/locks"
+)
+
+var lockPrefix string
+
+// Cache describes a BBolt Cache
+type Cache struct {
+ Name string
+ Config *config.CachingConfig
+ dbh *bbolt.DB
+ Index *index.Index
+}
+
+// Configuration returns the Configuration for the Cache object
+func (c *Cache) Configuration() *config.CachingConfig {
+ return c.Config
+}
+
+// Connect opens the configured BBolt database file, ensures the bucket exists, and loads the cache index, which manages expired entry reaping
+func (c *Cache) Connect() error {
+ log.Info("bbolt cache setup", log.Pairs{"name": c.Name, "cacheFile": c.Config.BBolt.Filename})
+
+ lockPrefix = c.Name + ".bbolt."
+
+ var err error
+ c.dbh, err = bbolt.Open(c.Config.BBolt.Filename, 0644, &bbolt.Options{Timeout: 1 * time.Second})
+ if err != nil {
+ return err
+ }
+
+ err = c.dbh.Update(func(tx *bbolt.Tx) error {
+ _, err2 := tx.CreateBucketIfNotExists([]byte(c.Config.BBolt.Bucket))
+ if err2 != nil {
+ return fmt.Errorf("create bucket: %s", err2)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // load any previously persisted index data and hand it to NewIndex
+ indexData, _, _ := c.retrieve(index.IndexKey, false, false)
+ c.Index = index.NewIndex(c.Name, c.Config.CacheType, indexData, c.Config.Index, c.BulkRemove, c.storeNoIndex)
+ return nil
+}
+
+// Store places an object in the cache using the specified key and ttl
+func (c *Cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
+ return c.store(cacheKey, data, ttl, true)
+}
+
+func (c *Cache) storeNoIndex(cacheKey string, data []byte) {
+ err := c.store(cacheKey, data, 31536000*time.Second, false)
+ if err != nil {
+ log.Error("cache failed to write non-indexed object", log.Pairs{"cacheName": c.Name, "cacheType": "bbolt", "cacheKey": cacheKey, "objectSize": len(data)})
+ }
+}
+
+func (c *Cache) store(cacheKey string, data []byte, ttl time.Duration, updateIndex bool) error {
+
+ locks.Acquire(lockPrefix + cacheKey)
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "set", "none", float64(len(data)))
+
+ o := &index.Object{Key: cacheKey, Value: data, Expiration: time.Now().Add(ttl)}
+ err := writeToBBolt(c.dbh, c.Config.BBolt.Bucket, cacheKey, o.ToBytes())
+ if err != nil {
+ locks.Release(lockPrefix + cacheKey)
+ return err
+ }
+ log.Debug("bbolt cache store", log.Pairs{"key": cacheKey, "ttl": ttl, "indexed": updateIndex})
+ if updateIndex {
+ c.Index.UpdateObject(o)
+ }
+ locks.Release(lockPrefix + cacheKey)
+ return nil
+}
+
+func writeToBBolt(dbh *bbolt.DB, bucketName, cacheKey string, data []byte) error {
+ err := dbh.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte(bucketName))
+ err2 := b.Put([]byte(cacheKey), data)
+ locks.Release(lockPrefix + cacheKey)
+ return err2
+ })
+ return err
+}
+
+// Retrieve looks for an object in cache and returns it (or an error if not found)
+func (c *Cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error) {
+ return c.retrieve(cacheKey, allowExpired, true)
+}
+
+func (c *Cache) retrieve(cacheKey string, allowExpired bool, atime bool) ([]byte, status.LookupStatus, error) {
+
+ locks.Acquire(lockPrefix + cacheKey)
+
+ var data []byte
+ err := c.dbh.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte(c.Config.BBolt.Bucket))
+ data = b.Get([]byte(cacheKey))
+ if data == nil {
+ log.Debug("bbolt cache miss", log.Pairs{"key": cacheKey})
+ _, cme := cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ locks.Release(lockPrefix + cacheKey)
+ return cme
+ }
+ locks.Release(lockPrefix + cacheKey)
+ return nil
+ })
+ if err != nil {
+ locks.Release(lockPrefix + cacheKey)
+ return nil, status.LookupStatusKeyMiss, err
+ }
+
+ o, err := index.ObjectFromBytes(data)
+ if err != nil {
+ locks.Release(lockPrefix + cacheKey)
+ _, err = cache.CacheError(cacheKey, c.Name, c.Config.CacheType, "value for key [%s] could not be deserialized from cache")
+ return nil, status.LookupStatusError, err
+ }
+ o.Expiration = c.Index.GetExpiration(cacheKey)
+
+ if allowExpired || o.Expiration.IsZero() || o.Expiration.After(time.Now()) {
+ log.Debug("bbolt cache retrieve", log.Pairs{"cacheKey": cacheKey})
+ if atime {
+ c.Index.UpdateObjectAccessTime(cacheKey)
+ }
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "get", "hit", float64(len(data)))
+ locks.Release(lockPrefix + cacheKey)
+ return o.Value, status.LookupStatusHit, nil
+ }
+ // Cache Object has been expired but not reaped, go ahead and delete it
+ c.remove(cacheKey, false)
+ b, err := cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ locks.Release(lockPrefix + cacheKey)
+
+ return b, status.LookupStatusKeyMiss, err
+}
+
+// SetTTL updates the TTL for the provided cache object
+func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {
+ locks.Acquire(lockPrefix + cacheKey)
+ c.Index.UpdateObjectTTL(cacheKey, ttl)
+ locks.Release(lockPrefix + cacheKey)
+}
+
+// Remove removes an object in cache, if present
+func (c *Cache) Remove(cacheKey string) {
+ locks.Acquire(lockPrefix + cacheKey)
+ c.remove(cacheKey, false)
+ locks.Release(lockPrefix + cacheKey)
+}
+
+func (c *Cache) remove(cacheKey string, noLock bool) error {
+
+ err := c.dbh.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte(c.Config.BBolt.Bucket))
+ return b.Delete([]byte(cacheKey))
+ })
+ if err != nil {
+ log.Error("bbolt cache key delete failure", log.Pairs{"cacheKey": cacheKey, "reason": err.Error()})
+ return err
+ }
+ c.Index.RemoveObject(cacheKey, noLock)
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+ log.Debug("bbolt cache key delete", log.Pairs{"key": cacheKey})
+ return nil
+}
+
+// BulkRemove removes a list of objects from the cache
+func (c *Cache) BulkRemove(cacheKeys []string, noLock bool) {
+ for _, cacheKey := range cacheKeys {
+ c.remove(cacheKey, noLock)
+ }
+}
+
+// Close closes the Cache
+func (c *Cache) Close() error {
+ return c.dbh.Close()
+}
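
As with badger above, a minimal, illustrative sketch of wiring up the bbolt-backed cache; the filename, bucket, and key are placeholders, not part of this change. The notable difference is that expiration is tracked by the cache index rather than by bbolt itself, so an expired entry is deleted and reported as a key miss on the next Retrieve:

package main

import (
	"fmt"
	"time"

	"github.com/Comcast/trickster/internal/cache/bbolt"
	"github.com/Comcast/trickster/internal/config"
	"github.com/Comcast/trickster/internal/util/metrics"
)

func main() {
	metrics.Init()

	cfg := config.CachingConfig{
		CacheType: "bbolt",
		BBolt:     config.BBoltCacheConfig{Filename: "/tmp/trickster-example.db", Bucket: "trickster"},
		Index:     config.CacheIndexConfig{ReapInterval: time.Second},
	}
	c := bbolt.Cache{Name: "example", Config: &cfg}
	if err := c.Connect(); err != nil {
		panic(err)
	}
	defer c.Close()

	if err := c.Store("example.key", []byte("payload"), time.Minute); err != nil {
		panic(err)
	}

	// back-dating the TTL causes the next Retrieve to remove the entry
	// and report a key miss, since the index owns expiration here
	c.SetTTL("example.key", -1*time.Hour)
	_, ls, err := c.Retrieve("example.key", false)
	fmt.Println(ls, err)
}
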
diff --git a/internal/cache/bbolt/bbolt_test.go b/internal/cache/bbolt/bbolt_test.go
new file mode 100644
index 000000000..f6037d416
--- /dev/null
+++ b/internal/cache/bbolt/bbolt_test.go
@@ -0,0 +1,581 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package bbolt
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/util/log"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+const cacheType = "bbolt"
+const cacheKey = "cacheKey"
+
+func newCacheConfig() config.CachingConfig {
+ const testDbPath = "/tmp/test.db"
+ os.Remove(testDbPath)
+ return config.CachingConfig{CacheType: cacheType, BBolt: config.BBoltCacheConfig{Filename: testDbPath, Bucket: "trickster_test"}, Index: config.CacheIndexConfig{ReapInterval: time.Second}}
+}
+
+func storeBenchmark(b *testing.B) Cache {
+ log.Logger = log.ConsoleLogger("none")
+ testDbPath := "/tmp/test.db"
+ os.Remove(testDbPath)
+ cacheConfig := config.CachingConfig{CacheType: cacheType, BBolt: config.BBoltCacheConfig{Filename: testDbPath, Bucket: "trickster_test"}, Index: config.CacheIndexConfig{ReapInterval: time.Second}}
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ b.Error(err)
+ }
+
+ // it should store a value
+ for n := 0; n < b.N; n++ {
+ err = bc.Store(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)), time.Duration(60)*time.Second)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ return bc
+}
+
+func TestConfiguration(t *testing.T) {
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ cfg := bc.Configuration()
+ if cfg.CacheType != cacheType {
+ t.Errorf("expected %s got %s", cacheType, cfg.CacheType)
+ }
+}
+
+func TestBboltCache_Connect(t *testing.T) {
+ cacheConfig := newCacheConfig()
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+ bc := Cache{Config: &cacheConfig}
+ // it should connect
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ bc.Close()
+}
+
+func TestBboltCache_ConnectFailed(t *testing.T) {
+ const expected = `open /root/noaccess.bbolt:`
+ cacheConfig := newCacheConfig()
+ cacheConfig.BBolt.Filename = "/root/noaccess.bbolt"
+ bc := Cache{Config: &cacheConfig}
+ // it should fail to connect to an inaccessible file
+ err := bc.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected)
+ bc.Close()
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+ }
+ if !strings.HasPrefix(err.Error(), expected) {
+ t.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+}
+
+func TestBboltCache_ConnectBadBucketName(t *testing.T) {
+ const expected = `create bucket: bucket name required`
+ cacheConfig := newCacheConfig()
+ cacheConfig.BBolt.Bucket = ""
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+ bc := Cache{Config: &cacheConfig}
+ // it should fail to connect because the bucket name is empty
+ err := bc.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected)
+ bc.Close()
+ }
+ if err.Error() != expected {
+ t.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+}
+
+func TestBboltCache_Store(t *testing.T) {
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkCache_Store(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+}
+
+func TestBboltCache_SetTTL(t *testing.T) {
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ exp1 := bc.Index.GetExpiration(cacheKey)
+ if !exp1.IsZero() {
+ t.Errorf("expected Zero time, got %v", exp1)
+ }
+
+ // it should store a value
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ exp1 = bc.Index.GetExpiration(cacheKey)
+ if exp1.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ bc.SetTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ exp2 := bc.Index.GetExpiration(cacheKey)
+ if exp2.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3540
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ t.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+}
+
+func BenchmarkCache_SetTTL(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+ for n := 0; n < b.N; n++ {
+ exp1 := bc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp1.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ bc.SetTTL(cacheKey+strconv.Itoa(n), time.Duration(3600)*time.Second)
+
+ exp2 := bc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp2.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3540
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ b.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+ }
+}
+
+func TestBboltCache_StoreNoIndex(t *testing.T) {
+
+ const expected = `value for key [] not in cache`
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ bc.storeNoIndex(cacheKey, []byte("data"))
+
+ // it should retrieve a value
+ data, ls, err := bc.retrieve(cacheKey, false, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+
+ // test for error when bad key name
+ bc.storeNoIndex("", []byte("data"))
+
+ data, ls, err = bc.retrieve("", false, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected)
+ bc.Close()
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ if err.Error() != expected {
+ t.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+}
+
+func BenchmarkCache_StoreNoIndex(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+ for n := 0; n < b.N; n++ {
+ expected := `value for key [] not in cache`
+ // it should store a value
+ bc.storeNoIndex(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)))
+
+ // it should retrieve a value
+ data, ls, err := bc.retrieve(cacheKey+strconv.Itoa(n), false, false)
+ if err != nil {
+ b.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+
+ // test for error when bad key name
+ bc.storeNoIndex("", []byte("data"+strconv.Itoa(n)))
+
+ data, ls, err = bc.retrieve("", false, false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected)
+ bc.Close()
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ if err.Error() != expected {
+ b.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ }
+}
+
+func TestBboltCache_Remove(t *testing.T) {
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+
+ bc.Remove(cacheKey)
+
+ // it should be a cache miss
+ _, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+}
+
+func BenchmarkCache_Remove(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+ for n := 0; n < b.N; n++ {
+ var data []byte
+ data, ls, err := bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\"", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ bc.Remove(cacheKey + strconv.Itoa(n))
+
+ // this should now return error
+ data, ls, err = bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ expectederr := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ if err == nil {
+ b.Errorf("expected error for %s", expectederr)
+ bc.Close()
+ }
+ if err.Error() != expectederr {
+ b.Errorf("expected error '%s' got '%s'", expectederr, err.Error())
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ }
+}
+
+func TestBboltCache_BulkRemove(t *testing.T) {
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ // it should store a value
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ bc.BulkRemove([]string{cacheKey}, true)
+
+ // it should be a cache miss
+ _, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+}
+
+func BenchmarkCache_BulkRemove(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+
+ var keyArray []string
+ for n := 0; n < b.N; n++ {
+ keyArray = append(keyArray, cacheKey+strconv.Itoa(n))
+ }
+
+ bc.BulkRemove(keyArray, true)
+
+ // it should be a cache miss
+ for n := 0; n < b.N; n++ {
+ _, ls, err := bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestBboltCache_Retrieve(t *testing.T) {
+
+ const expected1 = `value for key [cacheKey] not in cache`
+ const expected2 = `value for key [cacheKey] could not be deserialized from cache`
+
+ cacheConfig := newCacheConfig()
+ bc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := bc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer bc.Close()
+
+ err = bc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := bc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ bc.SetTTL(cacheKey, -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ bc.Close()
+ }
+ if err.Error() != expected1 {
+ t.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ // create a corrupted cache entry and expect an error
+ writeToBBolt(bc.dbh, cacheConfig.BBolt.Bucket, cacheKey, []byte("asdasdfasf"))
+
+ // it should fail to retrieve a value
+ data, ls, err = bc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected2)
+ bc.Close()
+ }
+ if err.Error() != expected2 {
+ t.Errorf("expected error '%s' got '%s'", expected2, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusError {
+ t.Errorf("expected %s got %s", status.LookupStatusError, ls)
+ }
+}
+
+func BenchmarkCache_Retrieve(b *testing.B) {
+ bc := storeBenchmark(b)
+ defer bc.Close()
+
+ for n := 0; n < b.N; n++ {
+ expected1 := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ expected2 := `value for key [` + cacheKey + strconv.Itoa(n) + `] could not be deserialized from cache`
+
+ data, ls, err := bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ bc.SetTTL(cacheKey+strconv.Itoa(n), -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected1)
+ bc.Close()
+ }
+ if err.Error() != expected1 {
+ b.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ // create a corrupted cache entry and expect an error
+ writeToBBolt(bc.dbh, bc.Config.BBolt.Bucket, cacheKey+strconv.Itoa(n), []byte("asdasdfasf"+strconv.Itoa(n)))
+
+ // it should fail to retrieve a value
+ data, ls, err = bc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected2)
+ bc.Close()
+ }
+ if err.Error() != expected2 {
+ b.Errorf("expected error '%s' got '%s'", expected2, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusError {
+ b.Errorf("expected %s got %s", status.LookupStatusError, ls)
+ }
+ }
+}
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
new file mode 100644
index 000000000..67b725f59
--- /dev/null
+++ b/internal/cache/cache.go
@@ -0,0 +1,97 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package cache
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+// ErrKNF represents the error "key not found in cache"
+var ErrKNF = errors.New("key not found in cache")
+
+// Cache is the interface for the supported caching fabrics
+// When making new cache types, Retrieve() must return an error on cache miss
+type Cache interface {
+ Connect() error
+ Store(cacheKey string, data []byte, ttl time.Duration) error
+ Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error)
+ SetTTL(cacheKey string, ttl time.Duration)
+ Remove(cacheKey string)
+ BulkRemove(cacheKeys []string, noLock bool)
+ Close() error
+ Configuration() *config.CachingConfig
+}
+
+// MemoryCache is the interface for an in-memory cache
+// This offers additional methods for storing and retrieving references, bypassing serialization
+type MemoryCache interface {
+ Connect() error
+ Store(cacheKey string, data []byte, ttl time.Duration) error
+ Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error)
+ SetTTL(cacheKey string, ttl time.Duration)
+ Remove(cacheKey string)
+ BulkRemove(cacheKeys []string, noLock bool)
+ Close() error
+ Configuration() *config.CachingConfig
+ StoreReference(cacheKey string, data ReferenceObject, ttl time.Duration) error
+ RetrieveReference(cacheKey string, allowExpired bool) (interface{}, status.LookupStatus, error)
+}
+
+// ReferenceObject defines an interface for a cache object possessing the ability to report
+// the approximate comprehensive byte size of its members, to assist with cache size management
+type ReferenceObject interface {
+ Size() int
+}
+
+// ObserveCacheMiss returns a standard Cache Miss response
+func ObserveCacheMiss(cacheKey, cacheName, cacheType string) ([]byte, error) {
+ ObserveCacheOperation(cacheName, cacheType, "get", "miss", 0)
+ return nil, fmt.Errorf("value for key [%s] not in cache", cacheKey)
+}
+
+// ObserveCacheDel records a cache deletion event
+func ObserveCacheDel(cache, cacheType string, count float64) {
+ ObserveCacheOperation(cache, cacheType, "del", "none", count)
+}
+
+// CacheError returns an empty cache object and the formatted error
+func CacheError(cacheKey, cacheName, cacheType string, msg string) ([]byte, error) {
+ ObserveCacheEvent(cacheName, cacheType, "error", msg)
+ return nil, fmt.Errorf(msg, cacheKey)
+}
+
+// ObserveCacheOperation increments counters as cache operations occur
+func ObserveCacheOperation(cache, cacheType, operation, status string, bytes float64) {
+ metrics.CacheObjectOperations.WithLabelValues(cache, cacheType, operation, status).Inc()
+ if bytes > 0 {
+ metrics.CacheByteOperations.WithLabelValues(cache, cacheType, operation, status).Add(float64(bytes))
+ }
+}
+
+// ObserveCacheEvent increments counters as cache events occur
+func ObserveCacheEvent(cache, cacheType, event, reason string) {
+ metrics.CacheEvents.WithLabelValues(cache, cacheType, event, reason).Inc()
+}
+
+// ObserveCacheSizeChange adjusts counters and gauges as the cache size changes due to object operations
+func ObserveCacheSizeChange(cache, cacheType string, byteCount, objectCount int64) {
+ metrics.CacheObjects.WithLabelValues(cache, cacheType).Set(float64(objectCount))
+ metrics.CacheBytes.WithLabelValues(cache, cacheType).Set(float64(byteCount))
+}
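
A caller-side sketch of the Retrieve contract documented above (an error plus a key-miss status on a miss); the helper, its name, and the fetch callback are illustrative only and not part of this change:

package example

import (
	"time"

	"github.com/Comcast/trickster/internal/cache"
	"github.com/Comcast/trickster/internal/cache/status"
)

// readThrough returns the cached bytes for key, or fetches and caches them on a miss.
// Any backend in this change (badger, bbolt, filesystem, ...) satisfies cache.Cache.
func readThrough(c cache.Cache, key string, ttl time.Duration, fetch func() ([]byte, error)) ([]byte, error) {
	// per the interface comment above, Retrieve returns an error on a cache miss,
	// so a nil error with LookupStatusHit means the bytes are usable as-is
	if data, ls, err := c.Retrieve(key, false); err == nil && ls == status.LookupStatusHit {
		return data, nil
	}
	data, err := fetch()
	if err != nil {
		return nil, err
	}
	if err := c.Store(key, data, ttl); err != nil {
		return nil, err
	}
	return data, nil
}
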
diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go
new file mode 100644
index 000000000..c7cb22183
--- /dev/null
+++ b/internal/cache/cache_test.go
@@ -0,0 +1,66 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package cache
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+var testCacheKey, testCacheName, testCacheType string
+
+func init() {
+
+ testCacheKey = "test-key"
+ testCacheName = "test-cache"
+ testCacheType = "test"
+
+ metrics.Init()
+
+}
+
+func TestObserveCacheMiss(t *testing.T) {
+ e1 := fmt.Errorf("value for key [%s] not in cache", testCacheKey)
+ _, err := ObserveCacheMiss(testCacheKey, testCacheName, testCacheType)
+ if err.Error() != e1.Error() {
+ t.Error(err)
+ }
+}
+
+func TestObserveCacheDel(t *testing.T) {
+ ObserveCacheDel(testCacheName, testCacheType, 0)
+}
+
+func TestCacheError(t *testing.T) {
+ _, err := CacheError(testCacheKey, testCacheName, testCacheType, "%s")
+ if err.Error() != testCacheKey {
+ t.Errorf("expected %s got %s", testCacheKey, err.Error())
+ }
+}
+
+func TestObserveCacheOperation(t *testing.T) {
+ ObserveCacheOperation(testCacheName, testCacheType, "set", "ok", 0)
+ ObserveCacheOperation(testCacheName, testCacheType, "set", "ok", 1)
+}
+
+func TestObserveCacheEvent(t *testing.T) {
+ ObserveCacheEvent(testCacheName, testCacheType, "test", "test")
+}
+
+func TestObserveCacheSizeChange(t *testing.T) {
+ ObserveCacheSizeChange(testCacheName, testCacheType, 0, 0)
+}
diff --git a/internal/cache/filesystem/filesystem.go b/internal/cache/filesystem/filesystem.go
new file mode 100644
index 000000000..93d45e88d
--- /dev/null
+++ b/internal/cache/filesystem/filesystem.go
@@ -0,0 +1,197 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package filesystem
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/index"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/pkg/locks"
+)
+
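+// lockPrefix is prepended to cache keys to namespace the named locks that serialize file access for this cache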
+var lockPrefix string
+
+// Cache describes a Filesystem Cache
+type Cache struct {
+ Name string
+ Config *config.CachingConfig
+ Index *index.Index
+}
+
+// Configuration returns the Configuration for the Cache object
+func (c *Cache) Configuration() *config.CachingConfig {
+ return c.Config
+}
+
+// Connect creates the cache directory if needed, loads any existing cache index, and starts the index's maintenance goroutines
+func (c *Cache) Connect() error {
+ log.Info("filesystem cache setup", log.Pairs{"name": c.Name, "cachePath": c.Config.Filesystem.CachePath})
+ if err := makeDirectory(c.Config.Filesystem.CachePath); err != nil {
+ return err
+ }
+ lockPrefix = c.Name + ".file."
+
+ // load any previously flushed index data from disk and hand it to the new Index
+ indexData, _, _ := c.retrieve(index.IndexKey, false, false)
+ c.Index = index.NewIndex(c.Name, c.Config.CacheType, indexData, c.Config.Index, c.BulkRemove, c.storeNoIndex)
+ return nil
+}
+
+// Store places an object in the cache using the specified key and ttl
+func (c *Cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
+ return c.store(cacheKey, data, ttl, true)
+}
+
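+// storeNoIndex writes an object to disk without updating the cache index; the index
+// uses it to flush itself, with a fixed, effectively non-expiring TTL (31536000s ≈ 1 year)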
+func (c *Cache) storeNoIndex(cacheKey string, data []byte) {
+ err := c.store(cacheKey, data, 31536000*time.Second, false)
+ if err != nil {
+ log.Error("cache failed to write non-indexed object", log.Pairs{"cacheName": c.Name, "cacheType": "filesystem", "cacheKey": cacheKey, "objectSize": len(data)})
+ }
+}
+
+func (c *Cache) store(cacheKey string, data []byte, ttl time.Duration, updateIndex bool) error {
+
+ if ttl < 1 {
+ return fmt.Errorf("invalid ttl: %d", int64(ttl.Seconds()))
+ }
+
+ if cacheKey == "" {
+ return fmt.Errorf("cacheKey required")
+ }
+
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "set", "none", float64(len(data)))
+
+ dataFile := c.getFileName(cacheKey)
+
+ locks.Acquire(lockPrefix + cacheKey)
+
+ o := &index.Object{Key: cacheKey, Value: data, Expiration: time.Now().Add(ttl)}
+ err := ioutil.WriteFile(dataFile, o.ToBytes(), os.FileMode(0777))
+ if err != nil {
+ locks.Release(lockPrefix + cacheKey)
+ return err
+ }
+ log.Debug("filesystem cache store", log.Pairs{"key": cacheKey, "dataFile": dataFile, "indexed": updateIndex})
+ if updateIndex {
+ c.Index.UpdateObject(o)
+ }
+ locks.Release(lockPrefix + cacheKey)
+ return nil
+
+}
+
+// Retrieve looks for an object in cache and returns it (or an error if not found)
+func (c *Cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error) {
+ return c.retrieve(cacheKey, allowExpired, true)
+}
+
+func (c *Cache) retrieve(cacheKey string, allowExpired bool, atime bool) ([]byte, status.LookupStatus, error) {
+
+ dataFile := c.getFileName(cacheKey)
+
+ locks.Acquire(lockPrefix + cacheKey)
+
+ data, err := ioutil.ReadFile(dataFile)
+ if err != nil {
+ log.Debug("filesystem cache miss", log.Pairs{"key": cacheKey, "dataFile": dataFile})
+ b, err2 := cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ locks.Release(lockPrefix + cacheKey)
+ return b, status.LookupStatusKeyMiss, err2
+ }
+
+ o, err := index.ObjectFromBytes(data)
+ if err != nil {
+ locks.Release(lockPrefix + cacheKey)
+ _, err2 := cache.CacheError(cacheKey, c.Name, c.Config.CacheType, "value for key [%s] could not be deserialized from cache")
+ return nil, status.LookupStatusError, err2
+ }
+ o.Expiration = c.Index.GetExpiration(cacheKey)
+ if allowExpired || o.Expiration.IsZero() || o.Expiration.After(time.Now()) {
+ log.Debug("filesystem cache retrieve", log.Pairs{"key": cacheKey, "dataFile": dataFile})
+ if atime {
+ c.Index.UpdateObjectAccessTime(cacheKey)
+ }
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "get", "hit", float64(len(data)))
+ locks.Release(lockPrefix + cacheKey)
+ return o.Value, status.LookupStatusHit, nil
+ }
+ // Cache Object has been expired but not reaped, go ahead and delete it
+ c.remove(cacheKey, false)
+ b, err := cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ locks.Release(lockPrefix + cacheKey)
+ return b, status.LookupStatusKeyMiss, err
+
+}
+
+// SetTTL updates the TTL for the provided cache object
+func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {
+ c.Index.UpdateObjectTTL(cacheKey, ttl)
+}
+
+// Remove removes an object from the cache
+func (c *Cache) Remove(cacheKey string) {
+ locks.Acquire(lockPrefix + cacheKey)
+ c.remove(cacheKey, false)
+ locks.Release(lockPrefix + cacheKey)
+}
+
+func (c *Cache) remove(cacheKey string, noLock bool) {
+
+ if err := os.Remove(c.getFileName(cacheKey)); err == nil {
+ c.Index.RemoveObject(cacheKey, noLock)
+ }
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+}
+
+// BulkRemove removes a list of objects from the cache
+func (c *Cache) BulkRemove(cacheKeys []string, noLock bool) {
+ for _, cacheKey := range cacheKeys {
+ c.Remove(cacheKey)
+ }
+}
+
+// Close is a no-op for the Filesystem Cache
+func (c *Cache) Close() error {
+ return nil
+}
+
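+// getFileName returns the on-disk data file path for the given cache key (<CachePath>/<cacheKey>.data)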
+func (c *Cache) getFileName(cacheKey string) string {
+ prefix := strings.Replace(c.Config.Filesystem.CachePath+"/"+cacheKey+".", "//", "/", 1)
+ return prefix + "data"
+}
+
+// writeable returns true if the path is writeable by the calling process.
+func writeable(path string) bool {
+ return unix.Access(path, unix.W_OK) == nil
+}
+
+// makeDirectory creates a directory on the filesystem and returns an error if the path cannot be created or is not writeable.
+func makeDirectory(path string) error {
+ err := os.MkdirAll(path, 0755)
+ if err != nil || !writeable(path) {
+ return fmt.Errorf("[%s] directory is not writeable by trickster: %v", path, err)
+ }
+
+ return nil
+}
diff --git a/internal/cache/filesystem/filesystem_test.go b/internal/cache/filesystem/filesystem_test.go
new file mode 100644
index 000000000..aa636ffd9
--- /dev/null
+++ b/internal/cache/filesystem/filesystem_test.go
@@ -0,0 +1,588 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package filesystem
+
+import (
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/util/log"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+const cacheType = "filesystem"
+const cacheKey = "cacheKey"
+
+func storeBenchmark(b *testing.B) Cache {
+ log.Logger = log.ConsoleLogger("none")
+ dir, _ := ioutil.TempDir("/tmp", cacheType)
+ cacheConfig := config.CachingConfig{CacheType: cacheType, Filesystem: config.FilesystemCacheConfig{CachePath: dir}, Index: config.CacheIndexConfig{ReapInterval: time.Second}}
+ fc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.BBolt.Filename)
+
+ err := fc.Connect()
+ if err != nil {
+ b.Error(err)
+ }
+
+ // it should store a value
+ for n := 0; n < b.N; n++ {
+ err = fc.Store(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)), time.Duration(60)*time.Second)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ return fc
+}
+
+func newCacheConfig(t *testing.T) config.CachingConfig {
+ dir, err := ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Fatalf("could not create temp directory (%s): %s", dir, err)
+ }
+ return config.CachingConfig{CacheType: cacheType, Filesystem: config.FilesystemCacheConfig{CachePath: dir}, Index: config.CacheIndexConfig{ReapInterval: time.Second}}
+}
+
+func TestConfiguration(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+ cfg := fc.Configuration()
+ if cfg.CacheType != cacheType {
+ t.Fatalf("expected %s got %s", cacheType, cfg.CacheType)
+ }
+}
+
+func TestFilesystemCache_Connect(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ // it should connect
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestFilesystemCache_ConnectFailed(t *testing.T) {
+ const expected = `[/root/noaccess.trickster.filesystem.cache] directory is not writeable by trickster:`
+ cacheConfig := newCacheConfig(t)
+ cacheConfig.Filesystem.CachePath = "/root/noaccess.trickster.filesystem.cache"
+ fc := Cache{Config: &cacheConfig}
+ // it should connect
+ err := fc.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected)
+ fc.Close()
+ }
+ if !strings.HasPrefix(err.Error(), expected) {
+ t.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+}
+
+func TestFilesystemCache_Store(t *testing.T) {
+
+ const expected1 = "invalid ttl: -1"
+ const expected2 = "open /root/noaccess.trickster.filesystem.cache/cacheKey.data:"
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should store a value
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should return an error
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(-1)*time.Second)
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ }
+ if err.Error() != expected1 {
+ t.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+
+ cacheConfig.Filesystem.CachePath = "/root/noaccess.trickster.filesystem.cache"
+ // it should return an error
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err == nil {
+ t.Errorf("expected error for %s", expected2)
+ }
+ if !strings.HasPrefix(err.Error(), expected2) {
+ t.Errorf("expected error '%s' got '%s'", expected2, err.Error())
+ }
+
+}
+
+func BenchmarkCache_Store(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+}
+
+func TestFilesystemCache_StoreNoIndex(t *testing.T) {
+
+ const expected = "value for key [] not in cache"
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer fc.Close()
+
+ // it should store a value
+ fc.storeNoIndex(cacheKey, []byte("data"))
+
+ // it should retrieve a value
+ data, ls, err := fc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // test for error when bad key name
+ fc.storeNoIndex("", []byte("data"))
+
+ data, ls, err = fc.retrieve("", false, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected)
+ fc.Close()
+ }
+ if err.Error() != expected {
+ t.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_StoreNoIndex(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+ for n := 0; n < b.N; n++ {
+ expected := `value for key [] not in cache`
+ // it should store a value
+ fc.storeNoIndex(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)))
+
+ // it should retrieve a value
+ data, ls, err := fc.retrieve(cacheKey+strconv.Itoa(n), false, false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ // test for error when bad key name
+ fc.storeNoIndex("", []byte("data"+strconv.Itoa(n)))
+
+ data, ls, err = fc.retrieve("", false, false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected)
+ fc.Close()
+ }
+ if err.Error() != expected {
+ b.Errorf("expected error '%s' got '%s'", expected, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestFilesystemCache_SetTTL(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ fc := Cache{Config: &cacheConfig}
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer fc.Close()
+
+ exp1 := fc.Index.GetExpiration(cacheKey)
+ if !exp1.IsZero() {
+ t.Errorf("expected Zero time, got %v", exp1)
+ }
+
+ // it should store a value
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ exp1 = fc.Index.GetExpiration(cacheKey)
+ if exp1.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ fc.SetTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ exp2 := fc.Index.GetExpiration(cacheKey)
+ if exp2.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3595
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ t.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+
+}
+
+func BenchmarkCache_SetTTL(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+ for n := 0; n < b.N; n++ {
+ exp1 := fc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp1.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ fc.SetTTL(cacheKey+strconv.Itoa(n), time.Duration(3600)*time.Second)
+
+ exp2 := fc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp2.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3595
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ b.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+ }
+}
+
+func TestFilesystemCache_Retrieve(t *testing.T) {
+
+ const expected1 = `value for key [cacheKey] not in cache`
+ const expected2 = `value for key [cacheKey] could not be deserialized from cache`
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := fc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ fc.SetTTL(cacheKey, -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = fc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ fc.Close()
+ }
+ if err.Error() != expected1 {
+ t.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ // should fail
+ filename := fc.getFileName(cacheKey)
+ err = ioutil.WriteFile(filename, []byte("junk"), os.FileMode(0777))
+ if err != nil {
+ t.Error(err)
+ }
+ _, ls, err = fc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected error for %s", expected2)
+ }
+ if err.Error() != expected2 {
+ t.Errorf("expected error '%s' got '%s'", expected2, err.Error())
+ }
+ if ls != status.LookupStatusError {
+ t.Errorf("expected %s got %s", status.LookupStatusError, ls)
+ }
+}
+
+func BenchmarkCache_Retrieve(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+
+ for n := 0; n < b.N; n++ {
+ expected1 := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ expected2 := `value for key [` + cacheKey + strconv.Itoa(n) + `] could not be deserialized from cache`
+
+ data, ls, err := fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ fc.SetTTL(cacheKey+strconv.Itoa(n), -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected1)
+ fc.Close()
+ }
+ if err.Error() != expected1 {
+ b.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ filename := fc.getFileName(cacheKey + strconv.Itoa(n))
+ // create a corrupted cache entry and expect an error
+ ioutil.WriteFile(filename, []byte("junk"), os.FileMode(0777))
+
+ // it should fail to retrieve a value
+ data, ls, err = fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected error for %s", expected2)
+ fc.Close()
+ }
+ if err.Error() != expected2 {
+ b.Errorf("expected error '%s' got '%s'", expected2, err.Error())
+ }
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusError {
+ b.Errorf("expected %s got %s", status.LookupStatusError, ls)
+ }
+ }
+}
+
+func TestFilesystemCache_Remove(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer fc.Close()
+
+ // it should store a value
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := fc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ fc.Remove(cacheKey)
+
+ // it should be a cache miss
+ _, ls, err = fc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_Remove(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+
+ for n := 0; n < b.N; n++ {
+ var data []byte
+ data, ls, err := fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\"", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ fc.Remove(cacheKey + strconv.Itoa(n))
+
+ // this should now return error
+ data, ls, err = fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ expectederr := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ if err == nil {
+ b.Errorf("expected error for %s", expectederr)
+ fc.Close()
+ }
+ if err.Error() != expectederr {
+ b.Errorf("expected error '%s' got '%s'", expectederr, err.Error())
+ }
+
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestFilesystemCache_BulkRemove(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ defer os.RemoveAll(cacheConfig.Filesystem.CachePath)
+ fc := Cache{Config: &cacheConfig}
+
+ err := fc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer fc.Close()
+
+ // it should store a value
+ err = fc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := fc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ fc.BulkRemove([]string{cacheKey}, true)
+
+ // it should be a cache miss
+ _, ls, err = fc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_BulkRemove(b *testing.B) {
+ fc := storeBenchmark(b)
+ defer fc.Close()
+
+ var keyArray []string
+ for n := 0; n < b.N; n++ {
+ keyArray = append(keyArray, cacheKey+strconv.Itoa(n))
+ }
+
+ fc.BulkRemove(keyArray, true)
+
+ // it should be a cache miss
+ for n := 0; n < b.N; n++ {
+ _, ls, err := fc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
diff --git a/internal/cache/index/index.go b/internal/cache/index/index.go
new file mode 100644
index 000000000..806f1de7a
--- /dev/null
+++ b/internal/cache/index/index.go
@@ -0,0 +1,364 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package index
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+//go:generate msgp
+
+// IndexKey is the key under which the index will write itself to its associated cache
+const IndexKey = "cache.index"
+
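+// indexLock serializes access to the index's object map and size counters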
+var indexLock = sync.Mutex{}
+
+// Index maintains metadata about a Cache when Retention enforcement is managed internally,
+// like memory or bbolt. It is not used for independently managed caches like Redis.
+type Index struct {
+ // CacheSize represents the size of the cache in bytes
+ CacheSize int64 `msg:"cache_size"`
+ // ObjectCount represents the count of objects in the Cache
+ ObjectCount int64 `msg:"object_count"`
+ // Objects is a map of Objects in the Cache
+ Objects map[string]*Object `msg:"objects"`
+
+ name string `msg:"-"`
+ cacheType string `msg:"-"`
+ config config.CacheIndexConfig `msg:"-"`
+ bulkRemoveFunc func([]string, bool) `msg:"-"`
+ reapInterval time.Duration `msg:"-"`
+ flushInterval time.Duration `msg:"-"`
+ flushFunc func(cacheKey string, data []byte) `msg:"-"`
+ lastWrite time.Time `msg:"-"`
+}
+
+// ToBytes returns a serialized byte slice representing the Index
+func (idx *Index) ToBytes() []byte {
+ bytes, _ := idx.MarshalMsg(nil)
+ return bytes
+}
+
+// Object contains metadata about an item in the Cache
+type Object struct {
+ // Key represents the name of the Object and is the accessor in a hashed collection of Cache Objects
+ Key string `msg:"key"`
+ // Expiration represents the time that the Object expires from Cache
+ Expiration time.Time `msg:"expiration"`
+ // LastWrite is the time the object was last Written
+ LastWrite time.Time `msg:"lastwrite"`
+ // LastAccess is the time the object was last Accessed
+ LastAccess time.Time `msg:"lastaccess"`
+ // Size is the size of the Object in bytes
+ Size int64 `msg:"size"`
+ // Value is the value of the Object stored in the Cache
+ // It is used by Caches but not by the Index
+ Value []byte `msg:"value,omitempty"`
+ // ReferenceValue is an interface value for storing objects by reference in a memory cache
+ // Since a memory cache index is never recovered from disk on startup, it is not serialized with msgpack
+ ReferenceValue cache.ReferenceObject `msg:"-"`
+}
+
+// ToBytes returns a serialized byte slice representing the Object
+func (o *Object) ToBytes() []byte {
+ bytes, _ := o.MarshalMsg(nil)
+ return bytes
+}
+
+// ObjectFromBytes returns a deserialized Cache Object from a serialized byte slice
+func ObjectFromBytes(data []byte) (*Object, error) {
+ o := &Object{}
+ _, err := o.UnmarshalMsg(data)
+ return o, err
+}
+
+// NewIndex returns a new Index based on the provided inputs
+func NewIndex(cacheName, cacheType string, indexData []byte, cfg config.CacheIndexConfig, bulkRemoveFunc func([]string, bool), flushFunc func(cacheKey string, data []byte)) *Index {
+ i := &Index{}
+
+ if len(indexData) > 0 {
+ i.UnmarshalMsg(indexData)
+ } else {
+ i.Objects = make(map[string]*Object)
+ }
+
+ i.name = cacheName
+ i.cacheType = cacheType
+ i.flushInterval = cfg.FlushInterval
+ i.flushFunc = flushFunc
+ i.reapInterval = cfg.ReapInterval
+ i.bulkRemoveFunc = bulkRemoveFunc
+ i.config = cfg
+
+ if flushFunc != nil {
+ if i.flushInterval > 0 {
+ go i.flusher()
+ } else {
+ log.Warn("cache index flusher did not start", log.Pairs{"cacheName": i.name, "flushInterval": i.flushInterval})
+ }
+ }
+
+ if i.reapInterval > 0 {
+ go i.reaper()
+ } else {
+ log.Warn("cache reaper did not start", log.Pairs{"cacheName": i.name, "reapInterval": i.reapInterval})
+ }
+
+ metrics.CacheMaxObjects.WithLabelValues(cacheName, cacheType).Set(float64(cfg.MaxSizeObjects))
+ metrics.CacheMaxBytes.WithLabelValues(cacheName, cacheType).Set(float64(cfg.MaxSizeBytes))
+
+ return i
+}
+
+// UpdateObjectAccessTime updates the LastAccess for the object with the provided key
+func (idx *Index) UpdateObjectAccessTime(key string) {
+ indexLock.Lock()
+ if _, ok := idx.Objects[key]; ok {
+ idx.Objects[key].LastAccess = time.Now()
+ }
+ indexLock.Unlock()
+
+}
+
+// UpdateObjectTTL updates the Expiration for the object with the provided key
+func (idx *Index) UpdateObjectTTL(key string, ttl time.Duration) {
+ indexLock.Lock()
+ if _, ok := idx.Objects[key]; ok {
+ idx.Objects[key].Expiration = time.Now().Add(ttl)
+ }
+ indexLock.Unlock()
+}
+
+// UpdateObject writes or updates the Index Metadata for the provided Object
+func (idx *Index) UpdateObject(obj *Object) {
+
+ key := obj.Key
+ if key == "" {
+ return
+ }
+
+ indexLock.Lock()
+
+ idx.lastWrite = time.Now()
+
+ if obj.ReferenceValue != nil {
+ obj.Size = int64(obj.ReferenceValue.Size())
+ } else {
+ obj.Size = int64(len(obj.Value))
+ }
+ obj.Value = nil
+ obj.LastAccess = time.Now()
+ obj.LastWrite = obj.LastAccess
+
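+ // when replacing an existing entry, adjust the cache size by the delta between the new and old object sizes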
+ if o, ok := idx.Objects[key]; ok {
+ idx.CacheSize += obj.Size - o.Size
+ } else {
+ idx.CacheSize += obj.Size
+ idx.ObjectCount++
+ }
+
+ cache.ObserveCacheSizeChange(idx.name, idx.cacheType, idx.CacheSize, idx.ObjectCount)
+
+ idx.Objects[key] = obj
+ indexLock.Unlock()
+}
+
+// RemoveObject removes an Object's Metadata from the Index
+func (idx *Index) RemoveObject(key string, noLock bool) {
+
+ if !noLock {
+ indexLock.Lock()
+ idx.lastWrite = time.Now()
+ }
+ if o, ok := idx.Objects[key]; ok {
+ idx.CacheSize -= o.Size
+ idx.ObjectCount--
+
+ cache.ObserveCacheOperation(idx.name, idx.cacheType, "del", "none", float64(o.Size))
+
+ delete(idx.Objects, key)
+ cache.ObserveCacheSizeChange(idx.name, idx.cacheType, idx.CacheSize, idx.ObjectCount)
+ }
+ if !noLock {
+ indexLock.Unlock()
+ }
+
+}
+
+// GetExpiration returns the cache index's expiration for the object of the given key
+func (idx *Index) GetExpiration(cacheKey string) time.Time {
+ indexLock.Lock()
+ if o, ok := idx.Objects[cacheKey]; ok {
+ indexLock.Unlock()
+ return o.Expiration
+ }
+ indexLock.Unlock()
+ return time.Time{}
+}
+
+// flusher periodically calls the cache's index flush func that writes the cache index to disk
+func (idx *Index) flusher() {
+ var lastFlush time.Time
+ for {
+ time.Sleep(idx.flushInterval)
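+ // skip this flush cycle if the index has not been written since the last flush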
+ if idx.lastWrite.Before(lastFlush) {
+ continue
+ }
+ idx.flushOnce()
+ lastFlush = time.Now()
+ }
+}
+
+func (idx *Index) flushOnce() {
+ indexLock.Lock()
+ bytes, err := idx.MarshalMsg(nil)
+ indexLock.Unlock()
+ if err != nil {
+ log.Warn("unable to serialize index for flushing", log.Pairs{"cacheName": idx.name, "detail": err.Error()})
+ return
+ }
+ idx.flushFunc(IndexKey, bytes)
+}
+
+// reaper continually iterates through the cache to find expired elements and removes them
+func (idx *Index) reaper() {
+ for {
+ idx.reap()
+ time.Sleep(idx.reapInterval)
+ }
+}
+
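+// objectsAtime is a sortable slice of Objects ordered by LastAccess time, used to
+// select the least-recently-accessed objects for size-based eviction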
+type objectsAtime []*Object
+
+// reap makes a single iteration through the cache index to find and remove expired elements
+// and to evict least-recently-accessed elements until the cache is within its maximum allowed size
+func (idx *Index) reap() {
+
+ indexLock.Lock()
+ defer indexLock.Unlock()
+
+ removals := make([]string, 0)
+ remainders := make(objectsAtime, 0, idx.ObjectCount)
+
+ var cacheChanged bool
+
+ now := time.Now()
+
+ for _, o := range idx.Objects {
+ if o.Key == IndexKey {
+ continue
+ }
+ if o.Expiration.Before(now) && !o.Expiration.IsZero() {
+ removals = append(removals, o.Key)
+ } else {
+ remainders = append(remainders, o)
+ }
+ }
+
+ if len(removals) > 0 {
+ cache.ObserveCacheEvent(idx.name, idx.cacheType, "eviction", "ttl")
+ idx.bulkRemoveFunc(removals, true)
+ cacheChanged = true
+ }
+
+ if ((idx.config.MaxSizeBytes > 0 && idx.CacheSize > idx.config.MaxSizeBytes) || (idx.config.MaxSizeObjects > 0 && idx.ObjectCount > idx.config.MaxSizeObjects)) && len(remainders) > 0 {
+
+ var evictionType string
+ if idx.config.MaxSizeBytes > 0 && idx.CacheSize > idx.config.MaxSizeBytes {
+ evictionType = "size_bytes"
+ } else if idx.config.MaxSizeObjects > 0 && idx.ObjectCount > idx.config.MaxSizeObjects {
+ evictionType = "size_objects"
+ } else {
+ return
+ }
+
+ log.Debug("max cache size reached. evicting least-recently-accessed records",
+ log.Pairs{
+ "reason": evictionType,
+ "cacheSizeBytes": idx.CacheSize, "maxSizeBytes": idx.config.MaxSizeBytes,
+ "cacheSizeObjects": idx.ObjectCount, "maxSizeObjects": idx.config.MaxSizeObjects,
+ },
+ )
+
+ removals = make([]string, 0)
+
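+ // order the remaining objects from least to most recently accessed so eviction starts with the coldest entries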
+ sort.Sort(remainders)
+
+ i := 0
+ j := len(remainders)
+
+ if evictionType == "size_bytes" {
+ bytesNeeded := (idx.CacheSize - idx.config.MaxSizeBytes)
+ if idx.config.MaxSizeBytes > idx.config.MaxSizeBackoffBytes {
+ bytesNeeded += idx.config.MaxSizeBackoffBytes
+ }
+ bytesSelected := int64(0)
+ for bytesSelected < bytesNeeded && i < j {
+ removals = append(removals, remainders[i].Key)
+ bytesSelected += remainders[i].Size
+ i++
+ }
+ } else {
+ objectsNeeded := (idx.ObjectCount - idx.config.MaxSizeObjects)
+ if idx.config.MaxSizeObjects > idx.config.MaxSizeBackoffObjects {
+ objectsNeeded += idx.config.MaxSizeBackoffObjects
+ }
+ objectsSelected := int64(0)
+ for objectsSelected < objectsNeeded && i < j {
+ removals = append(removals, remainders[i].Key)
+ objectsSelected++
+ i++
+ }
+ }
+
+ if len(removals) > 0 {
+ cache.ObserveCacheEvent(idx.name, idx.cacheType, "eviction", evictionType)
+ idx.bulkRemoveFunc(removals, true)
+ cacheChanged = true
+ }
+
+ log.Debug("size-based cache eviction exercise completed",
+ log.Pairs{
+ "reason": evictionType,
+ "cacheSizeBytes": idx.CacheSize, "maxSizeBytes": idx.config.MaxSizeBytes,
+ "cacheSizeObjects": idx.ObjectCount, "maxSizeObjects": idx.config.MaxSizeObjects,
+ })
+
+ }
+ if cacheChanged {
+ idx.lastWrite = time.Now()
+ }
+}
+
+// Len returns the number of Objects in the objectsAtime slice
+func (o objectsAtime) Len() int {
+ return len(o)
+}
+
+// Less returns true if the Object at index i was last accessed before the Object at index j
+func (o objectsAtime) Less(i, j int) bool {
+ return o[i].LastAccess.Before(o[j].LastAccess)
+}
+
+// Swap exchanges the Objects at indexes i and j
+func (o objectsAtime) Swap(i, j int) {
+ o[i], o[j] = o[j], o[i]
+}
diff --git a/internal/cache/index/index_gen.go b/internal/cache/index/index_gen.go
new file mode 100644
index 000000000..46b6217a5
--- /dev/null
+++ b/internal/cache/index/index_gen.go
@@ -0,0 +1,467 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package index
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *Index) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "CacheSize":
+ z.CacheSize, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "ObjectCount":
+ z.ObjectCount, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "Objects":
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ if z.Objects == nil {
+ z.Objects = make(map[string]*Object, zb0002)
+ } else if len(z.Objects) > 0 {
+ for key := range z.Objects {
+ delete(z.Objects, key)
+ }
+ }
+ for zb0002 > 0 {
+ zb0002--
+ var za0001 string
+ var za0002 *Object
+ za0001, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ za0002 = nil
+ } else {
+ if za0002 == nil {
+ za0002 = new(Object)
+ }
+ err = za0002.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ z.Objects[za0001] = za0002
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *Index) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 3
+ // write "CacheSize"
+ err = en.Append(0x83, 0xa9, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.CacheSize)
+ if err != nil {
+ return
+ }
+ // write "ObjectCount"
+ err = en.Append(0xab, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.ObjectCount)
+ if err != nil {
+ return
+ }
+ // write "Objects"
+ err = en.Append(0xa7, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Objects)))
+ if err != nil {
+ return
+ }
+ for za0001, za0002 := range z.Objects {
+ err = en.WriteString(za0001)
+ if err != nil {
+ return
+ }
+ if za0002 == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = za0002.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *Index) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "CacheSize"
+ o = append(o, 0x83, 0xa9, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65)
+ o = msgp.AppendInt64(o, z.CacheSize)
+ // string "ObjectCount"
+ o = append(o, 0xab, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendInt64(o, z.ObjectCount)
+ // string "Objects"
+ o = append(o, 0xa7, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
+ o = msgp.AppendMapHeader(o, uint32(len(z.Objects)))
+ for za0001, za0002 := range z.Objects {
+ o = msgp.AppendString(o, za0001)
+ if za0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = za0002.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Index) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "CacheSize":
+ z.CacheSize, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "ObjectCount":
+ z.ObjectCount, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "Objects":
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if z.Objects == nil {
+ z.Objects = make(map[string]*Object, zb0002)
+ } else if len(z.Objects) > 0 {
+ for key := range z.Objects {
+ delete(z.Objects, key)
+ }
+ }
+ for zb0002 > 0 {
+ var za0001 string
+ var za0002 *Object
+ zb0002--
+ za0001, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ za0002 = nil
+ } else {
+ if za0002 == nil {
+ za0002 = new(Object)
+ }
+ bts, err = za0002.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ }
+ z.Objects[za0001] = za0002
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Index) Msgsize() (s int) {
+ s = 1 + 10 + msgp.Int64Size + 12 + msgp.Int64Size + 8 + msgp.MapHeaderSize
+ if z.Objects != nil {
+ for za0001, za0002 := range z.Objects {
+ _ = za0002
+ s += msgp.StringPrefixSize + len(za0001)
+ if za0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += za0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *Object) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "key":
+ z.Key, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "expiration":
+ z.Expiration, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "lastwrite":
+ z.LastWrite, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "lastaccess":
+ z.LastAccess, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "size":
+ z.Size, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "value":
+ z.Value, err = dc.ReadBytes(z.Value)
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *Object) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 6
+ // write "key"
+ err = en.Append(0x86, 0xa3, 0x6b, 0x65, 0x79)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Key)
+ if err != nil {
+ return
+ }
+ // write "expiration"
+ err = en.Append(0xaa, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.Expiration)
+ if err != nil {
+ return
+ }
+ // write "lastwrite"
+ err = en.Append(0xa9, 0x6c, 0x61, 0x73, 0x74, 0x77, 0x72, 0x69, 0x74, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.LastWrite)
+ if err != nil {
+ return
+ }
+ // write "lastaccess"
+ err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.LastAccess)
+ if err != nil {
+ return
+ }
+ // write "size"
+ err = en.Append(0xa4, 0x73, 0x69, 0x7a, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.Size)
+ if err != nil {
+ return
+ }
+ // write "value"
+ err = en.Append(0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteBytes(z.Value)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *Object) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 6
+ // string "key"
+ o = append(o, 0x86, 0xa3, 0x6b, 0x65, 0x79)
+ o = msgp.AppendString(o, z.Key)
+ // string "expiration"
+ o = append(o, 0xaa, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+ o = msgp.AppendTime(o, z.Expiration)
+ // string "lastwrite"
+ o = append(o, 0xa9, 0x6c, 0x61, 0x73, 0x74, 0x77, 0x72, 0x69, 0x74, 0x65)
+ o = msgp.AppendTime(o, z.LastWrite)
+ // string "lastaccess"
+ o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73)
+ o = msgp.AppendTime(o, z.LastAccess)
+ // string "size"
+ o = append(o, 0xa4, 0x73, 0x69, 0x7a, 0x65)
+ o = msgp.AppendInt64(o, z.Size)
+ // string "value"
+ o = append(o, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65)
+ o = msgp.AppendBytes(o, z.Value)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "key":
+ z.Key, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "expiration":
+ z.Expiration, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "lastwrite":
+ z.LastWrite, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "lastaccess":
+ z.LastAccess, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "size":
+ z.Size, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "value":
+ z.Value, bts, err = msgp.ReadBytesBytes(bts, z.Value)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Object) Msgsize() (s int) {
+ s = 1 + 4 + msgp.StringPrefixSize + len(z.Key) + 11 + msgp.TimeSize + 10 + msgp.TimeSize + 11 + msgp.TimeSize + 5 + msgp.Int64Size + 6 + msgp.BytesPrefixSize + len(z.Value)
+ return
+}
diff --git a/internal/cache/index/index_gen_test.go b/internal/cache/index/index_gen_test.go
new file mode 100644
index 000000000..bd932c02b
--- /dev/null
+++ b/internal/cache/index/index_gen_test.go
@@ -0,0 +1,249 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package index
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalIndex(t *testing.T) {
+ v := Index{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgIndex(b *testing.B) {
+ v := Index{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgIndex(b *testing.B) {
+ v := Index{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalIndex(b *testing.B) {
+ v := Index{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeIndex(t *testing.T) {
+ v := Index{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := Index{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeIndex(b *testing.B) {
+ v := Index{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeIndex(b *testing.B) {
+ v := Index{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalObject(t *testing.T) {
+ v := Object{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgObject(b *testing.B) {
+ v := Object{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgObject(b *testing.B) {
+ v := Object{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalObject(b *testing.B) {
+ v := Object{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeObject(t *testing.T) {
+ v := Object{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := Object{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeObject(b *testing.B) {
+ v := Object{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeObject(b *testing.B) {
+ v := Object{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/internal/cache/index/index_test.go b/internal/cache/index/index_test.go
new file mode 100644
index 000000000..ea0f451fc
--- /dev/null
+++ b/internal/cache/index/index_test.go
@@ -0,0 +1,286 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package index
+
+import (
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+var testBulkIndex *Index
+
+func testBulkRemoveFunc(cacheKeys []string, noLock bool) {
+ for _, cacheKey := range cacheKeys {
+ testBulkIndex.RemoveObject(cacheKey, noLock)
+ }
+}
+func fakeFlusherFunc(string, []byte) {}
+
+type testReferenceObject struct {
+}
+
+func (r *testReferenceObject) Size() int {
+ return 1
+}
+
+func TestNewIndex(t *testing.T) {
+ cacheConfig := &config.CachingConfig{CacheType: "test", Index: config.CacheIndexConfig{ReapInterval: time.Second * time.Duration(10), FlushInterval: time.Second * time.Duration(10)}}
+ idx := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+
+ // this gives a chance for the reaper to run through for test coverage
+ time.Sleep(1 * time.Second)
+
+ if idx.name != "test" {
+ t.Errorf("expected test got %s", idx.name)
+ }
+
+ idx.flushOnce()
+
+ idx2 := NewIndex("test", "test", idx.ToBytes(), cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+ if idx2 == nil {
+ t.Errorf("nil cache index")
+ }
+
+ cacheConfig.Index.FlushInterval = 0
+ cacheConfig.Index.ReapInterval = 0
+ idx3 := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+ if idx3 == nil {
+ t.Errorf("nil cache index")
+ }
+
+}
+
+func TestReap(t *testing.T) {
+
+ cacheConfig := &config.CachingConfig{CacheType: "test", Index: config.CacheIndexConfig{ReapInterval: time.Second * time.Duration(10), FlushInterval: time.Second * time.Duration(10)}}
+ cacheConfig.Index.MaxSizeObjects = 5
+ cacheConfig.Index.MaxSizeBackoffObjects = 3
+ cacheConfig.Index.MaxSizeBytes = 100
+ cacheConfig.Index.MaxSizeBackoffBytes = 30
+
+ idx := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+ if idx.name != "test" {
+ t.Errorf("expected test got %s", idx.name)
+ }
+
+ testBulkIndex = idx
+
+ // add fake index key to cover the case that the reaper must skip it
+ idx.UpdateObject(&Object{Key: "cache.index", Value: []byte("test_value")})
+
+ // add expired key to cover the case that the reaper removes it
+ idx.UpdateObject(&Object{Key: "test.1", Value: []byte("test_value"), Expiration: time.Now().Add(-time.Minute)})
+
+ // add key with no expiration which should not be reaped
+ idx.UpdateObject(&Object{Key: "test.2", Value: []byte("test_value")})
+
+ // add key with future expiration which should not be reaped
+ idx.UpdateObject(&Object{Key: "test.3", Value: []byte("test_value"), Expiration: time.Now().Add(time.Minute)})
+
+ // trigger a reap that will only remove expired elements but not size down the full cache
+ idx.reap()
+
+ // add key with future expiration which should not be reaped
+ idx.UpdateObject(&Object{Key: "test.4", Value: []byte("test_value"), Expiration: time.Now().Add(time.Minute)})
+
+ // add key with future expiration which should not be reaped
+ idx.UpdateObject(&Object{Key: "test.5", Value: []byte("test_value"), Expiration: time.Now().Add(time.Minute)})
+
+ // add key with future expiration which should not be reaped
+ idx.UpdateObject(&Object{Key: "test.6", Value: []byte("test_value"), Expiration: time.Now().Add(time.Minute)})
+
+ // trigger size-based reap eviction of some elements
+ idx.reap()
+
+ if _, ok := idx.Objects["test.1"]; ok {
+ t.Errorf("expected key %s to be missing", "test.1")
+ }
+
+ if _, ok := idx.Objects["test.2"]; ok {
+ t.Errorf("expected key %s to be missing", "test.2")
+ }
+
+ if _, ok := idx.Objects["test.3"]; ok {
+ t.Errorf("expected key %s to be missing", "test.3")
+ }
+
+ if _, ok := idx.Objects["test.4"]; ok {
+ t.Errorf("expected key %s to be missing", "test.4")
+ }
+
+ if _, ok := idx.Objects["test.5"]; ok {
+ t.Errorf("expected key %s to be missing", "test.5")
+ }
+
+ if _, ok := idx.Objects["test.6"]; !ok {
+ t.Errorf("expected key %s to be present", "test.6")
+ }
+
+ // add key with large body to reach byte size threshold
+ idx.UpdateObject(&Object{Key: "test.7", Value: []byte("test_value00000000000000000000000000000000000000000000000000000000000000000000000000000"), Expiration: time.Now().Add(time.Minute)})
+
+ // trigger a byte-based reap
+ idx.reap()
+
+ // only cache index should be left
+
+ if _, ok := idx.Objects["test.6"]; ok {
+ t.Errorf("expected key %s to be missing", "test.6")
+ }
+
+ if _, ok := idx.Objects["test.7"]; ok {
+ t.Errorf("expected key %s to be missing", "test.7")
+ }
+
+}
+
+func TestObjectFromBytes(t *testing.T) {
+
+ obj := &Object{}
+ b := obj.ToBytes()
+ obj2, err := ObjectFromBytes(b)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if obj2 == nil {
+ t.Errorf("nil cache index")
+ }
+
+}
+
+func TestUpdateObject(t *testing.T) {
+
+ obj := Object{Key: "", Value: []byte("test_value")}
+ cacheConfig := &config.CachingConfig{CacheType: "test", Index: config.CacheIndexConfig{ReapInterval: time.Second * time.Duration(10), FlushInterval: time.Second * time.Duration(10)}}
+ idx := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+
+ idx.UpdateObject(&obj)
+ if _, ok := idx.Objects["test"]; ok {
+ t.Errorf("test object should be missing from index")
+ }
+
+ obj.Key = "test"
+
+ idx.UpdateObject(&obj)
+ if _, ok := idx.Objects["test"]; !ok {
+ t.Errorf("test object missing from index")
+ }
+
+ // do it again to cover the index hit case
+ idx.UpdateObject(&obj)
+ if _, ok := idx.Objects["test"]; !ok {
+ t.Errorf("test object missing from index")
+ }
+
+ idx.Objects["test"].LastAccess = time.Time{}
+ idx.UpdateObjectAccessTime("test")
+
+ if idx.Objects["test"].LastAccess.IsZero() {
+ t.Errorf("test object last access time is wrong")
+ }
+
+ obj = Object{Key: "test2", ReferenceValue: &testReferenceObject{}}
+
+ idx.UpdateObject(&obj)
+ if _, ok := idx.Objects["test2"]; !ok {
+ t.Errorf("test object missing from index")
+ }
+
+}
+
+func TestRemoveObject(t *testing.T) {
+
+ obj := Object{Key: "test", Value: []byte("test_value")}
+ cacheConfig := &config.CachingConfig{CacheType: "test", Index: config.CacheIndexConfig{ReapInterval: time.Second * time.Duration(10), FlushInterval: time.Second * time.Duration(10)}}
+ idx := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+
+ idx.UpdateObject(&obj)
+ if _, ok := idx.Objects["test"]; !ok {
+ t.Errorf("test object missing from index")
+ }
+
+ idx.RemoveObject("test", false)
+ if _, ok := idx.Objects["test"]; ok {
+ t.Errorf("test object should be missing from index")
+ }
+
+}
+
+func TestSort(t *testing.T) {
+
+ o := objectsAtime{
+ &Object{
+ Key: "3",
+ LastAccess: time.Unix(3, 0),
+ },
+ &Object{
+ Key: "1",
+ LastAccess: time.Unix(1, 0),
+ },
+ &Object{
+ Key: "2",
+ LastAccess: time.Unix(2, 0),
+ },
+ }
+ sort.Sort(o)
+
+ if o[0].Key != "1" {
+ t.Errorf("expected %s got %s", "1", o[0].Key)
+ }
+
+ if o[1].Key != "2" {
+ t.Errorf("expected %s got %s", "2", o[1].Key)
+ }
+
+ if o[2].Key != "3" {
+ t.Errorf("expected %s got %s", "3", o[2].Key)
+ }
+
+}
+
+func TestUpdateObjectTTL(t *testing.T) {
+
+ cacheKey := "test-ttl-key"
+ obj := Object{Key: cacheKey, Value: []byte("test_value")}
+ cacheConfig := &config.CachingConfig{CacheType: "test", Index: config.CacheIndexConfig{ReapInterval: time.Second * time.Duration(10), FlushInterval: time.Second * time.Duration(10)}}
+ idx := NewIndex("test", "test", nil, cacheConfig.Index, testBulkRemoveFunc, fakeFlusherFunc)
+
+ exp := idx.GetExpiration(cacheKey)
+ if !exp.IsZero() {
+ t.Errorf("expected zero time, got %v", exp)
+ }
+
+ idx.UpdateObject(&obj)
+
+ idx.UpdateObjectTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ if obj.Expiration.IsZero() {
+ t.Errorf("expected non-zero time, got %v", obj.Expiration)
+ }
+
+ exp = idx.GetExpiration(cacheKey)
+ if exp.IsZero() {
+ t.Errorf("expected non-zero time, got %v", obj.Expiration)
+ }
+
+}
diff --git a/internal/cache/memory/memory.go b/internal/cache/memory/memory.go
new file mode 100644
index 000000000..0e0e0860d
--- /dev/null
+++ b/internal/cache/memory/memory.go
@@ -0,0 +1,173 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package memory
+
+import (
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/index"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/pkg/locks"
+)
+
+var lockPrefix string
+
+// Cache defines a Memory Cache client that conforms to the Cache interface
+type Cache struct {
+ Name string
+ client sync.Map
+ Config *config.CachingConfig
+ Index *index.Index
+}
+
+// Configuration returns the Configuration for the Cache object
+func (c *Cache) Configuration() *config.CachingConfig {
+ return c.Config
+}
+
+// Connect initializes the Cache
+func (c *Cache) Connect() error {
+ log.Info("memorycache setup", log.Pairs{"name": c.Name, "maxSizeBytes": c.Config.Index.MaxSizeBytes, "maxSizeObjects": c.Config.Index.MaxSizeObjects})
+ lockPrefix = c.Name + ".memory."
+ c.client = sync.Map{}
+ c.Index = index.NewIndex(c.Name, c.Config.CacheType, nil, c.Config.Index, c.BulkRemove, nil)
+ return nil
+}
+
+// StoreReference stores an object directly to the memory cache without requiring serialization
+func (c *Cache) StoreReference(cacheKey string, data cache.ReferenceObject, ttl time.Duration) error {
+ return c.store(cacheKey, nil, data, ttl, true)
+}
+
+// Store places an object in the cache using the specified key and ttl
+func (c *Cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
+ return c.store(cacheKey, data, nil, ttl, true)
+}
+
+func (c *Cache) store(cacheKey string, byteData []byte, refData cache.ReferenceObject, ttl time.Duration, updateIndex bool) error {
+
+ locks.Acquire(lockPrefix + cacheKey)
+
+ var o1, o2 *index.Object
+ var l int
+ isDirect := byteData == nil && refData != nil
+ if byteData != nil {
+ l = len(byteData)
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "set", "none", float64(l))
+ o1 = &index.Object{Key: cacheKey, Value: byteData, Expiration: time.Now().Add(ttl)}
+ o2 = &index.Object{Key: cacheKey, Value: byteData, Expiration: time.Now().Add(ttl)}
+ } else if refData != nil {
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "setDirect", "none", 0)
+ o1 = &index.Object{Key: cacheKey, ReferenceValue: refData, Expiration: time.Now().Add(ttl)}
+ o2 = &index.Object{Key: cacheKey, ReferenceValue: refData, Expiration: time.Now().Add(ttl)}
+ }
+
+ go log.Debug("memorycache cache store", log.Pairs{"cacheKey": cacheKey, "length": l, "ttl": ttl, "is_direct": isDirect})
+
+ if o1 != nil && o2 != nil {
+ c.client.Store(cacheKey, o1)
+ if updateIndex {
+ c.Index.UpdateObject(o2)
+ }
+ }
+
+ locks.Release(lockPrefix + cacheKey)
+ return nil
+}
+
+// RetrieveReference looks for an object in cache and returns it (or an error if not found)
+func (c *Cache) RetrieveReference(cacheKey string, allowExpired bool) (interface{}, status.LookupStatus, error) {
+ o, s, err := c.retrieve(cacheKey, allowExpired, true)
+ if err != nil {
+ return nil, s, err
+ }
+ if o != nil {
+ return o.ReferenceValue, s, nil
+ }
+ return nil, s, nil
+}
+
+// Retrieve looks for an object in cache and returns it (or an error if not found)
+func (c *Cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error) {
+ o, s, err := c.retrieve(cacheKey, allowExpired, true)
+ if err != nil {
+ return nil, s, err
+ }
+ if o != nil {
+ return o.Value, s, nil
+ }
+ return nil, s, nil
+}
+
+func (c *Cache) retrieve(cacheKey string, allowExpired bool, atime bool) (*index.Object, status.LookupStatus, error) {
+
+ locks.Acquire(lockPrefix + cacheKey)
+
+ record, ok := c.client.Load(cacheKey)
+
+ if ok {
+ o := record.(*index.Object)
+ o.Expiration = c.Index.GetExpiration(cacheKey)
+
+ if allowExpired || o.Expiration.IsZero() || o.Expiration.After(time.Now()) {
+ log.Debug("memory cache retrieve", log.Pairs{"cacheKey": cacheKey})
+ if atime {
+ c.Index.UpdateObjectAccessTime(cacheKey)
+ }
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "get", "hit", float64(len(o.Value)))
+ locks.Release(lockPrefix + cacheKey)
+ return o, status.LookupStatusHit, nil
+ }
+ // Cache Object has been expired but not reaped, go ahead and delete it
+ go c.remove(cacheKey, false)
+ }
+ locks.Release(lockPrefix + cacheKey)
+ _, err := cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ return nil, status.LookupStatusKeyMiss, err
+
+}
+
+// SetTTL updates the TTL for the provided cache object
+func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {
+ c.Index.UpdateObjectTTL(cacheKey, ttl)
+}
+
+// Remove removes an object from the cache
+func (c *Cache) Remove(cacheKey string) {
+ c.remove(cacheKey, false)
+}
+
+func (c *Cache) remove(cacheKey string, noLock bool) {
+ locks.Acquire(lockPrefix + cacheKey)
+ c.client.Delete(cacheKey)
+ c.Index.RemoveObject(cacheKey, noLock)
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+ locks.Release(lockPrefix + cacheKey)
+}
+
+// BulkRemove removes a list of objects from the cache
+func (c *Cache) BulkRemove(cacheKeys []string, noLock bool) {
+ for _, cacheKey := range cacheKeys {
+ c.remove(cacheKey, noLock)
+ }
+}
+
+// Close is not used for Cache, and is here to fully prototype the Cache Interface
+func (c *Cache) Close() error {
+ return nil
+}
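+
+// Usage sketch (editor's illustrative note, not part of this change): a caller
+// is expected to populate a config.CachingConfig, call Connect, and then use
+// Store/Retrieve with a TTL. The cache name and key values below are placeholders.
+//
+//    cfg := &config.CachingConfig{CacheType: "memory"}
+//    mc := &Cache{Name: "default", Config: cfg}
+//    if err := mc.Connect(); err != nil {
+//        // handle error
+//    }
+//    _ = mc.Store("example-key", []byte("example-value"), 60*time.Second)
+//    data, lookupStatus, err := mc.Retrieve("example-key", false)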
diff --git a/internal/cache/memory/memory_test.go b/internal/cache/memory/memory_test.go
new file mode 100644
index 000000000..b32d9f250
--- /dev/null
+++ b/internal/cache/memory/memory_test.go
@@ -0,0 +1,460 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package memory
+
+import (
+ "io/ioutil"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/util/log"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+const cacheType = "memory"
+const cacheKey = "cacheKey"
+
+type testReferenceObject struct {
+}
+
+func (r *testReferenceObject) Size() int {
+ return 1
+}
+
+func storeBenchmark(b *testing.B) Cache {
+ log.Logger = log.ConsoleLogger("none")
+ cacheConfig := config.CachingConfig{CacheType: cacheType, Index: config.CacheIndexConfig{ReapInterval: 0}}
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ b.Error(err)
+ }
+ defer mc.Close()
+ for n := 0; n < b.N; n++ {
+ err = mc.Store(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)), time.Duration(60)*time.Second)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ return mc
+}
+
+func newCacheConfig(t *testing.T) config.CachingConfig {
+ dir, err := ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Fatalf("could not create temp directory (%s): %s", dir, err)
+ }
+ return config.CachingConfig{CacheType: cacheType, Index: config.CacheIndexConfig{ReapInterval: 0}}
+}
+
+func TestConfiguration(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+ cfg := mc.Configuration()
+ if cfg.CacheType != cacheType {
+ t.Fatalf("expected %s got %s", cacheType, cfg.CacheType)
+ }
+}
+
+func TestCache_Connect(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ // it should connect
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestCache_StoreReferenceDirect(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ // it should store a value
+ err = mc.store("test", nil, &testReferenceObject{}, 1*time.Second, true)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r, _, _ := mc.RetrieveReference("test", true)
+ if r == nil {
+ t.Errorf("expected a non-nil reference object, got nil")
+ }
+
+ // a lookup for a missing key should return a key miss error
+ _, _, err = mc.RetrieveReference("test2", true)
+ if err == nil {
+ t.Errorf("expected a key miss error for %s, got nil", "test2")
+ }
+
+}
+
+func TestCache_StoreReference(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ // it should store a value
+ err = mc.StoreReference(cacheKey, nil, time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestCache_Store(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ // it should store a value
+ err = mc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkCache_Store(b *testing.B) {
+ storeBenchmark(b)
+}
+
+func TestCache_Retrieve(t *testing.T) {
+
+ const expected1 = `value for key [cacheKey] not in cache`
+
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = mc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ var data []byte
+ var ls status.LookupStatus
+ data, ls, err = mc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\"", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ mc.SetTTL(cacheKey, -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = mc.Retrieve(cacheKey, false)
+ if err == nil {
+ mc.Close()
+ t.Fatalf("expected error for %s", expected1)
+ }
+ if err.Error() != expected1 {
+ t.Errorf("expected error '%s' got '%s'", expected1, err.Error())
+ }
+ if string(data) != "" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_Retrieve(b *testing.B) {
+ mc := storeBenchmark(b)
+
+ for n := 0; n < b.N; n++ {
+ var data []byte
+ data, ls, err := mc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\"", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ // expire the object
+ mc.SetTTL(cacheKey+strconv.Itoa(n), -1*time.Hour)
+
+ // this should now return error
+ data, ls, err = mc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ expectederr := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ if err == nil {
+ mc.Close()
+ b.Fatalf("expected error for %s", expectederr)
+ }
+ if err.Error() != expectederr {
+ b.Errorf("expected error '%s' got '%s'", expectederr, err.Error())
+ }
+
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestCache_Close(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+ mc.Close()
+}
+
+func TestCache_Remove(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer mc.Close()
+
+ // it should store a value
+ err = mc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := mc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ mc.Remove(cacheKey)
+
+ // it should be a cache miss
+ _, ls, err = mc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_Remove(b *testing.B) {
+ mc := storeBenchmark(b)
+
+ for n := 0; n < b.N; n++ {
+ var data []byte
+ data, ls, err := mc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\"", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ mc.Remove(cacheKey + strconv.Itoa(n))
+
+ // this should now return error
+ data, ls, err = mc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ expectederr := `value for key [` + cacheKey + strconv.Itoa(n) + `] not in cache`
+ if err == nil {
+ mc.Close()
+ b.Fatalf("expected error for %s", expectederr)
+ }
+ if err.Error() != expectederr {
+ b.Errorf("expected error '%s' got '%s'", expectederr, err.Error())
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+ if string(data) != "" {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ }
+}
+
+func TestCache_BulkRemove(t *testing.T) {
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer mc.Close()
+
+ // it should store a value
+ err = mc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := mc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ mc.BulkRemove([]string{cacheKey}, true)
+
+ // it should be a cache miss
+ _, ls, err = mc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+
+}
+
+func BenchmarkCache_BulkRemove(b *testing.B) {
+ var keyArray []string
+ for n := 0; n < b.N; n++ {
+ keyArray = append(keyArray, cacheKey+strconv.Itoa(n))
+ }
+
+ mc := storeBenchmark(b)
+
+ mc.BulkRemove(keyArray, true)
+
+ // it should be a cache miss
+ for n := 0; n < b.N; n++ {
+ _, ls, err := mc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestMemoryCache_SetTTL(t *testing.T) {
+
+ cacheConfig := newCacheConfig(t)
+ mc := Cache{Config: &cacheConfig}
+
+ err := mc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer mc.Close()
+
+ exp1 := mc.Index.GetExpiration(cacheKey)
+ if !exp1.IsZero() {
+ t.Errorf("expected Zero time, got %v", exp1)
+ }
+
+ // it should store a value
+ err = mc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ exp1 = mc.Index.GetExpiration(cacheKey)
+ if exp1.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ mc.SetTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ exp2 := mc.Index.GetExpiration(cacheKey)
+ if exp2.IsZero() {
+ t.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3540 (the new 3600s TTL minus the original 60s TTL)
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ t.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+
+}
+
+func BenchmarkCache_SetTTL(b *testing.B) {
+ mc := storeBenchmark(b)
+
+ for n := 0; n < b.N; n++ {
+ exp1 := mc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp1.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+60)
+ }
+
+ e1 := int(exp1.Unix())
+
+ mc.SetTTL(cacheKey+strconv.Itoa(n), time.Duration(3600)*time.Second)
+
+ exp2 := mc.Index.GetExpiration(cacheKey + strconv.Itoa(n))
+ if exp2.IsZero() {
+ b.Errorf("expected time %d, got zero", int(time.Now().Unix())+3600)
+ }
+ e2 := int(exp2.Unix())
+
+ // should be around 3540 (the new 3600s TTL minus the original 60s TTL)
+ diff := e2 - e1
+ const expected = 3500
+
+ if diff < expected {
+ b.Errorf("expected diff >= %d, got %d from: %d - %d", expected, diff, e2, e1)
+ }
+ }
+}
diff --git a/internal/cache/redis/client_types.go b/internal/cache/redis/client_types.go
new file mode 100644
index 000000000..e219dad00
--- /dev/null
+++ b/internal/cache/redis/client_types.go
@@ -0,0 +1,44 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import "strconv"
+
+// clientType enumerates the supported Redis client deployment models (standard, cluster, sentinel)
+type clientType int
+
+const (
+ clientTypeStandard = clientType(iota)
+ clientTypeCluster
+ clientTypeSentinel
+)
+
+var clientTypeNames = map[string]clientType{
+ "standard": clientTypeStandard,
+ "cluster": clientTypeCluster,
+ "sentinel": clientTypeSentinel,
+}
+
+var clientTypeValues = map[clientType]string{
+ clientTypeStandard: "standard",
+ clientTypeCluster: "cluster",
+ clientTypeSentinel: "sentinel",
+}
+
+func (t clientType) String() string {
+ if v, ok := clientTypeValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
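+
+// Usage sketch (editor's illustrative note, not part of this change): a
+// configured client type name can be resolved through clientTypeNames,
+// falling back to the standard client when the name is unrecognized:
+//
+//    ct, ok := clientTypeNames["cluster"]
+//    if !ok {
+//        ct = clientTypeStandard
+//    }
+//    _ = ct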
diff --git a/internal/cache/redis/client_types_test.go b/internal/cache/redis/client_types_test.go
new file mode 100644
index 000000000..a4b467ab2
--- /dev/null
+++ b/internal/cache/redis/client_types_test.go
@@ -0,0 +1,33 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "testing"
+)
+
+func TestClientTypeString(t *testing.T) {
+
+ t1 := clientTypeStandard
+ var t2 clientType = 20
+
+ if t1.String() != "standard" {
+ t.Errorf("expected %s got %s", "standard", t1.String())
+ }
+
+ if t2.String() != "20" {
+ t.Errorf("expected %s got %s", "20", t2.String())
+ }
+
+}
diff --git a/internal/cache/redis/cluster.go b/internal/cache/redis/cluster.go
new file mode 100644
index 000000000..02ffa08ca
--- /dev/null
+++ b/internal/cache/redis/cluster.go
@@ -0,0 +1,85 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "fmt"
+
+ "github.com/go-redis/redis"
+)
+
+func (c *Cache) clusterOpts() (*redis.ClusterOptions, error) {
+
+ if len(c.Config.Redis.Endpoints) == 0 {
+ return nil, fmt.Errorf("Invalid 'endpoints' config")
+ }
+
+ o := &redis.ClusterOptions{
+ Addrs: c.Config.Redis.Endpoints,
+ }
+
+ if c.Config.Redis.Password != "" {
+ o.Password = c.Config.Redis.Password
+ }
+
+ if c.Config.Redis.MaxRetries != 0 {
+ o.MaxRetries = c.Config.Redis.MaxRetries
+ }
+
+ if c.Config.Redis.MinRetryBackoffMS != 0 {
+ o.MinRetryBackoff = durationFromMS(c.Config.Redis.MinRetryBackoffMS)
+ }
+
+ if c.Config.Redis.MaxRetryBackoffMS != 0 {
+ o.MaxRetryBackoff = durationFromMS(c.Config.Redis.MaxRetryBackoffMS)
+ }
+
+ if c.Config.Redis.DialTimeoutMS != 0 {
+ o.DialTimeout = durationFromMS(c.Config.Redis.DialTimeoutMS)
+ }
+
+ if c.Config.Redis.ReadTimeoutMS != 0 {
+ o.ReadTimeout = durationFromMS(c.Config.Redis.ReadTimeoutMS)
+ }
+
+ if c.Config.Redis.WriteTimeoutMS != 0 {
+ o.WriteTimeout = durationFromMS(c.Config.Redis.WriteTimeoutMS)
+ }
+
+ if c.Config.Redis.PoolSize != 0 {
+ o.PoolSize = c.Config.Redis.PoolSize
+ }
+
+ if c.Config.Redis.MinIdleConns != 0 {
+ o.MinIdleConns = c.Config.Redis.MinIdleConns
+ }
+
+ if c.Config.Redis.MaxConnAgeMS != 0 {
+ o.MaxConnAge = durationFromMS(c.Config.Redis.MaxConnAgeMS)
+ }
+
+ if c.Config.Redis.PoolTimeoutMS != 0 {
+ o.PoolTimeout = durationFromMS(c.Config.Redis.PoolTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleTimeoutMS != 0 {
+ o.IdleTimeout = durationFromMS(c.Config.Redis.IdleTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleCheckFrequencyMS != 0 {
+ o.IdleCheckFrequency = durationFromMS(c.Config.Redis.IdleCheckFrequencyMS)
+ }
+
+ return o, nil
+}
diff --git a/internal/cache/redis/redis.go b/internal/cache/redis/redis.go
new file mode 100644
index 000000000..d1ae8a2dc
--- /dev/null
+++ b/internal/cache/redis/redis.go
@@ -0,0 +1,134 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+// Redis is the string "redis"
+const Redis = "redis"
+
+// Cache represents a redis cache object that conforms to the Cache interface
+type Cache struct {
+ Name string
+ Config *config.CachingConfig
+
+ client redis.Cmdable
+ closer func() error
+}
+
+// Configuration returns the Configuration for the Cache object
+func (c *Cache) Configuration() *config.CachingConfig {
+ return c.Config
+}
+
+// Connect connects to the configured Redis endpoint
+func (c *Cache) Connect() error {
+ log.Info("connecting to redis", log.Pairs{"protocol": c.Config.Redis.Protocol, "Endpoint": c.Config.Redis.Endpoint})
+
+ switch c.Config.Redis.ClientType {
+ case "sentinel":
+ opts, err := c.sentinelOpts()
+ if err != nil {
+ return err
+ }
+ client := redis.NewFailoverClient(opts)
+ c.closer = client.Close
+ c.client = client
+ case "cluster":
+ opts, err := c.clusterOpts()
+ if err != nil {
+ return err
+ }
+ client := redis.NewClusterClient(opts)
+ c.closer = client.Close
+ c.client = client
+ default:
+ opts, err := c.clientOpts()
+ if err != nil {
+ return err
+ }
+ client := redis.NewClient(opts)
+ c.closer = client.Close
+ c.client = client
+ }
+ return c.client.Ping().Err()
+}
+
+// Store places the data into the Redis Cache using the provided Key and TTL
+func (c *Cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "set", "none", float64(len(data)))
+ log.Debug("redis cache store", log.Pairs{"key": cacheKey})
+ return c.client.Set(cacheKey, data, ttl).Err()
+}
+
+// Retrieve gets data from the Redis Cache using the provided Key
+// because Redis manages Object Expiration internally, allowExpired is not used.
+func (c *Cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.LookupStatus, error) {
+ res, err := c.client.Get(cacheKey).Result()
+
+ if err == nil {
+ data := []byte(res)
+ log.Debug("redis cache retrieve", log.Pairs{"key": cacheKey})
+ cache.ObserveCacheOperation(c.Name, c.Config.CacheType, "get", "hit", float64(len(data)))
+ return data, status.LookupStatusHit, nil
+ }
+
+ if err == redis.Nil {
+ log.Debug("redis cache miss", log.Pairs{"key": cacheKey})
+ cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ return nil, status.LookupStatusKeyMiss, err
+ }
+
+ log.Debug("redis cache retrieve failed", log.Pairs{"key": cacheKey, "reason": err.Error()})
+ cache.ObserveCacheMiss(cacheKey, c.Name, c.Config.CacheType)
+ return nil, status.LookupStatusError, err
+}
+
+// Remove removes an object in cache, if present
+func (c *Cache) Remove(cacheKey string) {
+ log.Debug("redis cache remove", log.Pairs{"key": cacheKey})
+ c.client.Del(cacheKey)
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, 0)
+}
+
+// SetTTL updates the TTL for the provided cache object
+func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {
+ c.client.Expire(cacheKey, ttl)
+}
+
+// BulkRemove removes a list of objects from the cache. noLock is not used for Redis
+func (c *Cache) BulkRemove(cacheKeys []string, noLock bool) {
+ log.Debug("redis cache bulk remove", log.Pairs{})
+ c.client.Del(cacheKeys...)
+ cache.ObserveCacheDel(c.Name, c.Config.CacheType, float64(len(cacheKeys)))
+}
+
+// Close disconnects from the Redis Cache
+func (c *Cache) Close() error {
+ log.Info("closing redis connection", log.Pairs{})
+ return c.closer()
+}
+
+func durationFromMS(input int) time.Duration {
+ return time.Duration(int64(input)) * time.Millisecond
+}
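+
+// Usage sketch (editor's illustrative note, not part of this change): the
+// client type selected in Connect is driven entirely by configuration; the
+// endpoint addresses and master name below are placeholders.
+//
+//    cfg := &config.CachingConfig{CacheType: "redis"}
+//    cfg.Redis.ClientType = "sentinel"
+//    cfg.Redis.Endpoints = []string{"sentinel1:26379", "sentinel2:26379"}
+//    cfg.Redis.SentinelMaster = "mymaster"
+//    rc := &Cache{Name: "default", Config: cfg}
+//    if err := rc.Connect(); err != nil {
+//        // handle error
+//    }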
diff --git a/internal/cache/redis/redis_test.go b/internal/cache/redis/redis_test.go
new file mode 100644
index 000000000..66632993e
--- /dev/null
+++ b/internal/cache/redis/redis_test.go
@@ -0,0 +1,519 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/util/log"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+
+ "github.com/alicebob/miniredis"
+)
+
+func init() {
+ metrics.Init()
+}
+
+const cacheKey = `cacheKey`
+
+func storeBenchmark(b *testing.B) (*Cache, func()) {
+ log.Logger = log.ConsoleLogger("none")
+ rc, close := setupRedisCache(clientTypeStandard)
+ err := rc.Connect()
+ if err != nil {
+ b.Error(err)
+ }
+ for n := 0; n < b.N; n++ {
+ err := rc.Store(cacheKey+strconv.Itoa(n), []byte("data"+strconv.Itoa(n)), time.Duration(60)*time.Second)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ return rc, close
+}
+
+func setupRedisCache(ct clientType) (*Cache, func()) {
+ s, err := miniredis.Run()
+ if err != nil {
+ panic(err)
+ }
+ config.Config = config.NewConfig()
+ rcfg := config.RedisCacheConfig{Endpoint: s.Addr(), ClientType: ct.String()}
+ if ct != clientTypeStandard {
+ rcfg.Endpoint = ""
+ rcfg.Endpoints = []string{s.Addr()}
+ if ct == clientTypeSentinel {
+ rcfg.SentinelMaster = s.Addr()
+ }
+ }
+ close := func() {
+ s.Close()
+ }
+ cacheConfig := &config.CachingConfig{CacheType: "redis", Redis: rcfg}
+ config.Caches = map[string]*config.CachingConfig{"default": cacheConfig}
+
+ return &Cache{Config: cacheConfig}, close
+}
+
+func TestClientSelectionSentinel(t *testing.T) {
+ const expected1 = "ERR unknown command `sentinel`"
+ args := []string{"-config", "../../../testdata/test.redis-sentinel.conf",
+ "-origin-url", "http://0.0.0.0", "-origin-type", "rpc", "-log-level", "info"}
+ err := config.Load("trickster", "test", args)
+ if err != nil {
+ t.Error(err)
+ }
+ const cacheName = "test"
+ cfg, ok := config.Caches[cacheName]
+ if !ok {
+ t.Errorf("expected cache named %s", cacheName)
+ }
+ cache := Cache{Name: cacheName, Config: cfg}
+ if err != nil {
+ t.Error(err)
+ }
+ err = cache.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ }
+}
+
+func TestSentinelOpts(t *testing.T) {
+
+ const expected1 = `Invalid 'endpoints' config`
+ const expected2 = `Invalid 'sentinel_master' config`
+
+ rc, close := setupRedisCache(clientTypeSentinel)
+ defer close()
+
+ // test empty endpoint
+ rc.Configuration().Redis.Endpoints = nil
+ err := rc.Connect()
+ if err == nil || err.Error() != expected1 {
+ t.Errorf("expected error for %s", expected1)
+ }
+
+ rc.Configuration().Redis.Endpoints = []string{"test"}
+ rc.Configuration().Redis.SentinelMaster = ""
+
+ // test empty SentinelMaster
+ err = rc.Connect()
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected error for %s", expected2)
+ }
+}
+
+func TestClusterOpts(t *testing.T) {
+
+ const expected1 = `Invalid 'endpoints' config`
+
+ rc, close := setupRedisCache(clientTypeCluster)
+ defer close()
+
+ // test empty endpoint
+ rc.Configuration().Redis.Endpoints = nil
+ err := rc.Connect()
+ if err == nil || err.Error() != expected1 {
+ t.Errorf("expected error for %s", expected1)
+ }
+}
+
+func TestClientOpts(t *testing.T) {
+
+ const expected1 = `invalid endpoint: `
+
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ // test empty endpoint
+ rc.Configuration().Redis.Endpoint = ""
+ err := rc.Connect()
+ if err == nil || err.Error() != expected1 {
+ t.Errorf("expected error for %s", expected1)
+ }
+}
+
+func TestClientSelectionCluster(t *testing.T) {
+ expected1 := "invalid endpoint"
+ args := []string{"-config", "../../../testdata/test.redis-cluster.conf",
+ "-origin-url", "http://0.0.0.0", "-origin-type", "rpc", "-log-level", "info"}
+ err := config.Load("trickster", "test", args)
+ if err != nil {
+ t.Error(err)
+ }
+ const cacheName = "test"
+ cfg, ok := config.Caches[cacheName]
+ if !ok {
+ t.Errorf("expected cache named %s", cacheName)
+ }
+ cache := Cache{Name: cacheName, Config: cfg}
+ if err != nil {
+ t.Error(err)
+ }
+ err = cache.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ }
+}
+
+func TestClientSelectionStandard(t *testing.T) {
+ expected1 := "invalid endpoint"
+ args := []string{"-config", "../../../testdata/test.redis-standard.conf",
+ "-origin-url", "http://0.0.0.0", "-origin-type", "rpc", "-log-level", "info"}
+ err := config.Load("trickster", "test", args)
+ if err != nil {
+ t.Error(err)
+ }
+ const cacheName = "test"
+ cfg, ok := config.Caches[cacheName]
+ if !ok {
+ t.Errorf("expected cache named %s", cacheName)
+ }
+ cache := Cache{Name: cacheName, Config: cfg}
+ if err != nil {
+ t.Error(err)
+ }
+ err = cache.Connect()
+ if err == nil {
+ t.Errorf("expected error for %s", expected1)
+ }
+}
+
+func TestDurationFromMS(t *testing.T) {
+
+ tests := []struct {
+ input int
+ expected time.Duration
+ }{
+ {0, time.Duration(0)},
+ {5000, time.Duration(5000) * time.Millisecond},
+ {60000, time.Duration(60000) * time.Millisecond},
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ res := durationFromMS(test.input)
+
+ if res != test.expected {
+ t.Fatalf("Mismatch in durationFromMS: expected=%f actual=%f", test.expected.Seconds(), res.Seconds())
+ }
+ })
+ }
+
+}
+
+func TestRedisCache_SetTTL(t *testing.T) {
+
+ const expected = "data"
+
+ cache, closer := setupRedisCache(clientTypeStandard)
+ defer closer()
+
+ err := cache.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer cache.Close()
+
+ // it should store a value
+ err = cache.Store(cacheKey, []byte(expected), time.Duration(1)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+ cache.SetTTL(cacheKey, time.Duration(3600)*time.Second)
+
+ // since the TTL is updated to 1 hour, waiting more than the original TTL of 1s
+ // should not matter
+ time.Sleep(1010 * time.Millisecond)
+
+ val, ls, err := cache.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ if string(val) != expected {
+ t.Errorf("expected %s got %s", expected, string(val))
+ }
+
+}
+
+func BenchmarkCache_SetTTL(b *testing.B) {
+ rc, close := storeBenchmark(b)
+ defer close()
+
+ for n := 0; n < b.N; n++ {
+ expected := "data" + strconv.Itoa(n)
+ rc.SetTTL(cacheKey+strconv.Itoa(n), time.Duration(3600)*time.Second)
+ //time.Sleep(1010 * time.Millisecond)
+ val, ls, err := rc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(val) != expected {
+ b.Errorf("expected %s got %s", expected, string(val))
+ }
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ cfg := rc.Configuration()
+ if cfg.Redis.ClientType != clientTypeStandard.String() {
+ t.Fatalf("expected %s got %s", clientTypeStandard.String(), cfg.Redis.ClientType)
+ }
+}
+
+func TestRedisCache_Connect(t *testing.T) {
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ // it should connect
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestRedisCache_Store(t *testing.T) {
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should store a value
+ err = rc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkCache_Store(b *testing.B) {
+ rc, close := storeBenchmark(b)
+ if rc == nil {
+ b.Error("Could not create the redis cache")
+ }
+ defer close()
+}
+
+func TestRedisCache_Retrieve(t *testing.T) {
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ err = rc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := rc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\"", "data", data)
+ }
+}
+
+func BenchmarkCache_Retrieve(b *testing.B) {
+ rc, close := storeBenchmark(b)
+ defer close()
+
+ for n := 0; n < b.N; n++ {
+ data, ls, err := rc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\".", "data"+strconv.Itoa(n), data)
+ }
+ }
+}
+
+func TestRedisCache_Close(t *testing.T) {
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should close
+ err = rc.Close()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestCache_Remove(t *testing.T) {
+
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer rc.Close()
+
+ // it should store a value
+ err = rc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := rc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ rc.Remove(cacheKey)
+
+ // it should be a cache miss
+ _, ls, err = rc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_Remove(b *testing.B) {
+ rc, close := storeBenchmark(b)
+ defer close()
+
+ for n := 0; n < b.N; n++ {
+ var data []byte
+ data, ls, err := rc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err != nil {
+ b.Error(err)
+ }
+ if string(data) != "data"+strconv.Itoa(n) {
+ b.Errorf("wanted \"%s\". got \"%s\"", "data"+strconv.Itoa(n), data)
+ }
+ if ls != status.LookupStatusHit {
+ b.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ rc.Remove(cacheKey + strconv.Itoa(n))
+
+ _, ls, err = rc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected key not found error for %s", cacheKey+strconv.Itoa(n))
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
+
+func TestCache_BulkRemove(t *testing.T) {
+
+ rc, close := setupRedisCache(clientTypeStandard)
+ defer close()
+
+ err := rc.Connect()
+ if err != nil {
+ t.Error(err)
+ }
+ defer rc.Close()
+
+ // it should store a value
+ err = rc.Store(cacheKey, []byte("data"), time.Duration(60)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // it should retrieve a value
+ data, ls, err := rc.Retrieve(cacheKey, false)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(data) != "data" {
+ t.Errorf("wanted \"%s\". got \"%s\".", "data", data)
+ }
+ if ls != status.LookupStatusHit {
+ t.Errorf("expected %s got %s", status.LookupStatusHit, ls)
+ }
+
+ rc.BulkRemove([]string{cacheKey}, true)
+
+ // it should be a cache miss
+ _, ls, err = rc.Retrieve(cacheKey, false)
+ if err == nil {
+ t.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+}
+
+func BenchmarkCache_BulkRemove(b *testing.B) {
+ rc, close := storeBenchmark(b)
+ defer close()
+
+ var keyArray []string
+ for n := 0; n < b.N; n++ {
+ keyArray = append(keyArray, cacheKey+strconv.Itoa(n))
+ }
+
+ rc.BulkRemove(keyArray, true)
+
+ // it should be a cache miss
+ for n := 0; n < b.N; n++ {
+ _, ls, err := rc.Retrieve(cacheKey+strconv.Itoa(n), false)
+ if err == nil {
+ b.Errorf("expected key not found error for %s", cacheKey)
+ }
+ if ls != status.LookupStatusKeyMiss {
+ b.Errorf("expected %s got %s", status.LookupStatusKeyMiss, ls)
+ }
+ }
+}
diff --git a/internal/cache/redis/sentinel.go b/internal/cache/redis/sentinel.go
new file mode 100644
index 000000000..794d0cffc
--- /dev/null
+++ b/internal/cache/redis/sentinel.go
@@ -0,0 +1,94 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "fmt"
+
+ "github.com/go-redis/redis"
+)
+
+func (c *Cache) sentinelOpts() (*redis.FailoverOptions, error) {
+
+ if len(c.Config.Redis.Endpoints) == 0 {
+ return nil, fmt.Errorf("Invalid 'endpoints' config")
+ }
+
+ if c.Config.Redis.SentinelMaster == "" {
+ return nil, fmt.Errorf("Invalid 'sentinel_master' config")
+ }
+
+ o := &redis.FailoverOptions{
+ SentinelAddrs: c.Config.Redis.Endpoints,
+ MasterName: c.Config.Redis.SentinelMaster,
+ }
+
+ if c.Config.Redis.Password != "" {
+ o.Password = c.Config.Redis.Password
+ }
+
+ if c.Config.Redis.DB != 0 {
+ o.DB = c.Config.Redis.DB
+ }
+
+ if c.Config.Redis.MaxRetries != 0 {
+ o.MaxRetries = c.Config.Redis.MaxRetries
+ }
+
+ if c.Config.Redis.MinRetryBackoffMS != 0 {
+ o.MinRetryBackoff = durationFromMS(c.Config.Redis.MinRetryBackoffMS)
+ }
+
+ if c.Config.Redis.MaxRetryBackoffMS != 0 {
+ o.MaxRetryBackoff = durationFromMS(c.Config.Redis.MaxRetryBackoffMS)
+ }
+
+ if c.Config.Redis.DialTimeoutMS != 0 {
+ o.DialTimeout = durationFromMS(c.Config.Redis.DialTimeoutMS)
+ }
+
+ if c.Config.Redis.ReadTimeoutMS != 0 {
+ o.ReadTimeout = durationFromMS(c.Config.Redis.ReadTimeoutMS)
+ }
+
+ if c.Config.Redis.WriteTimeoutMS != 0 {
+ o.WriteTimeout = durationFromMS(c.Config.Redis.WriteTimeoutMS)
+ }
+
+ if c.Config.Redis.PoolSize != 0 {
+ o.PoolSize = c.Config.Redis.PoolSize
+ }
+
+ if c.Config.Redis.MinIdleConns != 0 {
+ o.MinIdleConns = c.Config.Redis.MinIdleConns
+ }
+
+ if c.Config.Redis.MaxConnAgeMS != 0 {
+ o.MaxConnAge = durationFromMS(c.Config.Redis.MaxConnAgeMS)
+ }
+
+ if c.Config.Redis.PoolTimeoutMS != 0 {
+ o.PoolTimeout = durationFromMS(c.Config.Redis.PoolTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleTimeoutMS != 0 {
+ o.IdleTimeout = durationFromMS(c.Config.Redis.IdleTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleCheckFrequencyMS != 0 {
+ o.IdleCheckFrequency = durationFromMS(c.Config.Redis.IdleCheckFrequencyMS)
+ }
+
+ return o, nil
+}
diff --git a/internal/cache/redis/standard.go b/internal/cache/redis/standard.go
new file mode 100644
index 000000000..cfe2a1ac8
--- /dev/null
+++ b/internal/cache/redis/standard.go
@@ -0,0 +1,93 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package redis
+
+import (
+ "fmt"
+
+ "github.com/go-redis/redis"
+)
+
+func (c *Cache) clientOpts() (*redis.Options, error) {
+
+ if c.Config.Redis.Endpoint == "" {
+ return nil, fmt.Errorf("invalid endpoint: %s", c.Config.Redis.Endpoint)
+ }
+
+ o := &redis.Options{
+ Addr: c.Config.Redis.Endpoint,
+ }
+
+ if c.Config.Redis.Protocol != "" {
+ o.Network = c.Config.Redis.Protocol
+ }
+
+ if c.Config.Redis.Password != "" {
+ o.Password = c.Config.Redis.Password
+ }
+
+ if c.Config.Redis.DB != 0 {
+ o.DB = c.Config.Redis.DB
+ }
+
+ if c.Config.Redis.MaxRetries != 0 {
+ o.MaxRetries = c.Config.Redis.MaxRetries
+ }
+
+ if c.Config.Redis.MinRetryBackoffMS != 0 {
+ o.MinRetryBackoff = durationFromMS(c.Config.Redis.MinRetryBackoffMS)
+ }
+
+ if c.Config.Redis.MaxRetryBackoffMS != 0 {
+ o.MaxRetryBackoff = durationFromMS(c.Config.Redis.MaxRetryBackoffMS)
+ }
+
+ if c.Config.Redis.DialTimeoutMS != 0 {
+ o.DialTimeout = durationFromMS(c.Config.Redis.DialTimeoutMS)
+ }
+
+ if c.Config.Redis.ReadTimeoutMS != 0 {
+ o.ReadTimeout = durationFromMS(c.Config.Redis.ReadTimeoutMS)
+ }
+
+ if c.Config.Redis.WriteTimeoutMS != 0 {
+ o.WriteTimeout = durationFromMS(c.Config.Redis.WriteTimeoutMS)
+ }
+
+ if c.Config.Redis.PoolSize != 0 {
+ o.PoolSize = c.Config.Redis.PoolSize
+ }
+
+ if c.Config.Redis.MinIdleConns != 0 {
+ o.MinIdleConns = c.Config.Redis.MinIdleConns
+ }
+
+ if c.Config.Redis.MaxConnAgeMS != 0 {
+ o.MaxConnAge = durationFromMS(c.Config.Redis.MaxConnAgeMS)
+ }
+
+ if c.Config.Redis.PoolTimeoutMS != 0 {
+ o.PoolTimeout = durationFromMS(c.Config.Redis.PoolTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleTimeoutMS != 0 {
+ o.IdleTimeout = durationFromMS(c.Config.Redis.IdleTimeoutMS)
+ }
+
+ if c.Config.Redis.IdleCheckFrequencyMS != 0 {
+ o.IdleCheckFrequency = durationFromMS(c.Config.Redis.IdleCheckFrequencyMS)
+ }
+
+ return o, nil
+}
diff --git a/internal/cache/registration/registration.go b/internal/cache/registration/registration.go
new file mode 100644
index 000000000..afba3e826
--- /dev/null
+++ b/internal/cache/registration/registration.go
@@ -0,0 +1,76 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package registration
+
+import (
+ "fmt"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/badger"
+ "github.com/Comcast/trickster/internal/cache/bbolt"
+ "github.com/Comcast/trickster/internal/cache/filesystem"
+ "github.com/Comcast/trickster/internal/cache/memory"
+ "github.com/Comcast/trickster/internal/cache/redis"
+ "github.com/Comcast/trickster/internal/config"
+)
+
+// Cache Interface Types
+const (
+ ctFilesystem = "filesystem"
+ ctRedis = "redis"
+ ctBBolt = "bbolt"
+ ctBadger = "badger"
+)
+
+// Caches maintains a list of active caches
+var Caches = make(map[string]cache.Cache)
+
+// GetCache returns the Cache named cacheName if it exists
+func GetCache(cacheName string) (cache.Cache, error) {
+ if c, ok := Caches[cacheName]; ok {
+ return c, nil
+ }
+ return nil, fmt.Errorf("Could not find Cache named [%s]", cacheName)
+}
+
+// LoadCachesFromConfig iterates the Caching Config and Connects/Maps each Cache
+func LoadCachesFromConfig() {
+ for k, v := range config.Caches {
+ c := NewCache(k, v)
+ Caches[k] = c
+ }
+}
+
+// NewCache returns a Cache object based on the provided config.CachingConfig
+func NewCache(cacheName string, cfg *config.CachingConfig) cache.Cache {
+
+ var c cache.Cache
+
+ switch cfg.CacheType {
+ case ctFilesystem:
+ c = &filesystem.Cache{Name: cacheName, Config: cfg}
+ case ctRedis:
+ c = &redis.Cache{Name: cacheName, Config: cfg}
+ case ctBBolt:
+ c = &bbolt.Cache{Name: cacheName, Config: cfg}
+ case ctBadger:
+ c = &badger.Cache{Name: cacheName, Config: cfg}
+ default:
+ // Default to MemoryCache
+ c = &memory.Cache{Name: cacheName, Config: cfg}
+ }
+
+ c.Connect()
+ return c
+}
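+
+// Usage sketch (editor's illustrative note, not part of this change): after
+// configuration is loaded, callers populate the registry and look caches up
+// by name; the cache name "default" below is a placeholder.
+//
+//    LoadCachesFromConfig()
+//    c, err := GetCache("default")
+//    if err != nil {
+//        // no cache with that name was configured
+//    }
+//    _ = c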
diff --git a/internal/cache/registration/registration_test.go b/internal/cache/registration/registration_test.go
new file mode 100644
index 000000000..5f7ce6625
--- /dev/null
+++ b/internal/cache/registration/registration_test.go
@@ -0,0 +1,109 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package registration
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestLoadCachesFromConfig(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-log-level", "debug", "-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ for key, v := range config.CacheTypeNames {
+ cfg := newCacheConfig(t, key)
+ config.Caches[key] = cfg
+ switch v {
+ case config.CacheTypeBbolt:
+ defer os.RemoveAll(cfg.BBolt.Filename)
+ case config.CacheTypeFilesystem:
+ defer os.RemoveAll(cfg.Filesystem.CachePath)
+ case config.CacheTypeBadgerDB:
+ defer os.RemoveAll(cfg.Badger.Directory)
+ }
+ }
+
+ LoadCachesFromConfig()
+ _, err = GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ for key := range config.CacheTypeNames {
+ _, err = GetCache(key)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ _, err = GetCache("foo")
+ if err == nil {
+ t.Errorf("expected error")
+ }
+
+}
+
+func newCacheConfig(t *testing.T, cacheType string) *config.CachingConfig {
+
+ bd := "."
+ fd := "."
+ var err error
+
+ ctid, ok := config.CacheTypeNames[cacheType]
+ if !ok {
+ ctid = config.CacheTypeMemory
+ }
+
+ switch ctid {
+ case config.CacheTypeBadgerDB:
+ bd, err = ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Error(err)
+ }
+
+ case config.CacheTypeFilesystem:
+ fd, err = ioutil.TempDir("/tmp", cacheType)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ return &config.CachingConfig{
+ CacheType: cacheType,
+ Redis: config.RedisCacheConfig{Protocol: "tcp", Endpoint: "redis:6379", Endpoints: []string{"redis:6379"}},
+ Filesystem: config.FilesystemCacheConfig{CachePath: fd},
+ BBolt: config.BBoltCacheConfig{Filename: "/tmp/test.db", Bucket: "trickster_test"},
+ Badger: config.BadgerCacheConfig{Directory: bd, ValueDirectory: bd},
+ Index: config.CacheIndexConfig{
+ ReapIntervalSecs: 3,
+ FlushIntervalSecs: 5,
+ MaxSizeBytes: 536870912,
+ MaxSizeBackoffBytes: 16777216,
+ MaxSizeObjects: 0,
+ MaxSizeBackoffObjects: 100,
+ },
+ }
+}
diff --git a/internal/cache/status/lookup_status.go b/internal/cache/status/lookup_status.go
new file mode 100644
index 000000000..b84bf40ca
--- /dev/null
+++ b/internal/cache/status/lookup_status.go
@@ -0,0 +1,79 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package status
+
+import "strconv"
+
+// LookupStatus defines the possible status of a cache lookup
+type LookupStatus int
+
+const (
+ // LookupStatusHit indicates a full cache hit on lookup
+ LookupStatusHit = LookupStatus(iota)
+ // LookupStatusPartialHit indicates a partial cache hit (key exists and has some data
+ // for requested time range, but not all) on lookup
+ LookupStatusPartialHit
+ // LookupStatusRangeMiss indicates a range miss (key exists but no data for requested time range) on lookup
+ LookupStatusRangeMiss
+ // LookupStatusKeyMiss indicates a full key miss (cache key does not exist) on lookup
+ LookupStatusKeyMiss
+ // LookupStatusRevalidated indicates the cached object exceeded the freshness lifetime but
+ // was revalidated against the upstream server and is treated as a cache hit
+ LookupStatusRevalidated
+ // LookupStatusPurge indicates the cache key, if it existed, was purged as directed
+ // in upstream response or downstream request http headers
+ LookupStatusPurge
+ // LookupStatusProxyError indicates that a proxy error occurred while retrieving a cacheable dataset
+ LookupStatusProxyError
+ // LookupStatusProxyOnly indicates that the request was fully proxied to the origin without using the cache
+ LookupStatusProxyOnly
+ // LookupStatusNegativeCacheHit indicates that the request was served as a hit from the Negative Response Cache
+ LookupStatusNegativeCacheHit
+ // LookupStatusError indicates that there was an error looking up the object in the cache
+ LookupStatusError
+)
+
+var cacheLookupStatusNames = map[string]LookupStatus{
+ "hit": LookupStatusHit,
+ "phit": LookupStatusPartialHit,
+ "rhit": LookupStatusRevalidated,
+ "rmiss": LookupStatusRangeMiss,
+ "kmiss": LookupStatusKeyMiss,
+ "purge": LookupStatusPurge,
+ "proxy-error": LookupStatusProxyError,
+ "proxy-only": LookupStatusProxyOnly,
+ "nchit": LookupStatusNegativeCacheHit,
+ "error": LookupStatusError,
+}
+
+var cacheLookupStatusValues = map[LookupStatus]string{
+ LookupStatusHit: "hit",
+ LookupStatusPartialHit: "phit",
+ LookupStatusRevalidated: "rhit",
+ LookupStatusRangeMiss: "rmiss",
+ LookupStatusKeyMiss: "kmiss",
+ LookupStatusPurge: "purge",
+ LookupStatusProxyError: "proxy-error",
+ LookupStatusProxyOnly: "proxy-only",
+ LookupStatusNegativeCacheHit: "nchit",
+ LookupStatusError: "error",
+}
+
+func (s LookupStatus) String() string {
+ if v, ok := cacheLookupStatusValues[s]; ok {
+ return v
+ }
+ return strconv.Itoa(int(s))
+}
diff --git a/internal/cache/status/lookup_status_test.go b/internal/cache/status/lookup_status_test.go
new file mode 100644
index 000000000..592fdac93
--- /dev/null
+++ b/internal/cache/status/lookup_status_test.go
@@ -0,0 +1,36 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package status
+
+import "testing"
+
+func TestLookupStatusString(t *testing.T) {
+
+ t1 := LookupStatusHit
+ t2 := LookupStatusKeyMiss
+
+ var t3 LookupStatus = 10
+
+ if t1.String() != "hit" {
+ t.Errorf("expected %s got %s", "hit", t1.String())
+ }
+
+ if t2.String() != "kmiss" {
+ t.Errorf("expected %s got %s", "kmiss", t2.String())
+ }
+
+ if t3.String() != "10" {
+ t.Errorf("expected %s got %s", "9", t3.String())
+ }
+}
diff --git a/internal/config/cache_type.go b/internal/config/cache_type.go
new file mode 100644
index 000000000..38ff5860f
--- /dev/null
+++ b/internal/config/cache_type.go
@@ -0,0 +1,57 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import "strconv"
+
+// CacheType enumerates the methodologies for maintaining time series cache data
+type CacheType int
+
+const (
+ // CacheTypeMemory indicates a memory cache
+ CacheTypeMemory = CacheType(iota)
+ // CacheTypeFilesystem indicates a filesystem cache
+ CacheTypeFilesystem
+ // CacheTypeRedis indicates a Redis cache
+ CacheTypeRedis
+ // CacheTypeBbolt indicates a Bbolt cache
+ CacheTypeBbolt
+ // CacheTypeBadgerDB indicates a BadgerDB cache
+ CacheTypeBadgerDB
+)
+
+// CacheTypeNames is a map of cache types keyed by name
+var CacheTypeNames = map[string]CacheType{
+ "memory": CacheTypeMemory,
+ "filesystem": CacheTypeFilesystem,
+ "redis": CacheTypeRedis,
+ "bbolt": CacheTypeBbolt,
+ "badger": CacheTypeBadgerDB,
+}
+
+// CacheTypeValues is a map of cache types keyed by internal id
+var CacheTypeValues = map[CacheType]string{
+ CacheTypeMemory: "memory",
+ CacheTypeFilesystem: "filesystem",
+ CacheTypeRedis: "redis",
+ CacheTypeBbolt: "bbolt",
+ CacheTypeBadgerDB: "badger",
+}
+
+func (t CacheType) String() string {
+ if v, ok := CacheTypeValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
diff --git a/internal/config/cache_type_test.go b/internal/config/cache_type_test.go
new file mode 100644
index 000000000..75f9488ab
--- /dev/null
+++ b/internal/config/cache_type_test.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "testing"
+)
+
+func TestCacheTypeString(t *testing.T) {
+
+ t1 := CacheTypeMemory
+ t2 := CacheTypeFilesystem
+ var t3 CacheType = 13
+
+ if t1.String() != "memory" {
+ t.Errorf("expected %s got %s", "memory", t1.String())
+ }
+
+ if t2.String() != "filesystem" {
+ t.Errorf("expected %s got %s", "filesystem", t2.String())
+ }
+
+ if t3.String() != "13" {
+ t.Errorf("expected %s got %s", "13", t3.String())
+ }
+
+}
diff --git a/internal/config/coll_fwd_type.go b/internal/config/coll_fwd_type.go
new file mode 100644
index 000000000..b9b847575
--- /dev/null
+++ b/internal/config/coll_fwd_type.go
@@ -0,0 +1,45 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import "strconv"
+
+// CollapsedForwardingType enumerates the methodologies for collapsed forwarding
+type CollapsedForwardingType int
+
+const (
+ // CFTypeBasic indicates basic collapsed forwarding
+ CFTypeBasic = CollapsedForwardingType(iota)
+ // CFTypeProgressive indicates progressive collapsed forwarding
+ CFTypeProgressive
+)
+
+// CollapsedForwardingTypeNames is a map of collapsed forwarding types keyed by name
+var CollapsedForwardingTypeNames = map[string]CollapsedForwardingType{
+ "basic": CFTypeBasic,
+ "progressive": CFTypeProgressive,
+}
+
+// CollapsedForwardingTypeValues is a map of collapsed forwarding types keyed by internal id
+var CollapsedForwardingTypeValues = map[CollapsedForwardingType]string{
+ CFTypeBasic: "basic",
+ CFTypeProgressive: "progressive",
+}
+
+func (t CollapsedForwardingType) String() string {
+ if v, ok := CollapsedForwardingTypeValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
diff --git a/internal/config/coll_fwd_type_test.go b/internal/config/coll_fwd_type_test.go
new file mode 100644
index 000000000..dafbad30d
--- /dev/null
+++ b/internal/config/coll_fwd_type_test.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "testing"
+)
+
+func TestCollapsedForwardingTypeString(t *testing.T) {
+
+ t1 := CFTypeBasic
+ t2 := CFTypeProgressive
+ var t3 CollapsedForwardingType = 13
+
+ if t1.String() != "basic" {
+ t.Errorf("expected %s got %s", "basic", t1.String())
+ }
+
+ if t2.String() != "progressive" {
+ t.Errorf("expected %s got %s", "progressive", t2.String())
+ }
+
+ if t3.String() != "13" {
+ t.Errorf("expected %s got %s", "13", t3.String())
+ }
+
+}
diff --git a/internal/config/config.go b/internal/config/config.go
new file mode 100644
index 000000000..9d17a4861
--- /dev/null
+++ b/internal/config/config.go
@@ -0,0 +1,1028 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/BurntSushi/toml"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+// Config is the Running Configuration for Trickster
+var Config *TricksterConfig
+
+// Main is the Main subsection of the Running Configuration
+var Main *MainConfig
+
+// Origins is the Origin Map subsection of the Running Configuration
+var Origins map[string]*OriginConfig
+
+// Caches is the Cache Map subsection of the Running Configuration
+var Caches map[string]*CachingConfig
+
+// Frontend is the Proxy Server subsection of the Running Configuration
+var Frontend *FrontendConfig
+
+// Logging is the Logging subsection of the Running Configuration
+var Logging *LoggingConfig
+
+// Metrics is the Metrics subsection of the Running Configuration
+var Metrics *MetricsConfig
+
+// NegativeCacheConfigs is the NegativeCacheConfig subsection of the Running Configuration
+var NegativeCacheConfigs map[string]NegativeCacheConfig
+
+// Flags is a collection of command line flags that Trickster loads.
+var Flags = TricksterFlags{}
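+// providedOriginURL and providedOriginType hold origin settings supplied via command line flags or environment variables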
+var providedOriginURL string
+var providedOriginType string
+
+// LoaderWarnings holds warnings generated during config load (before the logger is initialized),
+// so they can be logged at the end of the loading process
+var LoaderWarnings = make([]string, 0)
+
+// TricksterConfig is the main configuration object
+type TricksterConfig struct {
+ // Main is the primary MainConfig section
+ Main *MainConfig `toml:"main"`
+ // Origins is a map of OriginConfigs
+ Origins map[string]*OriginConfig `toml:"origins"`
+ // Caches is a map of CacheConfigs
+ Caches map[string]*CachingConfig `toml:"caches"`
+ // Frontend provides configurations about the Proxy Front End
+ Frontend *FrontendConfig `toml:"frontend"`
+ // Logging provides configurations that affect logging behavior
+ Logging *LoggingConfig `toml:"logging"`
+ // Metrics provides configurations for collecting Metrics about the application
+ Metrics *MetricsConfig `toml:"metrics"`
+ // NegativeCacheConfigs is a map of NegativeCacheConfigs
+ NegativeCacheConfigs map[string]NegativeCacheConfig `toml:"negative_caches"`
+
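+ // activeCaches tracks which cache names are referenced by configured origins, so unreferenced caches can be pruned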
+ activeCaches map[string]bool
+}
+
+// MainConfig is a collection of general configuration values.
+type MainConfig struct {
+ // InstanceID represents a unique ID for the current instance, when multiple instances on the same host
+ InstanceID int `toml:"instance_id"`
+ // ConfigHandlerPath provides the path to register the Config Handler for outputting the running configuration
+ ConfigHandlerPath string `toml:"config_handler_path"`
+ // PingHandlerPath provides the path to register the Ping Handler for checking that Trickster is running
+ PingHandlerPath string `toml:"ping_handler_path"`
+}
+
+// OriginConfig is a collection of configurations for prometheus origins proxied by Trickster
+type OriginConfig struct {
+
+ // HTTP and Proxy Configurations
+ //
+ // IsDefault indicates if this is the default origin for any request not matching a configured route
+ IsDefault bool `toml:"is_default"`
+ // OriginType describes the type of origin (e.g., 'prometheus')
+ OriginType string `toml:"origin_type"`
+ // OriginURL provides the base upstream URL for all proxied requests to this origin.
+ // It can be as simple as http://example.com or as complex as https://example.com:8443/path/prefix
+ OriginURL string `toml:"origin_url"`
+ // TimeoutSecs defines how long the HTTP request will wait for a response before timing out
+ TimeoutSecs int64 `toml:"timeout_secs"`
+ // KeepAliveTimeoutSecs defines how long an open keep-alive HTTP connection remains idle before closing
+ KeepAliveTimeoutSecs int64 `toml:"keep_alive_timeout_secs"`
+ // MaxIdleConns defines maximum number of open keep-alive connections to maintain
+ MaxIdleConns int `toml:"max_idle_conns"`
+ // CacheName provides the name of the configured cache where the origin client will store its cache data
+ CacheName string `toml:"cache_name"`
+ // CacheKeyPrefix defines the cache key prefix the origin will use when writing objects to the cache
+ CacheKeyPrefix string `toml:"cache_key_prefix"`
+ // HealthCheckUpstreamPath provides the URL path for the upstream health check
+ HealthCheckUpstreamPath string `toml:"health_check_upstream_path"`
+ // HealthCheckVerb provides the HTTP verb to use when making an upstream health check
+ HealthCheckVerb string `toml:"health_check_verb"`
+ // HealthCheckQuery provides the HTTP query parameters to use when making an upstream health check
+ HealthCheckQuery string `toml:"health_check_query"`
+ // HealthCheckHeaders provides the HTTP Headers to apply when making an upstream health check
+ HealthCheckHeaders map[string]string `toml:"health_check_headers"`
+ // Object Proxy Cache and Delta Proxy Cache Configurations
+ // TimeseriesRetentionFactor limits the maximum number of chronological timestamps' worth of data to store in cache for each query
+ TimeseriesRetentionFactor int `toml:"timeseries_retention_factor"`
+ // TimeseriesEvictionMethodName specifies which methodology ("oldest", "lru") is used to identify timeseries to evict from a full cache object
+ TimeseriesEvictionMethodName string `toml:"timeseries_eviction_method"`
+ // FastForwardDisable indicates whether the FastForward feature should be disabled for this origin
+ FastForwardDisable bool `toml:"fast_forward_disable"`
+ // BackfillToleranceSecs prevents values with timestamps newer than the provided number of seconds from being cached
+ // this allows propagation of upstream backfill operations that modify recently-served data
+ BackfillToleranceSecs int64 `toml:"backfill_tolerance_secs"`
+ // PathList is a list of PathConfigs that control the behavior of the given paths when requested
+ Paths map[string]*PathConfig `toml:"paths"`
+ // NegativeCacheName provides the name of the Negative Cache Config to be used by this Origin
+ NegativeCacheName string `toml:"negative_cache_name"`
+ // TimeseriesTTLSecs specifies the cache TTL of timeseries objects
+ TimeseriesTTLSecs int `toml:"timeseries_ttl_secs"`
+ // FastForwardTTLSecs specifies the cache TTL of fast forward data
+ FastForwardTTLSecs int `toml:"fastforward_ttl_secs"`
+ // MaxTTLSecs specifies the maximum allowed TTL for any cache object
+ MaxTTLSecs int `toml:"max_ttl_secs"`
+ // RevalidationFactor specifies how many times to multiply the object freshness lifetime by to calculate an absolute cache TTL
+ RevalidationFactor float64 `toml:"revalidation_factor"`
+ // MaxObjectSizeBytes specifies the maximum object size accepted for any given cache object
+ MaxObjectSizeBytes int `toml:"max_object_size_bytes"`
+ // CompressableTypeList specifies the HTTP Object Content Types that will be compressed internally when stored in the Trickster cache
+ CompressableTypeList []string `toml:"compressable_types"`
+
+ // TLS is the TLS Configuration for the Frontend and Backend
+ TLS *TLSConfig `toml:"tls"`
+ // RequireTLS, when true, indicates this Origin Config's paths must only be registered with the TLS Router
+ RequireTLS bool `toml:"require_tls"`
+
+ // MultipartRangesDisabled, when true, indicates that if a downstream client requests multiple ranges in a single Range request,
+ // Trickster will instead request and return a 200 OK with the full object body
+ MultipartRangesDisabled bool `toml:"multipart_ranges_disabled"`
+ // DearticulateUpstreamRanges, when true, indicates that when Trickster requests multiple ranges from the origin,
+ // that they be requested as individual upstream requests instead of a single request that expects a multipart response
+ // this optimizes Trickster to request as few bytes as possible when fronting origins that only support single range requests
+ DearticulateUpstreamRanges bool `toml:"dearticulate_upstream_ranges"`
+
+ // Synthesized Configurations
+ // These configurations are parsed versions of those defined above, and are what Trickster uses internally
+ //
+ // Name is the Name of the origin, taken from the Key in the Origins map[string]*OriginConfig
+ Name string `toml:"-"`
+ // Timeout is the time.Duration representation of TimeoutSecs
+ Timeout time.Duration `toml:"-"`
+ // BackfillTolerance is the time.Duration representation of BackfillToleranceSecs
+ BackfillTolerance time.Duration `toml:"-"`
+ // ValueRetention is the time.Duration representation of ValueRetentionSecs
+ ValueRetention time.Duration `toml:"-"`
+ // Scheme is the layer 7 protocol indicator (e.g. 'http'), derived from OriginURL
+ Scheme string `toml:"-"`
+ // Host is the upstream hostname/IP[:port] the origin client will connect to when fetching uncached data, derived from OriginURL
+ Host string `toml:"-"`
+ // PathPrefix provides any prefix added to the front of the requested path when constructing the upstream request url, derived from OriginURL
+ PathPrefix string `toml:"-"`
+ // NegativeCache provides a map for the negative cache, with TTLs converted to time.Durations
+ NegativeCache map[int]time.Duration `toml:"-"`
+ // TimeseriesRetention when subtracted from time.Now() represents the oldest allowable timestamp in a timeseries when EvictionMethod is 'oldest'
+ TimeseriesRetention time.Duration `toml:"-"`
+ // TimeseriesEvictionMethod is the parsed value of TimeseriesEvictionMethodName
+ TimeseriesEvictionMethod TimeseriesEvictionMethod `toml:"-"`
+ // TimeseriesTTL is the parsed value of TimeseriesTTLSecs
+ TimeseriesTTL time.Duration `toml:"-"`
+ // FastForwardTTL is the parsed value of FastForwardTTLSecs
+ FastForwardTTL time.Duration `toml:"-"`
+ // FastForwardPath is the PathConfig to use for upstream Fast Forward Requests
+ FastForwardPath *PathConfig `toml:"-"`
+ // MaxTTL is the parsed value of MaxTTLSecs
+ MaxTTL time.Duration `toml:"-"`
+ // HTTPClient is the Client used by Trickster to communicate with this origin
+ HTTPClient *http.Client `toml:"-"`
+ // CompressableTypes is the map version of CompressableTypeList for fast lookup
+ CompressableTypes map[string]bool `toml:"-"`
+}
+
+// CachingConfig is a collection of configurations defining the Trickster Caching Behavior
+type CachingConfig struct {
+ // Name is the Name of the cache, taken from the Key in the Caches map[string]*CacheConfig
+ Name string `toml:"-"`
+ // CacheType represents the type of cache to use: "memory", "filesystem", "redis", "bbolt", or "badger"
+ CacheType string `toml:"cache_type"`
+ // Index provides options for the Cache Index
+ Index CacheIndexConfig `toml:"index"`
+ // Redis provides options for Redis caching
+ Redis RedisCacheConfig `toml:"redis"`
+ // Filesystem provides options for Filesystem caching
+ Filesystem FilesystemCacheConfig `toml:"filesystem"`
+ // BBolt provides options for BBolt caching
+ BBolt BBoltCacheConfig `toml:"bbolt"`
+ // Badger provides options for BadgerDB caching
+ Badger BadgerCacheConfig `toml:"badger"`
+
+ // Synthetic Values
+
+ // CacheTypeID represents the internal constant for the provided CacheType string
+ // and is automatically populated at startup
+ CacheTypeID CacheType `toml:"-"`
+}
+
+// CacheIndexConfig defines the operation of the Cache Indexer
+type CacheIndexConfig struct {
+ // ReapIntervalSecs defines how long the Cache Index reaper sleeps between reap cycles
+ ReapIntervalSecs int `toml:"reap_interval_secs"`
+ // FlushIntervalSecs sets how often the Cache Index saves its metadata to the cache from application memory
+ FlushIntervalSecs int `toml:"flush_interval_secs"`
+ // MaxSizeBytes indicates how large the cache can grow in bytes before the Index evicts
+ // least-recently-accessed items.
+ MaxSizeBytes int64 `toml:"max_size_bytes"`
+ // MaxSizeBackoffBytes indicates how far below max_size_bytes the cache size must be
+ // to complete a byte-size-based eviction exercise.
+ MaxSizeBackoffBytes int64 `toml:"max_size_backoff_bytes"`
+ // MaxSizeObjects indicates how large the cache can grow in objects before the Index
+ // evicts least-recently-accessed items.
+ MaxSizeObjects int64 `toml:"max_size_objects"`
+ // MaxSizeBackoffObjects indicates how far under max_size_objects the cache size must
+ // be to complete an object-count-based eviction exercise.
+ MaxSizeBackoffObjects int64 `toml:"max_size_backoff_objects"`
+
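+ // ReapInterval and FlushInterval are the time.Duration representations of ReapIntervalSecs and FlushIntervalSecs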
+ ReapInterval time.Duration `toml:"-"`
+ FlushInterval time.Duration `toml:"-"`
+}
+
+// RedisCacheConfig is a collection of Configurations for Connecting to Redis
+type RedisCacheConfig struct {
+ // ClientType defines the type of Redis Client ("standard", "cluster", "sentinel")
+ ClientType string `toml:"client_type"`
+ // Protocol represents the connection method (e.g., "tcp", "unix", etc.)
+ Protocol string `toml:"protocol"`
+ // Endpoint represents FQDN:port or IPAddress:Port of the Redis Endpoint
+ Endpoint string `toml:"endpoint"`
+ // Endpoints represents FQDN:port or IPAddress:Port collection of a Redis Cluster or Sentinel Nodes
+ Endpoints []string `toml:"endpoints"`
+ // Password can be set when using password protected redis instance.
+ Password string `toml:"password"`
+ // SentinelMaster should be set when using Redis Sentinel to indicate the Master Node
+ SentinelMaster string `toml:"sentinel_master"`
+ // DB is the Database to be selected after connecting to the server.
+ DB int `toml:"db"`
+ // MaxRetries is the maximum number of retries before giving up on the command
+ MaxRetries int `toml:"max_retries"`
+ // MinRetryBackoffMS is the minimum backoff between each retry.
+ MinRetryBackoffMS int `toml:"min_retry_backoff_ms"`
+ // MaxRetryBackoffMS is the Maximum backoff between each retry.
+ MaxRetryBackoffMS int `toml:"max_retry_backoff_ms"`
+ // DialTimeoutMS is the timeout for establishing new connections.
+ DialTimeoutMS int `toml:"dial_timeout_ms"`
+ // ReadTimeoutMS is the timeout for socket reads. If reached, commands will fail with a timeout instead of blocking.
+ ReadTimeoutMS int `toml:"read_timeout_ms"`
+ // WriteTimeoutMS is the timeout for socket writes. If reached, commands will fail with a timeout instead of blocking.
+ WriteTimeoutMS int `toml:"write_timeout_ms"`
+ // PoolSize is the maximum number of socket connections.
+ PoolSize int `toml:"pool_size"`
+ // MinIdleConns is the minimum number of idle connections which is useful when establishing new connection is slow.
+ MinIdleConns int `toml:"min_idle_conns"`
+ // MaxConnAgeMS is the connection age at which client retires (closes) the connection.
+ MaxConnAgeMS int `toml:"max_conn_age_ms"`
+ // PoolTimeoutMS is the amount of time client waits for connection if all connections are busy before returning an error.
+ PoolTimeoutMS int `toml:"pool_timeout_ms"`
+ // IdleTimeoutMS is the amount of time after which client closes idle connections.
+ IdleTimeoutMS int `toml:"idle_timeout_ms"`
+ // IdleCheckFrequencyMS is the frequency of idle checks made by idle connections reaper.
+ IdleCheckFrequencyMS int `toml:"idle_check_frequency_ms"`
+}
+
+// BadgerCacheConfig is a collection of Configurations for storing cached data on the Filesystem in a Badger key-value store
+type BadgerCacheConfig struct {
+ // Directory represents the path on disk where the Badger database should store data
+ Directory string `toml:"directory"`
+ // ValueDirectory represents the path on disk where the Badger database will store its value log.
+ ValueDirectory string `toml:"value_directory"`
+}
+
+// BBoltCacheConfig is a collection of Configurations for storing cached data on the Filesystem
+type BBoltCacheConfig struct {
+ // Filename represents the filename (including path) of the BBolt database
+ Filename string `toml:"filename"`
+ // Bucket represents the name of the bucket within BBolt under which Trickster's keys will be stored.
+ Bucket string `toml:"bucket"`
+}
+
+// FilesystemCacheConfig is a collection of Configurations for storing cached data on the Filesystem
+type FilesystemCacheConfig struct {
+ // CachePath represents the path on disk where our cache will live
+ CachePath string `toml:"cache_path"`
+}
+
+// FrontendConfig is a collection of configurations for the main http frontend for the application
+type FrontendConfig struct {
+ // ListenAddress is IP address for the main http listener for the application
+ ListenAddress string `toml:"listen_address"`
+ // ListenPort is TCP Port for the main http listener for the application
+ ListenPort int `toml:"listen_port"`
+ // TLSListenAddress is IP address for the tls http listener for the application
+ TLSListenAddress string `toml:"tls_listen_address"`
+ // TLSListenPort is the TCP Port for the tls http listener for the application
+ TLSListenPort int `toml:"tls_listen_port"`
+ // ConnectionsLimit indicates how many concurrent front end connections trickster will handle at any time
+ ConnectionsLimit int `toml:"connections_limit"`
+
+ // ServeTLS indicates whether to listen and serve on the TLS port, meaning
+ // at least one origin configuration has a valid certificate and key file configured.
+ ServeTLS bool `toml:"-"`
+}
+
+// LoggingConfig is a collection of Logging configurations
+type LoggingConfig struct {
+ // LogFile provides the filepath to the instance's logfile. Set as empty string to Log to Console
+ LogFile string `toml:"log_file"`
+ // LogLevel provides the most granular level (e.g., DEBUG, INFO, ERROR) to log
+ LogLevel string `toml:"log_level"`
+}
+
+// MetricsConfig is a collection of Metrics Collection configurations
+type MetricsConfig struct {
+ // ListenAddress is IP address from which the Application Metrics are available for pulling at /metrics
+ ListenAddress string `toml:"listen_address"`
+ // ListenPort is TCP Port from which the Application Metrics are available for pulling at /metrics
+ ListenPort int `toml:"listen_port"`
+}
+
+// NegativeCacheConfig is a collection of response codes and their TTLs
+type NegativeCacheConfig map[string]int
+
+// Clone returns an exact copy of a NegativeCacheConfig
+func (nc NegativeCacheConfig) Clone() NegativeCacheConfig {
+ nc2 := make(NegativeCacheConfig)
+ for k, v := range nc {
+ nc2[k] = v
+ }
+ return nc2
+}
+
+// NewConfig returns a Config initialized with default values.
+func NewConfig() *TricksterConfig {
+ return &TricksterConfig{
+ Caches: map[string]*CachingConfig{
+ "default": NewCacheConfig(),
+ },
+ Logging: &LoggingConfig{
+ LogFile: defaultLogFile,
+ LogLevel: defaultLogLevel,
+ },
+ Main: &MainConfig{
+ ConfigHandlerPath: defaultConfigHandlerPath,
+ PingHandlerPath: defaultPingHandlerPath,
+ },
+ Metrics: &MetricsConfig{
+ ListenPort: defaultMetricsListenPort,
+ },
+ Origins: map[string]*OriginConfig{
+ "default": NewOriginConfig(),
+ },
+ Frontend: &FrontendConfig{
+ ListenPort: defaultProxyListenPort,
+ },
+ NegativeCacheConfigs: map[string]NegativeCacheConfig{
+ "default": NewNegativeCacheConfig(),
+ },
+ }
+}
+
+// NewNegativeCacheConfig returns an empty NegativeCacheConfig
+func NewNegativeCacheConfig() NegativeCacheConfig {
+ return NegativeCacheConfig{}
+}
+
+// NewCacheConfig will return a pointer to a CachingConfig with the default configuration settings
+func NewCacheConfig() *CachingConfig {
+
+ return &CachingConfig{
+ CacheType: defaultCacheType,
+ CacheTypeID: defaultCacheTypeID,
+ Redis: RedisCacheConfig{ClientType: defaultRedisClientType, Protocol: defaultRedisProtocol, Endpoint: defaultRedisEndpoint, Endpoints: []string{defaultRedisEndpoint}},
+ Filesystem: FilesystemCacheConfig{CachePath: defaultCachePath},
+ BBolt: BBoltCacheConfig{Filename: defaultBBoltFile, Bucket: defaultBBoltBucket},
+ Badger: BadgerCacheConfig{Directory: defaultCachePath, ValueDirectory: defaultCachePath},
+ Index: CacheIndexConfig{
+ ReapIntervalSecs: defaultCacheIndexReap,
+ FlushIntervalSecs: defaultCacheIndexFlush,
+ MaxSizeBytes: defaultCacheMaxSizeBytes,
+ MaxSizeBackoffBytes: defaultMaxSizeBackoffBytes,
+ MaxSizeObjects: defaultMaxSizeObjects,
+ MaxSizeBackoffObjects: defaultMaxSizeBackoffObjects,
+ },
+ }
+}
+
+// NewOriginConfig will return a pointer to an OriginConfig with the default configuration settings
+func NewOriginConfig() *OriginConfig {
+ return &OriginConfig{
+ BackfillTolerance: defaultBackfillToleranceSecs,
+ BackfillToleranceSecs: defaultBackfillToleranceSecs,
+ CacheName: defaultOriginCacheName,
+ CacheKeyPrefix: "",
+ HealthCheckQuery: defaultHealthCheckQuery,
+ HealthCheckUpstreamPath: defaultHealthCheckPath,
+ HealthCheckVerb: defaultHealthCheckVerb,
+ HealthCheckHeaders: make(map[string]string),
+ KeepAliveTimeoutSecs: defaultKeepAliveTimeoutSecs,
+ MaxIdleConns: defaultMaxIdleConns,
+ NegativeCache: make(map[int]time.Duration),
+ NegativeCacheName: defaultOriginNegativeCacheName,
+ Paths: make(map[string]*PathConfig),
+ Timeout: time.Second * defaultOriginTimeoutSecs,
+ TimeoutSecs: defaultOriginTimeoutSecs,
+ TimeseriesEvictionMethod: defaultOriginTEM,
+ TimeseriesEvictionMethodName: defaultOriginTEMName,
+ TimeseriesRetention: defaultOriginTRF,
+ TimeseriesRetentionFactor: defaultOriginTRF,
+ TimeseriesTTLSecs: defaultTimeseriesTTLSecs,
+ FastForwardTTLSecs: defaultFastForwardTTLSecs,
+ TimeseriesTTL: defaultTimeseriesTTLSecs * time.Second,
+ FastForwardTTL: defaultFastForwardTTLSecs * time.Second,
+ MaxTTLSecs: defaultMaxTTLSecs,
+ MaxTTL: defaultMaxTTLSecs * time.Second,
+ RevalidationFactor: defaultRevalidationFactor,
+ MaxObjectSizeBytes: defaultMaxObjectSizeBytes,
+ TLS: &TLSConfig{},
+ CompressableTypeList: defaultCompressableTypes(),
+ }
+}
+
+// loadFile loads application configuration from a TOML-formatted file.
+func (c *TricksterConfig) loadFile() error {
+ md, err := toml.DecodeFile(Flags.ConfigPath, c)
+ if err != nil {
+ c.setDefaults(&toml.MetaData{})
+ return err
+ }
+ err = c.setDefaults(&md)
+ return err
+}
+
+func (c *TricksterConfig) setDefaults(metadata *toml.MetaData) error {
+
+ c.processOriginConfigs(metadata)
+ c.processCachingConfigs(metadata)
+ err := c.validateConfigMappings()
+ if err != nil {
+ return err
+ }
+
+ err = c.verifyTLSConfigs()
+
+ return err
+}
+
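+// pathMembers lists the per-path TOML keys that may be customized in an origin's path configuration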
+var pathMembers = []string{"path", "match_type", "handler", "methods", "cache_key_params", "cache_key_headers", "default_ttl_secs",
+ "request_headers", "response_headers", "response_headers", "response_code", "response_body", "no_metrics", "progressive_collapsed_forwarding"}
+
+func (c *TricksterConfig) validateConfigMappings() error {
+ for k, oc := range c.Origins {
+ if _, ok := c.Caches[oc.CacheName]; !ok {
+ return fmt.Errorf("invalid cache name [%s] provided in origin config [%s]", oc.CacheName, k)
+ }
+ }
+ return nil
+}
+
+func (c *TricksterConfig) processOriginConfigs(metadata *toml.MetaData) {
+
+ c.activeCaches = make(map[string]bool)
+
+ for k, v := range c.Origins {
+
+ oc := NewOriginConfig()
+ oc.Name = k
+
+ if metadata.IsDefined("origins", k, "origin_type") {
+ oc.OriginType = v.OriginType
+ }
+
+ if metadata.IsDefined("origins", k, "is_default") {
+ oc.IsDefault = v.IsDefault
+ }
+ // If there is only one origin and is_default is not explicitly false, make it true
+ if len(c.Origins) == 1 && (!metadata.IsDefined("origins", k, "is_default")) {
+ oc.IsDefault = true
+ }
+
+ if metadata.IsDefined("origins", k, "require_tls") {
+ oc.RequireTLS = v.RequireTLS
+ }
+
+ if metadata.IsDefined("origins", k, "cache_name") {
+ oc.CacheName = v.CacheName
+ }
+ c.activeCaches[oc.CacheName] = true
+
+ if metadata.IsDefined("origins", k, "cache_key_prefix") {
+ oc.CacheKeyPrefix = v.CacheKeyPrefix
+ }
+
+ if metadata.IsDefined("origins", k, "origin_url") {
+ oc.OriginURL = v.OriginURL
+ }
+
+ if metadata.IsDefined("origins", k, "compressable_types") {
+ oc.CompressableTypeList = v.CompressableTypeList
+ }
+
+ if metadata.IsDefined("origins", k, "timeout_secs") {
+ oc.TimeoutSecs = v.TimeoutSecs
+ }
+
+ if metadata.IsDefined("origins", k, "max_idle_conns") {
+ oc.MaxIdleConns = v.MaxIdleConns
+ }
+
+ if metadata.IsDefined("origins", k, "keep_alive_timeout_secs") {
+ oc.KeepAliveTimeoutSecs = v.KeepAliveTimeoutSecs
+ }
+
+ if metadata.IsDefined("origins", k, "timeseries_retention_factor") {
+ oc.TimeseriesRetentionFactor = v.TimeseriesRetentionFactor
+ }
+
+ if metadata.IsDefined("origins", k, "timeseries_eviction_method") {
+ oc.TimeseriesEvictionMethodName = strings.ToLower(v.TimeseriesEvictionMethodName)
+ if p, ok := timeseriesEvictionMethodNames[oc.TimeseriesEvictionMethodName]; ok {
+ oc.TimeseriesEvictionMethod = p
+ }
+ }
+
+ if metadata.IsDefined("origins", k, "timeseries_ttl_secs") {
+ oc.TimeseriesTTLSecs = v.TimeseriesTTLSecs
+ }
+
+ if metadata.IsDefined("origins", k, "max_ttl_secs") {
+ oc.MaxTTLSecs = v.MaxTTLSecs
+ }
+
+ if metadata.IsDefined("origins", k, "fastforward_ttl_secs") {
+ oc.FastForwardTTLSecs = v.FastForwardTTLSecs
+ }
+
+ if metadata.IsDefined("origins", k, "fast_forward_disable") {
+ oc.FastForwardDisable = v.FastForwardDisable
+ }
+
+ if metadata.IsDefined("origins", k, "backfill_tolerance_secs") {
+ oc.BackfillToleranceSecs = v.BackfillToleranceSecs
+ }
+
+ if metadata.IsDefined("origins", k, "paths") {
+ var j = 0
+ for l, p := range v.Paths {
+ if len(p.Methods) == 0 {
+ p.Methods = []string{http.MethodGet, http.MethodHead}
+ }
+ p.custom = make([]string, 0)
+ for _, pm := range pathMembers {
+ if metadata.IsDefined("origins", k, "paths", l, pm) {
+ p.custom = append(p.custom, pm)
+ }
+ }
+ if metadata.IsDefined("origins", k, "paths", l, "response_body") {
+ p.ResponseBodyBytes = []byte(p.ResponseBody)
+ p.HasCustomResponseBody = true
+ }
+
+ if mt, ok := pathMatchTypeNames[strings.ToLower(p.MatchTypeName)]; ok {
+ p.MatchType = mt
+ p.MatchTypeName = p.MatchType.String()
+ } else {
+ p.MatchType = PathMatchTypeExact
+ p.MatchTypeName = p.MatchType.String()
+ }
+ oc.Paths[p.Path+"-"+strings.Join(p.Methods, "-")] = p
+ j++
+ }
+ }
+
+ if metadata.IsDefined("origins", k, "negative_cache_name") {
+ oc.NegativeCacheName = v.NegativeCacheName
+ }
+
+ if metadata.IsDefined("origins", k, "health_check_upstream_path") {
+ oc.HealthCheckUpstreamPath = v.HealthCheckUpstreamPath
+ }
+
+ if metadata.IsDefined("origins", k, "health_check_verb") {
+ oc.HealthCheckVerb = v.HealthCheckVerb
+ }
+
+ if metadata.IsDefined("origins", k, "health_check_query") {
+ oc.HealthCheckQuery = v.HealthCheckQuery
+ }
+
+ if metadata.IsDefined("origins", k, "health_check_headers") {
+ oc.HealthCheckHeaders = v.HealthCheckHeaders
+ }
+
+ if metadata.IsDefined("origins", k, "max_object_size_bytes") {
+ oc.MaxObjectSizeBytes = v.MaxObjectSizeBytes
+ }
+
+ if metadata.IsDefined("origins", k, "revalidation_factor") {
+ oc.RevalidationFactor = v.RevalidationFactor
+ }
+
+ if metadata.IsDefined("origins", k, "multipart_ranges_disabled") {
+ oc.MultipartRangesDisabled = v.MultipartRangesDisabled
+ }
+
+ if metadata.IsDefined("origins", k, "dearticulate_upstream_ranges") {
+ oc.DearticulateUpstreamRanges = v.DearticulateUpstreamRanges
+ }
+
+ if metadata.IsDefined("origins", k, "tls") {
+ oc.TLS = &TLSConfig{
+ InsecureSkipVerify: v.TLS.InsecureSkipVerify,
+ CertificateAuthorityPaths: v.TLS.CertificateAuthorityPaths,
+ PrivateKeyPath: v.TLS.PrivateKeyPath,
+ FullChainCertPath: v.TLS.FullChainCertPath,
+ ClientCertPath: v.TLS.ClientCertPath,
+ ClientKeyPath: v.TLS.ClientKeyPath,
+ }
+ }
+
+ c.Origins[k] = oc
+ }
+}
+
+func (c *TricksterConfig) processCachingConfigs(metadata *toml.MetaData) {
+
+ // processCachingConfigs assumes that processOriginConfigs has just been run
+
+ for k, v := range c.Caches {
+
+ if _, ok := c.activeCaches[k]; !ok {
+ // a configured cache was not used by any origin, so don't instantiate it
+ delete(c.Caches, k)
+ continue
+ }
+
+ cc := NewCacheConfig()
+ cc.Name = k
+
+ if metadata.IsDefined("caches", k, "cache_type") {
+ cc.CacheType = strings.ToLower(v.CacheType)
+ if n, ok := CacheTypeNames[cc.CacheType]; ok {
+ cc.CacheTypeID = n
+ }
+ }
+
+ if metadata.IsDefined("caches", k, "index", "reap_interval_secs") {
+ cc.Index.ReapIntervalSecs = v.Index.ReapIntervalSecs
+ }
+
+ if metadata.IsDefined("caches", k, "index", "flush_interval_secs") {
+ cc.Index.FlushIntervalSecs = v.Index.FlushIntervalSecs
+ }
+
+ if metadata.IsDefined("caches", k, "index", "max_size_bytes") {
+ cc.Index.MaxSizeBytes = v.Index.MaxSizeBytes
+ }
+
+ if metadata.IsDefined("caches", k, "index", "max_size_backoff_bytes") {
+ cc.Index.MaxSizeBackoffBytes = v.Index.MaxSizeBackoffBytes
+ }
+
+ if metadata.IsDefined("caches", k, "index", "max_size_objects") {
+ cc.Index.MaxSizeObjects = v.Index.MaxSizeObjects
+ }
+
+ if metadata.IsDefined("caches", k, "index", "max_size_backoff_objects") {
+ cc.Index.MaxSizeBackoffObjects = v.Index.MaxSizeBackoffObjects
+ }
+
+ if cc.CacheTypeID == CacheTypeRedis {
+
+ var hasEndpoint, hasEndpoints bool
+
+ ct := strings.ToLower(v.Redis.ClientType)
+ if metadata.IsDefined("caches", k, "redis", "client_type") {
+ cc.Redis.ClientType = ct
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "protocol") {
+ cc.Redis.Protocol = v.Redis.Protocol
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "endpoint") {
+ cc.Redis.Endpoint = v.Redis.Endpoint
+ hasEndpoint = true
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "endpoints") {
+ cc.Redis.Endpoints = v.Redis.Endpoints
+ hasEndpoints = true
+ }
+
+ if cc.Redis.ClientType == "standard" {
+ if hasEndpoints && !hasEndpoint {
+ LoaderWarnings = append(LoaderWarnings, "'standard' redis type configured, but 'endpoints' value is provided instead of 'endpoint'")
+ }
+ } else {
+ if hasEndpoint && !hasEndpoints {
+ LoaderWarnings = append(LoaderWarnings, fmt.Sprintf("'%s' redis type configured, but 'endpoint' value is provided instead of 'endpoints'", cc.Redis.ClientType))
+ }
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "sentinel_master") {
+ cc.Redis.SentinelMaster = v.Redis.SentinelMaster
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "password") {
+ cc.Redis.Password = v.Redis.Password
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "db") {
+ cc.Redis.DB = v.Redis.DB
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "max_retries") {
+ cc.Redis.MaxRetries = v.Redis.MaxRetries
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "min_retry_backoff_ms") {
+ cc.Redis.MinRetryBackoffMS = v.Redis.MinRetryBackoffMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "max_retry_backoff_ms") {
+ cc.Redis.MaxRetryBackoffMS = v.Redis.MaxRetryBackoffMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "dial_timeout_ms") {
+ cc.Redis.DialTimeoutMS = v.Redis.DialTimeoutMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "read_timeout_ms") {
+ cc.Redis.ReadTimeoutMS = v.Redis.ReadTimeoutMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "write_timeout_ms") {
+ cc.Redis.WriteTimeoutMS = v.Redis.WriteTimeoutMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "pool_size") {
+ cc.Redis.PoolSize = v.Redis.PoolSize
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "min_idle_conns") {
+ cc.Redis.MinIdleConns = v.Redis.MinIdleConns
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "max_conn_age_ms") {
+ cc.Redis.MaxConnAgeMS = v.Redis.MaxConnAgeMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "pool_timeout_ms") {
+ cc.Redis.PoolTimeoutMS = v.Redis.PoolTimeoutMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "idle_timeout_ms") {
+ cc.Redis.IdleTimeoutMS = v.Redis.IdleTimeoutMS
+ }
+
+ if metadata.IsDefined("caches", k, "redis", "idle_check_frequency_ms") {
+ cc.Redis.IdleCheckFrequencyMS = v.Redis.IdleCheckFrequencyMS
+ }
+ }
+
+ if metadata.IsDefined("caches", k, "filesystem", "cache_path") {
+ cc.Filesystem.CachePath = v.Filesystem.CachePath
+ }
+
+ if metadata.IsDefined("caches", k, "bbolt", "filename") {
+ cc.BBolt.Filename = v.BBolt.Filename
+ }
+
+ if metadata.IsDefined("caches", k, "bbolt", "bucket") {
+ cc.BBolt.Bucket = v.BBolt.Bucket
+ }
+
+ if metadata.IsDefined("caches", k, "badger", "directory") {
+ cc.Badger.Directory = v.Badger.Directory
+ }
+
+ if metadata.IsDefined("caches", k, "badger", "value_directory") {
+ cc.Badger.ValueDirectory = v.Badger.ValueDirectory
+ }
+
+ c.Caches[k] = cc
+ }
+}
+
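+// copy returns a deep copy of the TricksterConfig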
+func (c *TricksterConfig) copy() *TricksterConfig {
+
+ nc := NewConfig()
+ delete(nc.Caches, "default")
+ delete(nc.Origins, "default")
+
+ nc.Main.ConfigHandlerPath = c.Main.ConfigHandlerPath
+ nc.Main.InstanceID = c.Main.InstanceID
+ nc.Main.PingHandlerPath = c.Main.PingHandlerPath
+
+ nc.Logging.LogFile = c.Logging.LogFile
+ nc.Logging.LogLevel = c.Logging.LogLevel
+
+ nc.Metrics.ListenAddress = c.Metrics.ListenAddress
+ nc.Metrics.ListenPort = c.Metrics.ListenPort
+
+ nc.Frontend.ListenAddress = c.Frontend.ListenAddress
+ nc.Frontend.ListenPort = c.Frontend.ListenPort
+ nc.Frontend.TLSListenAddress = c.Frontend.TLSListenAddress
+ nc.Frontend.TLSListenPort = c.Frontend.TLSListenPort
+ nc.Frontend.ConnectionsLimit = c.Frontend.ConnectionsLimit
+ nc.Frontend.ServeTLS = c.Frontend.ServeTLS
+
+ for k, v := range c.Origins {
+ nc.Origins[k] = v.Clone()
+ }
+
+ for k, v := range c.Caches {
+ nc.Caches[k] = v.Clone()
+ }
+
+ for k, v := range c.NegativeCacheConfigs {
+ nc.NegativeCacheConfigs[k] = v.Clone()
+ }
+
+ return nc
+}
+
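+// String returns the Running Configuration as a TOML document, with sensitive values masked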
+func (c *TricksterConfig) String() string {
+ cp := c.copy()
+
+ // the toml library will panic if the Handler is assigned,
+ // even though this field is annotated as skip ("-") in the prototype
+ // so we iterate the paths and set the Handler to nil (in our local copy only)
+ if cp.Origins != nil {
+ for _, v := range cp.Origins {
+ if v != nil {
+ for _, w := range v.Paths {
+ w.Handler = nil
+ w.KeyHasher = nil
+ }
+ }
+ // also strip out potentially sensitive headers
+ hideAuthorizationCredentials(v.HealthCheckHeaders)
+
+ if v.Paths != nil {
+ for _, p := range v.Paths {
+ hideAuthorizationCredentials(p.RequestHeaders)
+ hideAuthorizationCredentials(p.ResponseHeaders)
+ }
+ }
+ }
+ }
+
+ // strip Redis password
+ for k, v := range cp.Caches {
+ if v != nil && cp.Caches[k].Redis.Password != "" {
+ cp.Caches[k].Redis.Password = "*****"
+ }
+ }
+
+ var buf bytes.Buffer
+ e := toml.NewEncoder(&buf)
+ e.Encode(cp)
+ return buf.String()
+}
+
+var sensitiveCredentials = map[string]bool{headers.NameAuthorization: true}
+
+func hideAuthorizationCredentials(headers map[string]string) {
+ // strip Authorization Headers
+ for k := range headers {
+ if _, ok := sensitiveCredentials[k]; ok {
+ headers[k] = "*****"
+ }
+ }
+}
+
+// Clone returns an exact copy of an *OriginConfig
+func (oc *OriginConfig) Clone() *OriginConfig {
+
+ o := &OriginConfig{}
+ o.DearticulateUpstreamRanges = oc.DearticulateUpstreamRanges
+ o.BackfillTolerance = oc.BackfillTolerance
+ o.BackfillToleranceSecs = oc.BackfillToleranceSecs
+ o.CacheName = oc.CacheName
+ o.CacheKeyPrefix = oc.CacheKeyPrefix
+ o.FastForwardDisable = oc.FastForwardDisable
+ o.FastForwardTTL = oc.FastForwardTTL
+ o.FastForwardTTLSecs = oc.FastForwardTTLSecs
+ o.HealthCheckUpstreamPath = oc.HealthCheckUpstreamPath
+ o.HealthCheckVerb = oc.HealthCheckVerb
+ o.HealthCheckQuery = oc.HealthCheckQuery
+ o.Host = oc.Host
+ o.Name = oc.Name
+ o.IsDefault = oc.IsDefault
+ o.KeepAliveTimeoutSecs = oc.KeepAliveTimeoutSecs
+ o.MaxIdleConns = oc.MaxIdleConns
+ o.MaxTTLSecs = oc.MaxTTLSecs
+ o.MaxTTL = oc.MaxTTL
+ o.MaxObjectSizeBytes = oc.MaxObjectSizeBytes
+ o.MultipartRangesDisabled = oc.MultipartRangesDisabled
+ o.OriginType = oc.OriginType
+ o.OriginURL = oc.OriginURL
+ o.PathPrefix = oc.PathPrefix
+ o.RevalidationFactor = oc.RevalidationFactor
+ o.Scheme = oc.Scheme
+ o.Timeout = oc.Timeout
+ o.TimeoutSecs = oc.TimeoutSecs
+ o.TimeseriesRetention = oc.TimeseriesRetention
+ o.TimeseriesRetentionFactor = oc.TimeseriesRetentionFactor
+ o.TimeseriesEvictionMethodName = oc.TimeseriesEvictionMethodName
+ o.TimeseriesEvictionMethod = oc.TimeseriesEvictionMethod
+ o.TimeseriesTTL = oc.TimeseriesTTL
+ o.TimeseriesTTLSecs = oc.TimeseriesTTLSecs
+ o.ValueRetention = oc.ValueRetention
+
+ if oc.CompressableTypeList != nil {
+ o.CompressableTypeList = make([]string, len(oc.CompressableTypeList))
+ copy(o.CompressableTypeList, oc.CompressableTypeList)
+ }
+
+ if oc.CompressableTypes != nil {
+ o.CompressableTypes = make(map[string]bool)
+ for k := range oc.CompressableTypes {
+ o.CompressableTypes[k] = true
+ }
+ }
+
+ o.HealthCheckHeaders = make(map[string]string)
+ for k, v := range oc.HealthCheckHeaders {
+ o.HealthCheckHeaders[k] = v
+ }
+
+ o.Paths = make(map[string]*PathConfig)
+ for l, p := range oc.Paths {
+ o.Paths[l] = p.Clone()
+ }
+
+ o.NegativeCacheName = oc.NegativeCacheName
+ if oc.NegativeCache != nil {
+ m := make(map[int]time.Duration)
+ for c, t := range oc.NegativeCache {
+ m[c] = t
+ }
+ o.NegativeCache = m
+ }
+
+ if oc.TLS != nil {
+ o.TLS = oc.TLS.Clone()
+ }
+ o.RequireTLS = oc.RequireTLS
+
+ if oc.FastForwardPath != nil {
+ o.FastForwardPath = oc.FastForwardPath.Clone()
+ }
+
+ return o
+
+}
+
+// Clone returns an exact copy of a *CachingConfig
+func (cc *CachingConfig) Clone() *CachingConfig {
+
+ c := NewCacheConfig()
+ c.Name = cc.Name
+ c.CacheType = cc.CacheType
+ c.CacheTypeID = cc.CacheTypeID
+
+ c.Index.FlushInterval = cc.Index.FlushInterval
+ c.Index.FlushIntervalSecs = cc.Index.FlushIntervalSecs
+ c.Index.MaxSizeBackoffBytes = cc.Index.MaxSizeBackoffBytes
+ c.Index.MaxSizeBackoffObjects = cc.Index.MaxSizeBackoffObjects
+ c.Index.MaxSizeBytes = cc.Index.MaxSizeBytes
+ c.Index.MaxSizeObjects = cc.Index.MaxSizeObjects
+ c.Index.ReapInterval = cc.Index.ReapInterval
+ c.Index.ReapIntervalSecs = cc.Index.ReapIntervalSecs
+
+ c.Badger.Directory = cc.Badger.Directory
+ c.Badger.ValueDirectory = cc.Badger.ValueDirectory
+
+ c.Filesystem.CachePath = cc.Filesystem.CachePath
+
+ c.BBolt.Bucket = cc.BBolt.Bucket
+ c.BBolt.Filename = cc.BBolt.Filename
+
+ c.Redis.ClientType = cc.Redis.ClientType
+ c.Redis.DB = cc.Redis.DB
+ c.Redis.DialTimeoutMS = cc.Redis.DialTimeoutMS
+ c.Redis.Endpoint = cc.Redis.Endpoint
+ c.Redis.Endpoints = cc.Redis.Endpoints
+ c.Redis.IdleCheckFrequencyMS = cc.Redis.IdleCheckFrequencyMS
+ c.Redis.IdleTimeoutMS = cc.Redis.IdleTimeoutMS
+ c.Redis.MaxConnAgeMS = cc.Redis.MaxConnAgeMS
+ c.Redis.MaxRetries = cc.Redis.MaxRetries
+ c.Redis.MaxRetryBackoffMS = cc.Redis.MaxRetryBackoffMS
+ c.Redis.MinIdleConns = cc.Redis.MinIdleConns
+ c.Redis.MinRetryBackoffMS = cc.Redis.MinRetryBackoffMS
+ c.Redis.Password = cc.Redis.Password
+ c.Redis.PoolSize = cc.Redis.PoolSize
+ c.Redis.PoolTimeoutMS = cc.Redis.PoolTimeoutMS
+ c.Redis.Protocol = cc.Redis.Protocol
+ c.Redis.ReadTimeoutMS = cc.Redis.ReadTimeoutMS
+ c.Redis.SentinelMaster = cc.Redis.SentinelMaster
+ c.Redis.WriteTimeoutMS = cc.Redis.WriteTimeoutMS
+
+ return c
+
+}
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
new file mode 100644
index 000000000..3e627f835
--- /dev/null
+++ b/internal/config/config_test.go
@@ -0,0 +1,64 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+func TestCopy(t *testing.T) {
+ c1 := NewConfig()
+
+ oc := c1.Origins["default"]
+ c1.NegativeCacheConfigs["default"]["404"] = 10
+
+ oc.CompressableTypeList = []string{"text/plain"}
+ oc.CompressableTypes = map[string]bool{"text/plain": true}
+ oc.NegativeCacheName = "default"
+ oc.NegativeCache = map[int]time.Duration{404: time.Duration(10) * time.Second}
+ oc.FastForwardPath = NewPathConfig()
+ oc.TLS = &TLSConfig{CertificateAuthorityPaths: []string{"foo"}}
+ oc.HealthCheckHeaders = map[string]string{headers.NameAuthorization: "Basic SomeHash"}
+
+ c2 := c1.copy()
+ if !reflect.DeepEqual(c1, c2) {
+ t.Errorf("copy mistmatch")
+ }
+}
+
+func TestString(t *testing.T) {
+ c1 := NewConfig()
+
+ c1.Origins["default"].Paths["test"] = &PathConfig{}
+
+ c1.Caches["default"].Redis.Password = "plaintext-password"
+
+ s := c1.String()
+ if !strings.Contains(s, `password = "*****"`) {
+ t.Errorf("missing password mask: %s", "*****")
+ }
+}
+
+func TestHideAuthorizationCredentials(t *testing.T) {
+ hdrs := map[string]string{headers.NameAuthorization: "Basic SomeHash"}
+ hideAuthorizationCredentials(hdrs)
+ if hdrs[headers.NameAuthorization] != "*****" {
+ t.Errorf("expected '*****' got '%s'", hdrs[headers.NameAuthorization])
+ }
+}
diff --git a/internal/config/defaults.go b/internal/config/defaults.go
new file mode 100644
index 000000000..51d34eebf
--- /dev/null
+++ b/internal/config/defaults.go
@@ -0,0 +1,81 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+const (
+ defaultLogFile = ""
+ defaultLogLevel = "INFO"
+
+ defaultProxyListenPort = 9090
+ defaultProxyListenAddress = ""
+
+ defaultMetricsListenPort = 8082
+ defaultMetricsListenAddress = ""
+
+ defaultCacheType = "memory"
+ defaultCacheTypeID = CacheTypeMemory
+
+ defaultTimeseriesTTLSecs = 21600
+ defaultFastForwardTTLSecs = 15
+ defaultMaxTTLSecs = 86400
+ defaultRevalidationFactor = 2
+
+ defaultCachePath = "/tmp/trickster"
+
+ defaultRedisClientType = "standard"
+ defaultRedisProtocol = "tcp"
+ defaultRedisEndpoint = "redis:6379"
+
+ defaultBBoltFile = "trickster.db"
+ defaultBBoltBucket = "trickster"
+
+ defaultCacheIndexReap = 3
+ defaultCacheIndexFlush = 5
+ defaultCacheMaxSizeBytes = 536870912
+ defaultMaxSizeBackoffBytes = 16777216
+ defaultMaxSizeObjects = 0
+ defaultMaxSizeBackoffObjects = 100
+ defaultMaxObjectSizeBytes = 524288
+
+ defaultOriginTRF = 1024
+ defaultOriginTEM = EvictionMethodOldest
+ defaultOriginTEMName = "oldest"
+ defaultOriginTimeoutSecs = 180
+ defaultOriginCacheName = "default"
+ defaultOriginNegativeCacheName = "default"
+ defaultBackfillToleranceSecs = 0
+ defaultKeepAliveTimeoutSecs = 300
+ defaultMaxIdleConns = 20
+
+ defaultHealthCheckPath = "-"
+ defaultHealthCheckQuery = "-"
+ defaultHealthCheckVerb = "-"
+
+ defaultConfigHandlerPath = "/trickster/config"
+ defaultPingHandlerPath = "/trickster/ping"
+)
+
+func defaultCompressableTypes() []string {
+ return []string{
+ "text/html",
+ "text/javascript",
+ "text/css",
+ "text/plain",
+ "text/xml",
+ "text/json",
+ "application/json",
+ "application/javascript",
+ "application/xml",
+ }
+}
diff --git a/internal/config/env.go b/internal/config/env.go
new file mode 100644
index 000000000..02c035730
--- /dev/null
+++ b/internal/config/env.go
@@ -0,0 +1,59 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "os"
+ "strconv"
+)
+
+const (
+ // Environment variables
+ evOriginURL = "TRK_ORIGIN_URL"
+ evOriginType = "TRK_ORIGIN_TYPE"
+ evProxyPort = "TRK_PROXY_PORT"
+ evMetricsPort = "TRK_METRICS_PORT"
+ evLogLevel = "TRK_LOG_LEVEL"
+)
+
+func (c *TricksterConfig) loadEnvVars() {
+ // Origin
+ if x := os.Getenv(evOriginURL); x != "" {
+ providedOriginURL = x
+ }
+
+ if x := os.Getenv(evOriginType); x != "" {
+ providedOriginType = x
+ }
+
+ // Proxy Port
+ if x := os.Getenv(evProxyPort); x != "" {
+ if y, err := strconv.ParseInt(x, 10, 64); err == nil {
+ c.Frontend.ListenPort = int(y)
+ }
+ }
+
+ // Metrics Port
+ if x := os.Getenv(evMetricsPort); x != "" {
+ if y, err := strconv.ParseInt(x, 10, 64); err == nil {
+ c.Metrics.ListenPort = int(y)
+ }
+ }
+
+ // LogLevel
+ if x := os.Getenv(evLogLevel); x != "" {
+ c.Logging.LogLevel = x
+ }
+
+}
diff --git a/internal/config/env_test.go b/internal/config/env_test.go
new file mode 100644
index 000000000..9e06a6a06
--- /dev/null
+++ b/internal/config/env_test.go
@@ -0,0 +1,71 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestLoadEnvVars(t *testing.T) {
+
+ os.Setenv(evOriginURL, "http://1.1.1.1:9090/some/path")
+ os.Setenv(evOriginType, "testing")
+ os.Setenv(evProxyPort, "4001")
+ os.Setenv(evMetricsPort, "4002")
+ os.Setenv(evLogLevel, "info")
+
+ a := []string{}
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ d := Origins["default"]
+ if d.OriginType != "testing" {
+ t.Errorf("expected %s got %s", "testing", d.OriginType)
+ }
+
+ if Frontend.ListenPort != 4001 {
+ t.Errorf("expected %d got %d", 4001, Frontend.ListenPort)
+ }
+
+ if Metrics.ListenPort != 4002 {
+ t.Errorf("expected %d got %d", 4002, Metrics.ListenPort)
+ }
+
+ if d.Scheme != "http" {
+ t.Errorf("expected %s got %s", "http", d.Scheme)
+ }
+
+ if d.Host != "1.1.1.1:9090" {
+ t.Errorf("expected %s got %s", "1.1.1.1:9090", d.Host)
+ }
+
+ if d.PathPrefix != "/some/path" {
+ t.Errorf("expected %s got %s", "/some/path", d.PathPrefix)
+ }
+
+ if strings.ToUpper(Logging.LogLevel) != "INFO" {
+ t.Errorf("expected %s got %s", "INFO", Logging.LogLevel)
+ }
+
+ os.Unsetenv(evOriginURL)
+ os.Unsetenv(evOriginType)
+ os.Unsetenv(evProxyPort)
+ os.Unsetenv(evMetricsPort)
+ os.Unsetenv(evLogLevel)
+
+}
diff --git a/internal/config/flags.go b/internal/config/flags.go
new file mode 100644
index 000000000..8b44a2c6f
--- /dev/null
+++ b/internal/config/flags.go
@@ -0,0 +1,90 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "flag"
+)
+
+const (
+ // Command-line flags
+ cfConfig = "config"
+ cfVersion = "version"
+ cfLogLevel = "log-level"
+ cfInstanceID = "instance-id"
+ cfOrigin = "origin-url"
+ cfOriginType = "origin-type"
+ cfProxyPort = "proxy-port"
+ cfMetricsPort = "metrics-port"
+
+ // DefaultConfigPath defines the default location of the Trickster config file
+ DefaultConfigPath = "/etc/trickster/trickster.conf"
+)
+
+// TricksterFlags holds the values for whitelisted flags
+type TricksterFlags struct {
+ PrintVersion bool
+ ConfigPath string
+ customPath bool
+ Origin string
+ OriginType string
+ ProxyListenPort int
+ MetricsListenPort int
+ LogLevel string
+ InstanceID int
+}
+
+// parseFlags parses configuration from the command-line flags.
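+// As an illustration (hypothetical invocation), arguments such as
+//   -origin-url http://prometheus:9090 -origin-type prometheus -log-level debug
+// populate Flags.Origin, Flags.OriginType and Flags.LogLevel respectively.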
+func (c *TricksterConfig) parseFlags(applicationName string, arguments []string) {
+
+ Flags = TricksterFlags{}
+
+ f := flag.NewFlagSet(applicationName, flag.ExitOnError)
+ f.BoolVar(&Flags.PrintVersion, cfVersion, false, "Prints trickster version")
+ f.StringVar(&Flags.ConfigPath, cfConfig, "", "Path to Trickster Config File")
+ f.StringVar(&Flags.LogLevel, cfLogLevel, "", "Level of Logging to use (debug, info, warn, error)")
+ f.IntVar(&Flags.InstanceID, cfInstanceID, 0, "Instance ID is for running multiple Trickster processes from the same config while logging to their own files.")
+ f.StringVar(&Flags.Origin, cfOrigin, "", "URL to the Origin. Enter it like you would in Grafana, e.g., http://prometheus:9090")
+ f.StringVar(&Flags.OriginType, cfOriginType, "", "Type of origin (prometheus, influxdb)")
+ f.IntVar(&Flags.ProxyListenPort, cfProxyPort, 0, "Port that the primary Proxy server will listen on.")
+ f.IntVar(&Flags.MetricsListenPort, cfMetricsPort, 0, "Port that the /metrics endpoint will listen on.")
+ f.Parse(arguments)
+
+ if Flags.ConfigPath != "" {
+ Flags.customPath = true
+ } else {
+ Flags.ConfigPath = DefaultConfigPath
+ }
+}
+
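+// loadFlags applies the parsed flag values to the configuration, overriding any
+// values previously set from the config file or environment variables.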
+func (c *TricksterConfig) loadFlags() {
+ if len(Flags.Origin) > 0 {
+ providedOriginURL = Flags.Origin
+ }
+ if len(Flags.OriginType) > 0 {
+ providedOriginType = Flags.OriginType
+ }
+ if Flags.ProxyListenPort > 0 {
+ c.Frontend.ListenPort = Flags.ProxyListenPort
+ }
+ if Flags.MetricsListenPort > 0 {
+ c.Metrics.ListenPort = Flags.MetricsListenPort
+ }
+ if Flags.LogLevel != "" {
+ c.Logging.LogLevel = Flags.LogLevel
+ }
+ if Flags.InstanceID > 0 {
+ c.Main.InstanceID = Flags.InstanceID
+ }
+}
diff --git a/flags_test.go b/internal/config/flags_test.go
similarity index 65%
rename from flags_test.go
rename to internal/config/flags_test.go
index 6e4a0de16..87b617e8b 100644
--- a/flags_test.go
+++ b/internal/config/flags_test.go
@@ -11,45 +11,43 @@
* limitations under the License.
*/
-package main
+package config
import (
"testing"
)
func TestLoadFlags(t *testing.T) {
- c := Config{}
+ c := NewConfig()
a := []string{
- "-origin",
+ "-origin-url",
"http://prometheus.example.com:9090",
"-proxy-port",
"9091",
"-metrics-port",
"9092",
+ "-origin-type",
+ "prometheus",
+ "-log-level",
+ "info",
+ "-instance-id",
+ "1",
}
// it should read command line flags
- loadFlags(&c, a)
+ c.parseFlags("trickster-test", a)
+ c.loadFlags()
- if c.DefaultOriginURL != a[1] {
- t.Errorf("wanted \"%s\". got \"%s\".", a[1], c.DefaultOriginURL)
+ if providedOriginURL != a[1] {
+ t.Errorf("wanted \"%s\". got \"%s\".", a[1], providedOriginURL)
}
- if c.ProxyServer.ListenPort != 9091 {
- t.Errorf("wanted \"%d\". got \"%d\".", 9091, c.ProxyServer.ListenPort)
+ if providedOriginType != a[7] {
+ t.Errorf("wanted \"%s\". got \"%s\".", a[1], providedOriginType)
+ }
+ if c.Frontend.ListenPort != 9091 {
+ t.Errorf("wanted \"%d\". got \"%d\".", 9091, c.Frontend.ListenPort)
}
if c.Metrics.ListenPort != 9092 {
t.Errorf("wanted \"%d\". got \"%d\".", 9092, c.Metrics.ListenPort)
}
}
-
-func TestLoadConfiguration(t *testing.T) {
- c := Config{}
- a := []string{}
-
- // it should not error if config path is not set
- err := loadConfiguration(&c, a)
-
- if err != nil {
- t.Error(err)
- }
-}
diff --git a/internal/config/loader.go b/internal/config/loader.go
new file mode 100644
index 000000000..9e77a0429
--- /dev/null
+++ b/internal/config/loader.go
@@ -0,0 +1,171 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Load populates the application configuration, starting with default values,
+// then overriding with any provided config file, then environment variables, and finally command-line flags
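+// As an illustration (hypothetical arguments), a call such as
+//   Load("trickster", "1.0", []string{"-config", "/path/to/trickster.conf", "-log-level", "debug"})
+// applies the defaults first, then the settings in the named file, then any
+// environment variable overrides, and finally sets the log level from the flag.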
+func Load(applicationName string, applicationVersion string, arguments []string) error {
+
+ providedOriginURL = ""
+ providedOriginType = ""
+
+ LoaderWarnings = make([]string, 0)
+
+ c := NewConfig()
+ c.parseFlags(applicationName, arguments) // Parse here to get config file path and version flags
+ if Flags.PrintVersion {
+ return nil
+ }
+ if err := c.loadFile(); err != nil && Flags.customPath {
+ // a user-provided path couldn't be loaded. return the error for the application to handle
+ return err
+ }
+
+ c.loadEnvVars()
+ c.loadFlags() // load parsed flags to override file and envs
+
+ // apply any origin URL and type provided via flags or environment variables to the default origin
+ if d, ok := c.Origins["default"]; ok {
+ if providedOriginURL != "" {
+ url, err := url.Parse(providedOriginURL)
+ if err != nil {
+ return err
+ }
+ if providedOriginType != "" {
+ d.OriginType = providedOriginType
+ }
+ d.OriginURL = providedOriginURL
+ d.Scheme = url.Scheme
+ d.Host = url.Host
+ d.PathPrefix = url.Path
+ }
+ // If the auto-created "default" origin was never given a URL (via config file,
+ // environment variable, or flag), remove it so only user-configured origins remain
+ if d.OriginURL == "" {
+ delete(c.Origins, "default")
+ }
+
+ if providedOriginType != "" {
+ d.OriginType = providedOriginType
+ }
+ }
+
+ if len(c.Origins) == 0 {
+ return fmt.Errorf("no valid origins configured%s", "")
+ }
+
+ Config = c
+ Main = c.Main
+ Origins = c.Origins
+ Caches = c.Caches
+ Frontend = c.Frontend
+ Logging = c.Logging
+ Metrics = c.Metrics
+ NegativeCacheConfigs = c.NegativeCacheConfigs
+
+ for k, n := range NegativeCacheConfigs {
+ for c := range n {
+ ci, err := strconv.Atoi(c)
+ if err != nil {
+ return fmt.Errorf(`invalid negative cache config in %s: %s is not a valid status code`, k, c)
+ }
+ if ci < 400 || ci >= 600 {
+ return fmt.Errorf(`invalid negative cache config in %s: %s is not a valid status code`, k, c)
+ }
+ }
+ }
+
+ for k, o := range c.Origins {
+
+ if o.OriginURL == "" {
+ return fmt.Errorf(`missing origin-url for origin "%s"`, k)
+ }
+
+ url, err := url.Parse(o.OriginURL)
+ if err != nil {
+ return err
+ }
+
+ if o.OriginType == "" {
+ return fmt.Errorf(`missing origin-type for origin "%s"`, k)
+ }
+
+ if strings.HasSuffix(url.Path, "/") {
+ url.Path = url.Path[0 : len(url.Path)-1]
+ }
+
+ o.Name = k
+ o.Scheme = url.Scheme
+ o.Host = url.Host
+ o.PathPrefix = url.Path
+ o.Timeout = time.Duration(o.TimeoutSecs) * time.Second
+ o.BackfillTolerance = time.Duration(o.BackfillToleranceSecs) * time.Second
+ o.TimeseriesRetention = time.Duration(o.TimeseriesRetentionFactor)
+ o.TimeseriesTTL = time.Duration(o.TimeseriesTTLSecs) * time.Second
+ o.FastForwardTTL = time.Duration(o.FastForwardTTLSecs) * time.Second
+ o.MaxTTL = time.Duration(o.MaxTTLSecs) * time.Second
+
+ if o.CompressableTypeList != nil {
+ o.CompressableTypes = make(map[string]bool)
+ for _, v := range o.CompressableTypeList {
+ o.CompressableTypes[v] = true
+ }
+ }
+
+ if o.CacheKeyPrefix == "" {
+ o.CacheKeyPrefix = o.Host
+ }
+
+ nc, ok := NegativeCacheConfigs[o.NegativeCacheName]
+ if !ok {
+ return fmt.Errorf(`invalid negative cache name: %s`, o.NegativeCacheName)
+ }
+
+ nc2 := map[int]time.Duration{}
+ for c, s := range nc {
+ ci, _ := strconv.Atoi(c)
+ nc2[ci] = time.Duration(s) * time.Second
+ }
+ o.NegativeCache = nc2
+
+ // enforce MaxTTL
+ if o.TimeseriesTTLSecs > o.MaxTTLSecs {
+ o.TimeseriesTTLSecs = o.MaxTTLSecs
+ o.TimeseriesTTL = o.MaxTTL
+ }
+
+ // unlikely but why not spend a few nanoseconds to check it at startup
+ if o.FastForwardTTLSecs > o.MaxTTLSecs {
+ o.FastForwardTTLSecs = o.MaxTTLSecs
+ o.FastForwardTTL = o.MaxTTL
+ }
+
+ Origins[k] = o
+ }
+
+ for _, c := range Caches {
+ c.Index.FlushInterval = time.Duration(c.Index.FlushIntervalSecs) * time.Second
+ c.Index.ReapInterval = time.Duration(c.Index.ReapIntervalSecs) * time.Second
+ }
+
+ return nil
+}
diff --git a/internal/config/loader_test.go b/internal/config/loader_test.go
new file mode 100644
index 000000000..8be0fdfb2
--- /dev/null
+++ b/internal/config/loader_test.go
@@ -0,0 +1,668 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+)
+
+func TestLoadConfiguration(t *testing.T) {
+ a := []string{"-origin-type", "testing", "-origin-url", "http://prometheus:9090/test/path"}
+ // it should not error if config path is not set
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if Origins["default"].TimeseriesRetention != 1024 {
+ t.Errorf("expected 1024, got %d", Origins["default"].TimeseriesRetention)
+ }
+
+ if Origins["default"].FastForwardTTL != time.Duration(15)*time.Second {
+ t.Errorf("expected 15, got %s", Origins["default"].FastForwardTTL)
+ }
+
+ if Caches["default"].Index.ReapInterval != time.Duration(3)*time.Second {
+ t.Errorf("expected 3, got %s", Caches["default"].Index.ReapInterval)
+ }
+
+}
+
+func TestLoadConfigurationFileFailures(t *testing.T) {
+
+ tests := []struct {
+ filename string
+ expected string
+ }{
+ { // Case 0
+ "../../testdata/test.missing-origin-url.conf",
+ `missing origin-url for origin "2"`,
+ },
+ { // Case 1
+ "../../testdata/test.bad_origin_url.conf",
+ fmt.Sprintf(`parse %s: first path segment in URL cannot contain colon`, "sasdf_asd[as;://asdf923_-=a*"),
+ },
+ { // Case 2
+ "../../testdata/test.missing_origin_type.conf",
+ `missing origin-type for origin "test"`,
+ },
+ { // Case 3
+ "../../testdata/test.bad-cache-name.conf",
+ `invalid cache name [test_fail] provided in origin config [test]`,
+ },
+ { // Case 4
+ "../../testdata/test.invalid-negative-cache-1.conf",
+ `invalid negative cache config in default: a is not a valid status code`,
+ },
+ { // Case 5
+ "../../testdata/test.invalid-negative-cache-2.conf",
+ `invalid negative cache config in default: 1212 is not a valid status code`,
+ },
+ { // Case 6
+ "../../testdata/test.invalid-negative-cache-3.conf",
+ `invalid negative cache name: foo`,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ err := Load("trickster-test", "0", []string{"-config", test.filename})
+ if err == nil {
+ t.Errorf("expected error `%s` got nothing", test.expected)
+ } else if err.Error() != test.expected {
+ t.Errorf("expected error `%s` got `%s`", test.expected, err.Error())
+ }
+
+ })
+ }
+
+}
+
+func TestLoadConfigurationMissingOriginURL(t *testing.T) {
+ expected := `no valid origins configured`
+ a := []string{"-origin-type", "testing"}
+ err := Load("trickster-test", "0", a)
+ if err == nil {
+ t.Errorf("expected error `%s` got nothing", expected)
+ } else if err.Error() != expected {
+ t.Errorf("expected error `%s` got `%s`", expected, err.Error())
+ }
+}
+
+func TestFullLoadConfiguration(t *testing.T) {
+ a := []string{"-config", "../../testdata/test.full.conf"}
+ // it should not error when loading a complete, valid config file
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Test Proxy Server
+ if Frontend.ListenPort != 57821 {
+ t.Errorf("expected 57821, got %d", Frontend.ListenPort)
+ }
+
+ if Frontend.ListenAddress != "test" {
+ t.Errorf("expected test, got %s", Frontend.ListenAddress)
+ }
+
+ if Frontend.TLSListenAddress != "test-tls" {
+ t.Errorf("expected test-tls, got %s", Frontend.TLSListenAddress)
+ }
+
+ if Frontend.TLSListenPort != 38821 {
+ t.Errorf("expected 38821, got %d", Frontend.TLSListenPort)
+ }
+
+ // Test Metrics Server
+ if Metrics.ListenPort != 57822 {
+ t.Errorf("expected 57821, got %d", Metrics.ListenPort)
+ }
+
+ if Metrics.ListenAddress != "metrics_test" {
+ t.Errorf("expected test, got %s", Metrics.ListenAddress)
+ }
+
+ // Test Logging
+ if Logging.LogLevel != "test_log_level" {
+ t.Errorf("expected test_log_level, got %s", Logging.LogLevel)
+ }
+
+ if Logging.LogFile != "test_file" {
+ t.Errorf("expected test_file, got %s", Logging.LogFile)
+ }
+
+ // Test Origins
+
+ o, ok := Origins["test"]
+ if !ok {
+ t.Errorf("unable to find origin config: %s", "test")
+ return
+ }
+
+ if o.OriginType != "test_type" {
+ t.Errorf("expected test_type, got %s", o.OriginType)
+ }
+
+ if o.CacheName != "test" {
+ t.Errorf("expected test, got %s", o.CacheName)
+ }
+
+ if o.Scheme != "scheme" {
+ t.Errorf("expected scheme, got %s", o.Scheme)
+ }
+
+ if o.Host != "test_host" {
+ t.Errorf("expected test_host, got %s", o.Host)
+ }
+
+ if o.PathPrefix != "/test_path_prefix" {
+ t.Errorf("expected test_path_prefix, got %s", o.PathPrefix)
+ }
+
+ if o.TimeseriesRetentionFactor != 666 {
+ t.Errorf("expected 666, got %d", o.TimeseriesRetentionFactor)
+ }
+
+ if o.TimeseriesEvictionMethod != EvictionMethodLRU {
+ t.Errorf("expected %s, got %s", EvictionMethodLRU, o.TimeseriesEvictionMethod)
+ }
+
+ if !o.FastForwardDisable {
+ t.Errorf("expected fast_forward_disable true, got %t", o.FastForwardDisable)
+ }
+
+ if o.BackfillToleranceSecs != 301 {
+ t.Errorf("expected 301, got %d", o.BackfillToleranceSecs)
+ }
+
+ if o.TimeoutSecs != 37 {
+ t.Errorf("expected 37, got %d", o.TimeoutSecs)
+ }
+
+ if o.IsDefault != true {
+ t.Errorf("expected true got %t", o.IsDefault)
+ }
+
+ if o.MaxIdleConns != 23 {
+ t.Errorf("expected %d got %d", 23, o.MaxIdleConns)
+ }
+
+ if o.KeepAliveTimeoutSecs != 7 {
+ t.Errorf("expected %d got %d", 7, o.KeepAliveTimeoutSecs)
+ }
+
+ // MaxTTLSecs is 300, thus should override TimeseriesTTLSecs = 8666
+ if o.TimeseriesTTLSecs != 300 {
+ t.Errorf("expected 300, got %d", o.TimeseriesTTLSecs)
+ }
+
+ // MaxTTLSecs is 300, thus should override FastForwardTTLSecs = 382
+ if o.FastForwardTTLSecs != 300 {
+ t.Errorf("expected 300, got %d", o.FastForwardTTLSecs)
+ }
+
+ if o.TLS == nil {
+ t.Errorf("expected tls config for origin %s, got nil", "test")
+ }
+
+ if !o.TLS.InsecureSkipVerify {
+ t.Errorf("expected true got %t", o.TLS.InsecureSkipVerify)
+ }
+
+ if o.TLS.FullChainCertPath != "../../testdata/test.01.cert.pem" {
+ t.Errorf("expected ../../testdata/test.01.cert.pem got %s", o.TLS.FullChainCertPath)
+ }
+
+ if o.TLS.PrivateKeyPath != "../../testdata/test.01.key.pem" {
+ t.Errorf("expected ../../testdata/test.01.key.pem got %s", o.TLS.PrivateKeyPath)
+ }
+
+ if o.TLS.ClientCertPath != "test_client_cert" {
+ t.Errorf("expected test_client_cert got %s", o.TLS.ClientCertPath)
+ }
+
+ if o.TLS.ClientKeyPath != "test_client_key" {
+ t.Errorf("expected test_client_key got %s", o.TLS.ClientKeyPath)
+ }
+
+ // Test Caches
+
+ c, ok := Caches["test"]
+ if !ok {
+ t.Errorf("unable to find cache config: %s", "test")
+ return
+ }
+
+ if c.CacheType != "redis" {
+ t.Errorf("expected redis, got %s", c.CacheType)
+ }
+
+ if c.Index.ReapIntervalSecs != 4 {
+ t.Errorf("expected 4, got %d", c.Index.ReapIntervalSecs)
+ }
+
+ if c.Index.FlushIntervalSecs != 6 {
+ t.Errorf("expected 6, got %d", c.Index.FlushIntervalSecs)
+ }
+
+ if c.Index.MaxSizeBytes != 536870913 {
+ t.Errorf("expected 536870913, got %d", c.Index.MaxSizeBytes)
+ }
+
+ if c.Index.MaxSizeBackoffBytes != 16777217 {
+ t.Errorf("expected 16777217, got %d", c.Index.MaxSizeBackoffBytes)
+ }
+
+ if c.Index.MaxSizeObjects != 80 {
+ t.Errorf("expected 80, got %d", c.Index.MaxSizeObjects)
+ }
+
+ if c.Index.MaxSizeBackoffObjects != 20 {
+ t.Errorf("expected 20, got %d", c.Index.MaxSizeBackoffObjects)
+ }
+
+ if c.Redis.ClientType != "test_redis_type" {
+ t.Errorf("expected test_redis_type, got %s", c.Redis.ClientType)
+ }
+
+ if c.Redis.Protocol != "test_protocol" {
+ t.Errorf("expected test_protocol, got %s", c.Redis.Protocol)
+ }
+
+ if c.Redis.Endpoint != "test_endpoint" {
+ t.Errorf("expected test_endpoint, got %s", c.Redis.Endpoint)
+ }
+
+ if c.Redis.SentinelMaster != "test_master" {
+ t.Errorf("expected test_master, got %s", c.Redis.SentinelMaster)
+ }
+
+ if c.Redis.Password != "test_password" {
+ t.Errorf("expected test_password, got %s", c.Redis.Password)
+ }
+
+ if c.Redis.DB != 42 {
+ t.Errorf("expected 42, got %d", c.Redis.DB)
+ }
+
+ if c.Redis.MaxRetries != 6 {
+ t.Errorf("expected 6, got %d", c.Redis.MaxRetries)
+ }
+
+ if c.Redis.MinRetryBackoffMS != 9 {
+ t.Errorf("expected 9, got %d", c.Redis.MinRetryBackoffMS)
+ }
+
+ if c.Redis.MaxRetryBackoffMS != 513 {
+ t.Errorf("expected 513, got %d", c.Redis.MaxRetryBackoffMS)
+ }
+
+ if c.Redis.DialTimeoutMS != 5001 {
+ t.Errorf("expected 5001, got %d", c.Redis.DialTimeoutMS)
+ }
+
+ if c.Redis.ReadTimeoutMS != 3001 {
+ t.Errorf("expected 3001, got %d", c.Redis.ReadTimeoutMS)
+ }
+
+ if c.Redis.WriteTimeoutMS != 3002 {
+ t.Errorf("expected 3002, got %d", c.Redis.WriteTimeoutMS)
+ }
+
+ if c.Redis.PoolSize != 21 {
+ t.Errorf("expected 21, got %d", c.Redis.PoolSize)
+ }
+
+ if c.Redis.MinIdleConns != 5 {
+ t.Errorf("expected 5, got %d", c.Redis.PoolSize)
+ }
+
+ if c.Redis.MaxConnAgeMS != 2000 {
+ t.Errorf("expected 2000, got %d", c.Redis.MaxConnAgeMS)
+ }
+
+ if c.Redis.PoolTimeoutMS != 4001 {
+ t.Errorf("expected 4001, got %d", c.Redis.PoolTimeoutMS)
+ }
+
+ if c.Redis.IdleTimeoutMS != 300001 {
+ t.Errorf("expected 300001, got %d", c.Redis.IdleTimeoutMS)
+ }
+
+ if c.Redis.IdleCheckFrequencyMS != 60001 {
+ t.Errorf("expected 60001, got %d", c.Redis.IdleCheckFrequencyMS)
+ }
+
+ if c.Filesystem.CachePath != "test_cache_path" {
+ t.Errorf("expected test_cache_path, got %s", c.Filesystem.CachePath)
+ }
+
+ if c.BBolt.Filename != "test_filename" {
+ t.Errorf("expected test_filename, got %s", c.BBolt.Filename)
+ }
+
+ if c.BBolt.Bucket != "test_bucket" {
+ t.Errorf("expected test_bucket, got %s", c.BBolt.Bucket)
+ }
+
+ if c.Badger.Directory != "test_directory" {
+ t.Errorf("expected test_directory, got %s", c.Badger.Directory)
+ }
+
+ if c.Badger.ValueDirectory != "test_value_directory" {
+ t.Errorf("expected test_value_directory, got %s", c.Badger.ValueDirectory)
+ }
+}
+
+func TestEmptyLoadConfiguration(t *testing.T) {
+ a := []string{"-config", "../../testdata/test.empty.conf"}
+ // it should not error when loading an empty config file
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(Origins) != 1 {
+ // we define a "test" cache, but never reference it by an origin,
+ // so it should not make it into the running config
+ t.Errorf("expected %d, got %d", 1, len(Origins))
+ }
+
+ // Test Proxy Server
+ if Frontend.ListenPort != defaultProxyListenPort {
+ t.Errorf("expected %d, got %d", defaultProxyListenPort, Frontend.ListenPort)
+ }
+
+ if Frontend.ListenAddress != defaultProxyListenAddress {
+ t.Errorf("expected '%s', got '%s'", defaultProxyListenAddress, Frontend.ListenAddress)
+ }
+
+ // Test Metrics Server
+ if Metrics.ListenPort != defaultMetricsListenPort {
+ t.Errorf("expected %d, got %d", defaultMetricsListenPort, Metrics.ListenPort)
+ }
+
+ if Metrics.ListenAddress != defaultMetricsListenAddress {
+ t.Errorf("expected '%s', got '%s'", defaultMetricsListenAddress, Metrics.ListenAddress)
+ }
+
+ // Test Logging
+ if Logging.LogLevel != defaultLogLevel {
+ t.Errorf("expected %s, got %s", defaultLogLevel, Logging.LogLevel)
+ }
+
+ if Logging.LogFile != defaultLogFile {
+ t.Errorf("expected '%s', got '%s'", defaultLogFile, Logging.LogFile)
+ }
+
+ // Test Origins
+
+ o, ok := Origins["test"]
+ if !ok {
+ t.Errorf("unable to find origin config: %s", "test")
+ return
+ }
+
+ if o.OriginType != "test" {
+ t.Errorf("expected %s origin type, got %s", "test", o.OriginType)
+ }
+
+ if o.CacheName != defaultOriginCacheName {
+ t.Errorf("expected %s, got %s", defaultOriginCacheName, o.CacheName)
+ }
+
+ if o.Scheme != "http" {
+ t.Errorf("expected %s, got %s", "http", o.Scheme)
+ }
+
+ if o.Host != "1" {
+ t.Errorf("expected %s, got %s", "1", o.Host)
+ }
+
+ if o.PathPrefix != "" {
+ t.Errorf("expected '%s', got '%s'", "", o.PathPrefix)
+ }
+
+ if o.TimeseriesRetentionFactor != defaultOriginTRF {
+ t.Errorf("expected %d, got %d", defaultOriginTRF, o.TimeseriesRetentionFactor)
+ }
+
+ if o.FastForwardDisable {
+ t.Errorf("expected fast_forward_disable false, got %t", o.FastForwardDisable)
+ }
+
+ if o.BackfillToleranceSecs != defaultBackfillToleranceSecs {
+ t.Errorf("expected %d, got %d", defaultBackfillToleranceSecs, o.BackfillToleranceSecs)
+ }
+
+ if o.TimeoutSecs != defaultOriginTimeoutSecs {
+ t.Errorf("expected %d, got %d", defaultOriginTimeoutSecs, o.TimeoutSecs)
+ }
+
+ if o.TimeseriesTTLSecs != defaultTimeseriesTTLSecs {
+ t.Errorf("expected %d, got %d", defaultTimeseriesTTLSecs, o.TimeseriesTTLSecs)
+ }
+
+ if o.FastForwardTTLSecs != defaultFastForwardTTLSecs {
+ t.Errorf("expected %d, got %d", defaultFastForwardTTLSecs, o.FastForwardTTLSecs)
+ }
+
+ c, ok := Caches["default"]
+ if !ok {
+ t.Errorf("unable to find cache config: %s", "default")
+ return
+ }
+
+ if c.CacheType != defaultCacheType {
+ t.Errorf("expected %s, got %s", defaultCacheType, c.CacheType)
+ }
+
+ if c.Index.ReapIntervalSecs != defaultCacheIndexReap {
+ t.Errorf("expected %d, got %d", defaultCacheIndexReap, c.Index.ReapIntervalSecs)
+ }
+
+ if c.Index.FlushIntervalSecs != defaultCacheIndexFlush {
+ t.Errorf("expected %d, got %d", defaultCacheIndexFlush, c.Index.FlushIntervalSecs)
+ }
+
+ if c.Index.MaxSizeBytes != defaultCacheMaxSizeBytes {
+ t.Errorf("expected %d, got %d", defaultCacheMaxSizeBytes, c.Index.MaxSizeBytes)
+ }
+
+ if c.Index.MaxSizeBackoffBytes != defaultMaxSizeBackoffBytes {
+ t.Errorf("expected %d, got %d", defaultMaxSizeBackoffBytes, c.Index.MaxSizeBackoffBytes)
+ }
+
+ if c.Index.MaxSizeObjects != defaultMaxSizeObjects {
+ t.Errorf("expected %d, got %d", defaultMaxSizeObjects, c.Index.MaxSizeObjects)
+ }
+
+ if c.Index.MaxSizeBackoffObjects != defaultMaxSizeBackoffObjects {
+ t.Errorf("expected %d, got %d", defaultMaxSizeBackoffObjects, c.Index.MaxSizeBackoffObjects)
+ }
+
+ if c.Index.ReapIntervalSecs != 3 {
+ t.Errorf("expected 3, got %d", c.Index.ReapIntervalSecs)
+ }
+
+ if c.Redis.ClientType != defaultRedisClientType {
+ t.Errorf("expected %s, got %s", defaultRedisClientType, c.Redis.ClientType)
+ }
+
+ if c.Redis.Protocol != defaultRedisProtocol {
+ t.Errorf("expected %s, got %s", defaultRedisProtocol, c.Redis.Protocol)
+ }
+
+ if c.Redis.Endpoint != "redis:6379" {
+ t.Errorf("expected redis:6379, got %s", c.Redis.Endpoint)
+ }
+
+ if c.Redis.SentinelMaster != "" {
+ t.Errorf("expected '', got %s", c.Redis.SentinelMaster)
+ }
+
+ if c.Redis.Password != "" {
+ t.Errorf("expected '', got %s", c.Redis.Password)
+ }
+
+ if c.Redis.DB != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.DB)
+ }
+
+ if c.Redis.MaxRetries != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.MaxRetries)
+ }
+
+ if c.Redis.MinRetryBackoffMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.MinRetryBackoffMS)
+ }
+
+ if c.Redis.MaxRetryBackoffMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.MaxRetryBackoffMS)
+ }
+
+ if c.Redis.DialTimeoutMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.DialTimeoutMS)
+ }
+
+ if c.Redis.ReadTimeoutMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.ReadTimeoutMS)
+ }
+
+ if c.Redis.WriteTimeoutMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.WriteTimeoutMS)
+ }
+
+ if c.Redis.PoolSize != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.PoolSize)
+ }
+
+ if c.Redis.MinIdleConns != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.PoolSize)
+ }
+
+ if c.Redis.MaxConnAgeMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.MaxConnAgeMS)
+ }
+
+ if c.Redis.PoolTimeoutMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.PoolTimeoutMS)
+ }
+
+ if c.Redis.IdleTimeoutMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.IdleTimeoutMS)
+ }
+
+ if c.Redis.IdleCheckFrequencyMS != 0 {
+ t.Errorf("expected 0, got %d", c.Redis.IdleCheckFrequencyMS)
+ }
+
+ if c.Filesystem.CachePath != "/tmp/trickster" {
+ t.Errorf("expected /tmp/trickster, got %s", c.Filesystem.CachePath)
+ }
+
+ if c.BBolt.Filename != "trickster.db" {
+ t.Errorf("expected trickster.db, got %s", c.BBolt.Filename)
+ }
+
+ if c.BBolt.Bucket != "trickster" {
+ t.Errorf("expected trickster, got %s", c.BBolt.Bucket)
+ }
+
+ if c.Badger.Directory != "/tmp/trickster" {
+ t.Errorf("expected /tmp/trickster, got %s", c.Badger.Directory)
+ }
+
+ if c.Badger.ValueDirectory != "/tmp/trickster" {
+ t.Errorf("expected /tmp/trickster, got %s", c.Badger.ValueDirectory)
+ }
+}
+
+func TestLoadConfigurationVersion(t *testing.T) {
+ a := []string{"-version"}
+ // it should not error when only the -version flag is provided
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !Flags.PrintVersion {
+ t.Errorf("expected true got false")
+ }
+}
+
+func TestLoadConfigurationBadPath(t *testing.T) {
+
+ const badPath = "/afeas/aasdvasvasdf48/ag4a4gas"
+
+ a := []string{"-config", badPath}
+ // it should error when the config file path does not exist
+ err := Load("trickster-test", "0", a)
+ if err == nil {
+ t.Errorf("expected error: open %s: no such file or directory", badPath)
+ }
+}
+
+func TestLoadConfigurationBadUrl(t *testing.T) {
+ const badURL = ":httap:]/]/example.com9091"
+ a := []string{"-origin-url", badURL}
+ err := Load("trickster-test", "0", a)
+ if err == nil {
+ t.Errorf("expected error: parse %s: missing protocol scheme", badURL)
+ }
+}
+
+func TestLoadConfigurationWarning1(t *testing.T) {
+
+ a := []string{"-config", "../../testdata/test.warning1.conf"}
+ // it should not error when the config file only produces loader warnings
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ expected := 1
+ l := len(LoaderWarnings)
+
+ if l != expected {
+ t.Errorf("exepcted %d got %d", expected, l)
+ }
+
+}
+
+func TestLoadConfigurationWarning2(t *testing.T) {
+
+ a := []string{"-config", "../../testdata/test.warning2.conf"}
+ // it should not error when the config file only produces loader warnings
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+ expected := 1
+ l := len(LoaderWarnings)
+
+ if l != expected {
+ t.Errorf("exepcted %d got %d", expected, l)
+ }
+
+}
diff --git a/internal/config/origin_types.go b/internal/config/origin_types.go
new file mode 100644
index 000000000..3cfdeda08
--- /dev/null
+++ b/internal/config/origin_types.go
@@ -0,0 +1,62 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import "strconv"
+
+// OriginType enumerates the supported origin types
+type OriginType int
+
+const (
+ // OriginTypeRPC represents the Reverse Proxy Cache origin type
+ OriginTypeRPC = OriginType(iota)
+ // OriginTypePrometheus represents the Prometheus origin type
+ OriginTypePrometheus
+ // OriginTypeInfluxDB represents the InfluxDB origin type
+ OriginTypeInfluxDB
+ // OriginTypeIronDB represents the IRONdb origin type
+ OriginTypeIronDB
+ // OriginTypeClickHouse represents the ClickHouse origin type
+ OriginTypeClickHouse
+)
+
+var originTypeNames = map[string]OriginType{
+ "rpc": OriginTypeRPC,
+ "reverseproxycache": OriginTypeRPC,
+ "prometheus": OriginTypePrometheus,
+ "influxdb": OriginTypeInfluxDB,
+ "irondb": OriginTypeIronDB,
+ "clickhouse": OriginTypeClickHouse,
+}
+
+var originTypeValues = map[OriginType]string{
+ OriginTypeRPC: "rpc",
+ OriginTypePrometheus: "prometheus",
+ OriginTypeInfluxDB: "influxdb",
+ OriginTypeIronDB: "irondb",
+ OriginTypeClickHouse: "clickhouse",
+}
+
+func (t OriginType) String() string {
+ if v, ok := originTypeValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
+
+// IsValidOriginType returns true if the provided OriginType is valid for use with Trickster
+func IsValidOriginType(t string) bool {
+ _, ok := originTypeNames[t]
+ return ok
+}
diff --git a/internal/config/origin_types_test.go b/internal/config/origin_types_test.go
new file mode 100644
index 000000000..028aac7f6
--- /dev/null
+++ b/internal/config/origin_types_test.go
@@ -0,0 +1,64 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "strconv"
+ "testing"
+)
+
+func TestOriginTypeString(t *testing.T) {
+
+ t1 := OriginTypeRPC
+ t2 := OriginTypePrometheus
+ var t3 OriginType = 13
+
+ if t1.String() != "rpc" {
+ t.Errorf("expected %s got %s", "rpc", t1.String())
+ }
+
+ if t2.String() != "prometheus" {
+ t.Errorf("expected %s got %s", "prometheus", t2.String())
+ }
+
+ if t3.String() != "13" {
+ t.Errorf("expected %s got %s", "13", t3.String())
+ }
+
+}
+
+func TestIsValidOriginType(t *testing.T) {
+
+ tests := []struct {
+ o string
+ expected bool
+ }{
+ {"rpc", true},
+ {"prometheus", true},
+ {"", false},
+ {"invalid", false},
+ {"influxdb", true},
+ {"irondb", true},
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ res := IsValidOriginType(test.o)
+ if test.expected != res {
+ t.Errorf("expected %t got %t", test.expected, res)
+ }
+ })
+ }
+
+}
diff --git a/internal/config/path.go b/internal/config/path.go
new file mode 100644
index 000000000..1c5c7280f
--- /dev/null
+++ b/internal/config/path.go
@@ -0,0 +1,209 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/Comcast/trickster/internal/proxy/methods"
+ ts "github.com/Comcast/trickster/internal/util/strings"
+)
+
+// PathMatchType enumerates the types of Path Matches used when registering Paths with the Router
+type PathMatchType int
+
+// KeyHasherFunc is a custom function that returns a hashed key value string for cache objects
+type KeyHasherFunc func(path string, params url.Values, headers http.Header, body io.ReadCloser, extra string) string
+
+const (
+ // PathMatchTypeExact indicates the router will map the Path by exact match against incoming requests
+ PathMatchTypeExact = PathMatchType(iota)
+ // PathMatchTypePrefix indicates the router will map the Path by prefix against incoming requests
+ PathMatchTypePrefix
+)
+
+var pathMatchTypeNames = map[string]PathMatchType{
+ "exact": PathMatchTypeExact,
+ "prefix": PathMatchTypePrefix,
+}
+
+var pathMatchTypeValues = map[PathMatchType]string{
+ PathMatchTypeExact: "exact",
+ PathMatchTypePrefix: "prefix",
+}
+
+func (t PathMatchType) String() string {
+ if v, ok := pathMatchTypeValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
+
+// PathConfig defines a URL Path that is associated with an HTTP Handler
+type PathConfig struct {
+ // Path indicates the HTTP Request's URL PATH to which this configuration applies
+ Path string `toml:"path"`
+ // MatchTypeName indicates the type of path match the router will apply to the path ('exact' or 'prefix')
+ MatchTypeName string `toml:"match_type"`
+ // HandlerName provides the name of the HTTP handler to use
+ HandlerName string `toml:"handler"`
+ // Methods provides the list of permitted HTTP request methods for this Path
+ Methods []string `toml:"methods"`
+ // CacheKeyParams provides the list of http request query parameters to be included in the hash for each request's cache key
+ CacheKeyParams []string `toml:"cache_key_params"`
+ // CacheKeyHeaders provides the list of http request headers to be included in the hash for each request's cache key
+ CacheKeyHeaders []string `toml:"cache_key_headers"`
+ // CacheKeyFormFields provides the list of http request body fields to be included in the hash for each request's cache key
+ CacheKeyFormFields []string `toml:"cache_key_form_fields"`
+ // RequestHeaders is a map of headers that will be added to requests to the upstream Origin for this path
+ RequestHeaders map[string]string `toml:"request_headers"`
+ // RequestParams is a map of headers that will be added to requests to the upstream Origin for this path
+ RequestParams map[string]string `toml:"request_params"`
+ // ResponseHeaders is a map of http headers that will be added to responses to the downstream client
+ ResponseHeaders map[string]string `toml:"response_headers"`
+ // ResponseCode sets a custom response code to be sent to downstream clients for this path.
+ ResponseCode int `toml:"response_code"`
+ // ResponseBody sets a custom response body to be sent to the downstream client for this path.
+ ResponseBody string `toml:"response_body"`
+ // NoMetrics, when set to true, disables metrics decoration for the path
+ NoMetrics bool `toml:"no_metrics"`
+ // CollapsedForwardingName indicates 'basic' or 'progressive' Collapsed Forwarding to be used by this path.
+ CollapsedForwardingName string `toml:"collapsed_forwarding"`
+
+ // Synthesized PathConfig Values
+ //
+ // Handler is the HTTP Handler represented by the Path's HandlerName
+ Handler http.Handler `toml:"-"`
+ // HasCustomResponseBody is a boolean indicating if the response body is custom
+ // this flag allows an empty string response to be configured as a return value
+ HasCustomResponseBody bool `toml:"-"`
+ // ResponseBodyBytes provides a byte slice version of the ResponseBody value
+ ResponseBodyBytes []byte `toml:"-"`
+ // MatchType is the PathMatchType representation of MatchTypeName
+ MatchType PathMatchType `toml:"-"`
+ // CollapsedForwardingType is the typed representation of CollapsedForwardingName
+ CollapsedForwardingType CollapsedForwardingType `toml:"-"`
+ // OriginConfig is the reference to the PathConfig's parent Origin Config
+ OriginConfig *OriginConfig `toml:"-"`
+ // KeyHasher points to an optional function that hashes the cacheKey with a custom algorithm
+ // NOTE: This is used by some origins like IronDB, but is not configurable by end users
+ // due to a bug in the vendored toml package, this must be a slice to avoid panic
+ KeyHasher []KeyHasherFunc `toml:"-"`
+
+ custom []string `toml:"-"`
+}
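+
+// As a sketch (values are illustrative, not defaults; assumes paths are declared under the
+// owning origin's configuration table), a path block in TOML might look like:
+//   [origins.example.paths.query]
+//   path = "/api/v1/query"
+//   match_type = "exact"
+//   handler = "proxy"
+//   methods = [ "GET", "POST" ]
+//   cache_key_params = [ "query", "step" ]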
+
+// NewPathConfig returns a newly-instantiated *PathConfig
+func NewPathConfig() *PathConfig {
+ return &PathConfig{
+ Path: "/",
+ Methods: methods.CacheableHTTPMethods(),
+ HandlerName: "proxy",
+ MatchTypeName: "exact",
+ MatchType: PathMatchTypeExact,
+ CollapsedForwardingName: "basic",
+ CollapsedForwardingType: CFTypeBasic,
+ CacheKeyParams: make([]string, 0),
+ CacheKeyHeaders: make([]string, 0),
+ CacheKeyFormFields: make([]string, 0),
+ custom: make([]string, 0),
+ RequestHeaders: make(map[string]string),
+ RequestParams: make(map[string]string),
+ ResponseHeaders: make(map[string]string),
+ KeyHasher: nil,
+ }
+}
+
+// Clone returns an exact copy of the subject PathConfig
+func (p *PathConfig) Clone() *PathConfig {
+ c := &PathConfig{
+ Path: p.Path,
+ OriginConfig: p.OriginConfig,
+ MatchTypeName: p.MatchTypeName,
+ MatchType: p.MatchType,
+ HandlerName: p.HandlerName,
+ Handler: p.Handler,
+ RequestHeaders: ts.CloneMap(p.RequestHeaders),
+ RequestParams: ts.CloneMap(p.RequestParams),
+ ResponseHeaders: ts.CloneMap(p.ResponseHeaders),
+ ResponseBody: p.ResponseBody,
+ ResponseBodyBytes: p.ResponseBodyBytes,
+ CollapsedForwardingName: p.CollapsedForwardingName,
+ CollapsedForwardingType: p.CollapsedForwardingType,
+ NoMetrics: p.NoMetrics,
+ HasCustomResponseBody: p.HasCustomResponseBody,
+ Methods: make([]string, len(p.Methods)),
+ CacheKeyParams: make([]string, len(p.CacheKeyParams)),
+ CacheKeyHeaders: make([]string, len(p.CacheKeyHeaders)),
+ CacheKeyFormFields: make([]string, len(p.CacheKeyFormFields)),
+ custom: make([]string, len(p.custom)),
+ KeyHasher: p.KeyHasher,
+ }
+ copy(c.Methods, p.Methods)
+ copy(c.CacheKeyParams, p.CacheKeyParams)
+ copy(c.CacheKeyHeaders, p.CacheKeyHeaders)
+ copy(c.CacheKeyFormFields, p.CacheKeyFormFields)
+ copy(c.custom, p.custom)
+ return c
+
+}
+
+// Merge merges the non-default values of the provided PathConfig into the subject PathConfig
+func (p *PathConfig) Merge(p2 *PathConfig) {
+
+ if p2.OriginConfig != nil {
+ p.OriginConfig = p2.OriginConfig
+ }
+
+ for _, c := range p2.custom {
+ switch c {
+ case "path":
+ p.Path = p2.Path
+ case "match_type":
+ p.MatchType = p2.MatchType
+ p.MatchTypeName = p2.MatchTypeName
+ case "handler":
+ p.HandlerName = p2.HandlerName
+ p.Handler = p2.Handler
+ case "methods":
+ p.Methods = p2.Methods
+ case "cache_key_params":
+ p.CacheKeyParams = p2.CacheKeyParams
+ case "cache_key_headers":
+ p.CacheKeyHeaders = p2.CacheKeyHeaders
+ case "cache_key_form_fields":
+ p.CacheKeyFormFields = p2.CacheKeyFormFields
+ case "request_headers":
+ p.RequestHeaders = p2.RequestHeaders
+ case "request_params":
+ p.RequestParams = p2.RequestParams
+ case "response_headers":
+ p.ResponseHeaders = p2.ResponseHeaders
+ case "response_code":
+ p.ResponseCode = p2.ResponseCode
+ case "response_body":
+ p.ResponseBody = p2.ResponseBody
+ p.HasCustomResponseBody = true
+ p.ResponseBodyBytes = p2.ResponseBodyBytes
+ case "no_metrics":
+ p.NoMetrics = p2.NoMetrics
+ case "collapsed_forwarding":
+ p.CollapsedForwardingName = p2.CollapsedForwardingName
+ p.CollapsedForwardingType = p2.CollapsedForwardingType
+ }
+ }
+}
diff --git a/internal/config/path_test.go b/internal/config/path_test.go
new file mode 100644
index 000000000..25c9ac970
--- /dev/null
+++ b/internal/config/path_test.go
@@ -0,0 +1,162 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "net/http"
+ "testing"
+)
+
+func TestPMTString(t *testing.T) {
+
+ t1 := PathMatchTypeExact
+ t2 := PathMatchTypePrefix
+
+ var t3 PathMatchType = 3
+
+ if t1.String() != "exact" {
+ t.Errorf("expected %s got %s", "exact", t1.String())
+ }
+
+ if t2.String() != "prefix" {
+ t.Errorf("expected %s got %s", "prefix", t2.String())
+ }
+
+ if t3.String() != "3" {
+ t.Errorf("expected %s got %s", "3", t3.String())
+ }
+
+}
+
+func TestNewPathConfig(t *testing.T) {
+
+ pc := NewPathConfig()
+
+ if pc == nil {
+ t.Errorf("expected non-nil value you for %s", "PathConfig")
+ }
+
+ if pc.HandlerName != "proxy" {
+ t.Errorf("expected value %s, got %s", "proxy", pc.HandlerName)
+ }
+
+}
+
+func TestPathClone(t *testing.T) {
+
+ pc := NewPathConfig()
+ pc2 := pc.Clone()
+
+ if pc2 == nil {
+ t.Errorf("expected non-nil value you for %s", "PathConfig")
+ }
+
+ if pc2.HandlerName != "proxy" {
+ t.Errorf("expected value %s, got %s", "proxy", pc2.HandlerName)
+ }
+
+}
+
+func TestPathMerge(t *testing.T) {
+
+ pc := NewPathConfig()
+ pc2 := pc.Clone()
+
+ pc2.OriginConfig = NewOriginConfig()
+
+ pc2.custom = []string{"path", "match_type", "handler", "methods", "cache_key_params", "cache_key_headers", "cache_key_form_fields",
+ "request_headers", "request_params", "response_headers", "response_code", "response_body", "no_metrics", "collapsed_forwarding"}
+
+ expectedPath := "testPath"
+ expectedHandlerName := "testHandler"
+
+ pc2.Path = expectedPath
+ pc2.MatchType = PathMatchTypePrefix
+ pc2.HandlerName = expectedHandlerName
+ pc2.Methods = []string{http.MethodPost}
+ pc2.CacheKeyParams = []string{"params"}
+ pc2.CacheKeyHeaders = []string{"headers"}
+ pc2.CacheKeyFormFields = []string{"fields"}
+ pc2.RequestHeaders = map[string]string{"header1": "1"}
+ pc2.RequestParams = map[string]string{"param1": "foo"}
+ pc2.ResponseHeaders = map[string]string{"header2": "2"}
+ pc2.ResponseCode = 404
+ pc2.ResponseBody = "trickster"
+ pc2.NoMetrics = true
+ pc2.CollapsedForwardingName = "progressive"
+ pc2.CollapsedForwardingType = CFTypeProgressive
+
+ pc.Merge(pc2)
+
+ if pc.Path != expectedPath {
+ t.Errorf("expected %s got %s", expectedPath, pc.Path)
+ }
+
+ if pc.MatchType != PathMatchTypePrefix {
+ t.Errorf("expected %s got %s", PathMatchTypePrefix, pc.MatchType)
+ }
+
+ if pc.HandlerName != expectedHandlerName {
+ t.Errorf("expected %s got %s", expectedHandlerName, pc.HandlerName)
+ }
+
+ if len(pc.CacheKeyParams) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.CacheKeyParams))
+ }
+
+ if len(pc.CacheKeyHeaders) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.CacheKeyHeaders))
+ }
+
+ if len(pc.CacheKeyFormFields) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.CacheKeyFormFields))
+ }
+
+ if len(pc.RequestHeaders) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.RequestHeaders))
+ }
+
+ if len(pc.RequestParams) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.RequestParams))
+ }
+
+ if len(pc.ResponseHeaders) != 1 {
+ t.Errorf("expected %d got %d", 1, len(pc.ResponseHeaders))
+ }
+
+ if pc.ResponseCode != 404 {
+ t.Errorf("expected %d got %d", 404, pc.ResponseCode)
+ }
+
+ if pc.ResponseBody != "trickster" {
+ t.Errorf("expected %s got %s", "trickster", pc.ResponseBody)
+ }
+
+ if !pc.NoMetrics {
+ t.Errorf("expected %t got %t", true, pc.NoMetrics)
+ }
+
+ if pc.OriginConfig == nil {
+ t.Errorf("expected non-nil value you for %s", "OriginConfig")
+ }
+
+ if pc.CollapsedForwardingName != "progressive" || pc.CollapsedForwardingType != CFTypeProgressive {
+ t.Errorf("expected %s got %s", "progressive", pc.CollapsedForwardingName)
+ }
+
+}
diff --git a/internal/config/timeseries_eviction_method.go b/internal/config/timeseries_eviction_method.go
new file mode 100644
index 000000000..a42d8637f
--- /dev/null
+++ b/internal/config/timeseries_eviction_method.go
@@ -0,0 +1,50 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import "strconv"
+
+// TimeseriesEvictionMethod enumerates the methodologies for maintaining time series cache data
+type TimeseriesEvictionMethod int
+
+const (
+ // EvictionMethodOldest indicates that a time series cache object only holds values newer than an explicit date,
+ // called the Oldest Cacheable Timestamp, which is calculated with this formula on each request:
+ // time.Now().Add(-(config.ValueRetentionFactor * query.Step))
+ // This policy is the more performant methodology, because out-of-cache-range determination does not require querying
+ // the cache; thus the cache is only accessed for requests that are pre-determined to be cacheable
+ EvictionMethodOldest = TimeseriesEvictionMethod(iota)
+ // EvictionMethodLRU indicates that a time series cache object holds up to ValueRetentionFactor number of
+ // unique timestamps, removing the least-recently-used timestamps as necessary to remain at the ValueRetentionFactor
+ // This policy is the more compute-intensive, since we must maintain an LRU on each timestamp in each cache object,
+ // and retrieve the object from cache on each request
+ EvictionMethodLRU
+)
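+
+// As a worked example (illustrative values): with a retention factor of 1024 and a query
+// step of 15s, EvictionMethodOldest retains only samples newer than
+// time.Now().Add(-1024 * 15 * time.Second), roughly the most recent 4h16m of data.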
+
+var timeseriesEvictionMethodNames = map[string]TimeseriesEvictionMethod{
+ "oldest": EvictionMethodOldest,
+ "lru": EvictionMethodLRU,
+}
+
+var timeseriesEvictionMethodValues = map[TimeseriesEvictionMethod]string{
+ EvictionMethodOldest: "oldest",
+ EvictionMethodLRU: "lru",
+}
+
+func (t TimeseriesEvictionMethod) String() string {
+ if v, ok := timeseriesEvictionMethodValues[t]; ok {
+ return v
+ }
+ return strconv.Itoa(int(t))
+}
diff --git a/internal/config/timeseries_eviction_method_test.go b/internal/config/timeseries_eviction_method_test.go
new file mode 100644
index 000000000..d952da19f
--- /dev/null
+++ b/internal/config/timeseries_eviction_method_test.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "testing"
+)
+
+func TestTEMString(t *testing.T) {
+
+ t1 := EvictionMethodLRU
+ t2 := EvictionMethodOldest
+ var t3 TimeseriesEvictionMethod = 3
+
+ if t1.String() != "lru" {
+ t.Errorf("expected %s got %s", "lru", t1.String())
+ }
+
+ if t2.String() != "oldest" {
+ t.Errorf("expected %s got %s", "oldest", t2.String())
+ }
+
+ if t3.String() != "3" {
+ t.Errorf("expected %s got %s", "3", t3.String())
+ }
+
+}
diff --git a/internal/config/tls.go b/internal/config/tls.go
new file mode 100644
index 000000000..921f7824b
--- /dev/null
+++ b/internal/config/tls.go
@@ -0,0 +1,138 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "crypto/tls"
+ "io/ioutil"
+)
+
+// TLSConfig is a collection of TLS-related client and server configurations
+type TLSConfig struct {
+ // FullChainCertPath specifies the path of the file containing the
+ // concatenated server certification and the intermediate certification for the tls endpoint
+ FullChainCertPath string `toml:"full_chain_cert_path"`
+ // PrivateKeyPath specifies the path of the private key file for the tls endpoint
+ PrivateKeyPath string `toml:"private_key_path"`
+ // ServeTLS is set to true once the Cert and Key files have been validated,
+ // indicating the consumer of this config can service requests over TLS
+ ServeTLS bool `toml:"-"`
+ // InsecureSkipVerify indicates that the HTTPS Client in Trickster should bypass
+ // hostname verification for the origin's certificate when proxying requests
+ InsecureSkipVerify bool `toml:"insecure_skip_verify"`
+ // CertificateAuthorities provides a list of custom Certificate Authorities for the upstream origin
+ // which are considered in addition to any system CA's by the Trickster HTTPS Client
+ CertificateAuthorityPaths []string `toml:"certificate_authority_paths"`
+ // ClientCertPath provides the path to the Client Certificate when using Mutual Authorization
+ ClientCertPath string `toml:"client_cert_path"`
+ // ClientKeyPath provides the path to the Client Key when using Mutual Authorization
+ ClientKeyPath string `toml:"client_key_path"`
+}
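+
+// As a sketch (paths are illustrative; assumes the TLS block is nested under an origin's
+// configuration table), a TOML configuration might look like:
+//   [origins.example.tls]
+//   full_chain_cert_path = "/path/to/fullchain.pem"
+//   private_key_path = "/path/to/key.pem"
+//   insecure_skip_verify = false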
+
+// DefaultTLSConfig will return a *TLSConfig with the default settings
+func DefaultTLSConfig() *TLSConfig {
+ return &TLSConfig{
+ FullChainCertPath: "",
+ PrivateKeyPath: "",
+ }
+}
+
+// Clone returns an exact copy of the subject *TLSConfig
+func (tc *TLSConfig) Clone() *TLSConfig {
+
+ var caps []string
+ if tc.CertificateAuthorityPaths != nil {
+ caps = make([]string, len(tc.CertificateAuthorityPaths))
+ copy(caps, tc.CertificateAuthorityPaths)
+ }
+
+ return &TLSConfig{
+ FullChainCertPath: tc.FullChainCertPath,
+ PrivateKeyPath: tc.PrivateKeyPath,
+ ServeTLS: tc.ServeTLS,
+ InsecureSkipVerify: tc.InsecureSkipVerify,
+ CertificateAuthorityPaths: caps,
+ ClientCertPath: tc.ClientCertPath,
+ ClientKeyPath: tc.ClientKeyPath,
+ }
+}
+
+func (c *TricksterConfig) verifyTLSConfigs() error {
+
+ for _, oc := range c.Origins {
+
+ if oc.TLS == nil || (oc.TLS.FullChainCertPath == "" || oc.TLS.PrivateKeyPath == "") && (oc.TLS.CertificateAuthorityPaths == nil || len(oc.TLS.CertificateAuthorityPaths) == 0) {
+ continue
+ }
+
+ _, err := ioutil.ReadFile(oc.TLS.FullChainCertPath)
+ if err != nil {
+ return err
+ }
+ _, err = ioutil.ReadFile(oc.TLS.PrivateKeyPath)
+ if err != nil {
+ return err
+ }
+ c.Frontend.ServeTLS = true
+ oc.TLS.ServeTLS = true
+
+ // Verify CA Paths
+ if oc.TLS.CertificateAuthorityPaths != nil && len(oc.TLS.CertificateAuthorityPaths) > 0 {
+ for _, path := range oc.TLS.CertificateAuthorityPaths {
+ _, err = ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ }
+ return nil
+}
+
+// TLSCertConfig returns the crypto/tls configuration object with a list of name-bound certs derived from the running config
+func (c *TricksterConfig) TLSCertConfig() (*tls.Config, error) {
+ var err error
+ if !c.Frontend.ServeTLS {
+ return nil, nil
+ }
+ to := []*OriginConfig{}
+ for _, oc := range c.Origins {
+ if oc.TLS.ServeTLS {
+ to = append(to, oc)
+ }
+ }
+
+ l := len(to)
+ if l == 0 {
+ return nil, nil
+ }
+
+ tlsConfig := &tls.Config{}
+ tlsConfig.Certificates = make([]tls.Certificate, l)
+
+ i := 0
+ for _, tc := range to {
+ tlsConfig.Certificates[i], err = tls.LoadX509KeyPair(tc.TLS.FullChainCertPath, tc.TLS.PrivateKeyPath)
+ if err != nil {
+ return nil, err
+ }
+ i++
+ }
+
+ tlsConfig.BuildNameToCertificate()
+
+ return tlsConfig, nil
+
+}
diff --git a/internal/config/tls_test.go b/internal/config/tls_test.go
new file mode 100644
index 000000000..36b0bbf56
--- /dev/null
+++ b/internal/config/tls_test.go
@@ -0,0 +1,155 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package config
+
+import (
+ "testing"
+)
+
+func TestDefaultTLSConfig(t *testing.T) {
+
+ dc := DefaultTLSConfig()
+ if dc == nil {
+ t.Errorf("expected config named %s", "default")
+ }
+
+ if dc.FullChainCertPath != "" {
+ t.Errorf("expected empty cert path got %s", dc.FullChainCertPath)
+ }
+
+ if dc.PrivateKeyPath != "" {
+ t.Errorf("expected empty key path got %s", dc.PrivateKeyPath)
+ }
+
+}
+
+func tlsConfig(id string) *TLSConfig {
+ return &TLSConfig{
+ FullChainCertPath: "../../testdata/test." + id + ".cert.pem",
+ PrivateKeyPath: "../../testdata/test." + id + ".key.pem",
+ ServeTLS: true,
+ }
+}
+
+func TestVerifyTLSConfigs(t *testing.T) {
+
+ config := NewConfig()
+ tls01 := tlsConfig("01")
+ config.Origins["default"].TLS = tls01
+
+ err := config.verifyTLSConfigs()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test for error when cert file can't be read
+ tls04 := tlsConfig("04")
+ originalFile := tls04.FullChainCertPath
+ badFile := originalFile + ".nonexistent"
+ tls04.FullChainCertPath = badFile
+ config.Origins["default"].TLS = tls04
+ err = config.verifyTLSConfigs()
+ if err == nil {
+ t.Errorf("expected error for bad file %s", badFile)
+ }
+ tls04.FullChainCertPath = originalFile
+
+ // test for error when key file can't be read
+ originalFile = tls04.PrivateKeyPath
+ badFile = originalFile + ".nonexistent"
+ tls04.PrivateKeyPath = badFile
+ err = config.verifyTLSConfigs()
+ if err == nil {
+ t.Errorf("expected error for bad file %s", badFile)
+ }
+ tls04.PrivateKeyPath = originalFile
+
+ originalFile = "../../testdata/test.rootca.pem"
+ badFile = originalFile + ".nonexistent"
+ // test for more RootCA's to add
+ tls04.CertificateAuthorityPaths = []string{originalFile}
+ err = config.verifyTLSConfigs()
+ if err != nil {
+ t.Error(err)
+ }
+
+ tls04.CertificateAuthorityPaths = []string{badFile}
+ err = config.verifyTLSConfigs()
+ if err == nil {
+ t.Errorf("expected error for bad file %s", badFile)
+ }
+}
+
+func TestProcessTLSConfigs(t *testing.T) {
+
+ a := []string{"-config", "../../testdata/test.full.conf"}
+ err := Load("trickster-test", "0", a)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestTLSCertConfig(t *testing.T) {
+
+ config := NewConfig()
+
+ // test empty config condition #1 (ServeTLS is false, early bail)
+ n, err := config.TLSCertConfig()
+ if n != nil {
+ t.Errorf("expected nil config, got %d certs", len(n.Certificates))
+ }
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test empty config condition 2 (ServeTLS is true, but there are 0 origins configured)
+ config.Frontend.ServeTLS = true
+ n, err = config.TLSCertConfig()
+ if n != nil {
+ t.Errorf("expected nil config, got %d certs", len(n.Certificates))
+ }
+ if err != nil {
+ t.Error(err)
+ }
+
+ tls01 := tlsConfig("01")
+ config.Frontend.ServeTLS = true
+
+ // test good config
+ config.Origins["default"].TLS = tls01
+ _, err = config.TLSCertConfig()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test config with key file that has invalid key data
+ expectedErr := "tls: failed to find any PEM data in key input"
+ tls05 := tlsConfig("05")
+ config.Origins["default"].TLS = tls05
+ _, err = config.TLSCertConfig()
+ if err == nil {
+ t.Errorf("expected error: %s", expectedErr)
+ }
+
+ // test config with cert file that has invalid key data
+ expectedErr = "tls: failed to find any PEM data in certificate input"
+ tls06 := tlsConfig("06")
+ config.Origins["default"].TLS = tls06
+ _, err = config.TLSCertConfig()
+ if err == nil {
+ t.Errorf("expected error: %s", expectedErr)
+ }
+
+}
diff --git a/internal/proxy/context/keys.go b/internal/proxy/context/keys.go
new file mode 100644
index 000000000..84fec4715
--- /dev/null
+++ b/internal/proxy/context/keys.go
@@ -0,0 +1,20 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package context
+
+type contextKey int
+
+const (
+ resourcesKey contextKey = iota
+)
diff --git a/internal/proxy/context/resources.go b/internal/proxy/context/resources.go
new file mode 100644
index 000000000..99a166bbe
--- /dev/null
+++ b/internal/proxy/context/resources.go
@@ -0,0 +1,31 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package context
+
+import (
+ "context"
+)
+
+// WithResources returns a copy of the provided context that also includes the OriginConfig, CachingConfig and PathConfig for the request
+func WithResources(ctx context.Context, r interface{}) context.Context {
+ if r != nil {
+ return context.WithValue(ctx, resourcesKey, r)
+ }
+ return ctx
+}
+
+// Resources returns the interface reference to the Request's resources
+func Resources(ctx context.Context) interface{} {
+ return ctx.Value(resourcesKey)
+}
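+
+// Minimal usage sketch (illustrative only; the import alias and the
+// *config.OriginConfig type are assumed names, not requirements):
+//
+//   import tc "github.com/Comcast/trickster/internal/proxy/context"
+//
+//   ctx := tc.WithResources(r.Context(), originConfig)
+//   cfg, _ := tc.Resources(ctx).(*config.OriginConfig)
+//
+// Because the value is stored as an interface{}, callers are expected to
+// type-assert back to the concrete type they stored.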
diff --git a/internal/proxy/context/resources_test.go b/internal/proxy/context/resources_test.go
new file mode 100644
index 000000000..12f62deee
--- /dev/null
+++ b/internal/proxy/context/resources_test.go
@@ -0,0 +1,39 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package context
+
+import (
+ "context"
+ "testing"
+)
+
+type testStruct struct {
+ testField1 bool
+}
+
+func TestResources(t *testing.T) {
+
+ ctx := context.Background()
+
+ // cover nil short circuit case
+ ctx = WithResources(ctx, nil)
+
+ r1 := &testStruct{testField1: true}
+ ctx = WithResources(ctx, r1)
+ r2 := Resources(ctx)
+
+ if !r2.(*testStruct).testField1 {
+ t.Errorf("expected %t got %t", true, r2.(*testStruct).testField1)
+ }
+
+}
diff --git a/internal/proxy/engines/access_logs.go b/internal/proxy/engines/access_logs.go
new file mode 100644
index 000000000..d7b409e0d
--- /dev/null
+++ b/internal/proxy/engines/access_logs.go
@@ -0,0 +1,45 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+func logUpstreamRequest(originName, originType, handlerName, method, path, userAgent string, responseCode, size int, requestDuration float64) {
+ log.Debug("upstream request",
+ log.Pairs{
+ "originName": originName,
+ "originType": originType,
+ "handlerName": handlerName,
+ "method": method,
+ "uri": path,
+ "userAgent": userAgent,
+ "code": responseCode,
+ "size": size,
+ "durationMS": int(requestDuration * 1000),
+ })
+}
+
+func logDownstreamRequest(r *http.Request) {
+ log.Debug("downstream request",
+ log.Pairs{
+ "uri": r.RequestURI,
+ "method": r.Method,
+ "userAgent": r.UserAgent(),
+ "clientIP": r.RemoteAddr,
+ })
+}
diff --git a/internal/proxy/engines/access_logs_test.go b/internal/proxy/engines/access_logs_test.go
new file mode 100644
index 000000000..8df972341
--- /dev/null
+++ b/internal/proxy/engines/access_logs_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "net/http"
+ "os"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+func TestLogUpstreamRequest(t *testing.T) {
+ fileName := "out.log"
+ // it should create a logger that outputs to a log file ("out.log")
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "debug"}
+ log.Init()
+ logUpstreamRequest("testOrigin", "testType", "testHandler", "testMethod", "testPath", "testUserAgent", 200, 0, 1.0)
+ if _, err := os.Stat(fileName); err != nil {
+ t.Error(err)
+ }
+ log.Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestLogDownstreamRequest(t *testing.T) {
+ fileName := "out.log"
+ // it should create a logger that outputs to a log file ("out.log")
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "debug"}
+ log.Init()
+
+ r, err := http.NewRequest(http.MethodGet, "http://testOrigin", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ logDownstreamRequest(r)
+
+ if _, err := os.Stat(fileName); err != nil {
+ t.Error(err)
+ }
+ log.Logger.Close()
+ os.Remove(fileName)
+}
diff --git a/internal/proxy/engines/cache.go b/internal/proxy/engines/cache.go
new file mode 100644
index 000000000..c841f1dc3
--- /dev/null
+++ b/internal/proxy/engines/cache.go
@@ -0,0 +1,212 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "mime"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/golang/snappy"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+// QueryCache queries the cache for an HTTPDocument and returns it
+func QueryCache(c cache.Cache, key string, ranges byterange.Ranges) (*HTTPDocument, status.LookupStatus, byterange.Ranges, error) {
+
+ d := &HTTPDocument{}
+ var lookupStatus status.LookupStatus
+ var bytes []byte
+ var err error
+
+ if c.Configuration().CacheType == "memory" {
+ mc := c.(cache.MemoryCache)
+ var ifc interface{}
+ ifc, lookupStatus, err = mc.RetrieveReference(key, true)
+ // normalize any cache miss errors to cache.ErrKNF; once all cache backends return ErrKNF directly, this normalization can be removed
+ if err != nil && err != cache.ErrKNF && strings.HasSuffix(err.Error(), "not in cache") {
+ err = cache.ErrKNF
+ }
+
+ if err != nil || (lookupStatus != status.LookupStatusHit) {
+ var nr byterange.Ranges
+ if lookupStatus == status.LookupStatusKeyMiss && ranges != nil && len(ranges) > 0 {
+ nr = ranges
+ }
+ return d, lookupStatus, nr, err
+ }
+
+ if ifc != nil {
+ d, _ = ifc.(*HTTPDocument)
+ } else {
+ return d, status.LookupStatusKeyMiss, ranges, err
+ }
+
+ } else {
+
+ bytes, lookupStatus, err = c.Retrieve(key, true)
+ // normalize any cache miss errors to cache.ErrKNF; once all cache backends return ErrKNF directly, this normalization can be removed
+ if err != nil && err != cache.ErrKNF && strings.HasSuffix(err.Error(), "not in cache") {
+ err = cache.ErrKNF
+ }
+
+ if err != nil || (lookupStatus != status.LookupStatusHit) {
+ var nr byterange.Ranges
+ if lookupStatus == status.LookupStatusKeyMiss && ranges != nil && len(ranges) > 0 {
+ nr = ranges
+ }
+ return d, lookupStatus, nr, err
+ }
+
+ var inflate bool
+ // check and remove compression bit
+ if len(bytes) > 0 {
+ if bytes[0] == 1 {
+ inflate = true
+ }
+ bytes = bytes[1:]
+ }
+
+ if inflate {
+ log.Debug("decompressing cached data", log.Pairs{"cacheKey": key})
+ b, err := snappy.Decode(nil, bytes)
+ if err == nil {
+ bytes = b
+ }
+ }
+ _, err = d.UnmarshalMsg(bytes)
+ if err != nil {
+ return d, status.LookupStatusKeyMiss, ranges, err
+ }
+
+ }
+
+ var delta byterange.Ranges
+
+ // Fulfillment is when a range is stored, but a subsequent request wants the whole body, so
+ // the requested range must be inflated to cover the entire object in order to get the correct delta.
+ d.isFulfillment = (d.Ranges != nil && len(d.Ranges) > 0) && (ranges == nil || len(ranges) == 0)
+
+ if d.isFulfillment {
+ ranges = byterange.Ranges{byterange.Range{Start: 0, End: d.ContentLength - 1}}
+ }
+
+ if ranges != nil && len(ranges) > 0 && d.Ranges != nil && len(d.Ranges) > 0 {
+ delta = ranges.CalculateDelta(d.Ranges, d.ContentLength)
+ if delta != nil && len(delta) > 0 {
+ if delta.Equal(ranges) {
+ lookupStatus = status.LookupStatusRangeMiss
+ } else {
+ lookupStatus = status.LookupStatusPartialHit
+ }
+ }
+
+ }
+ return d, lookupStatus, delta, nil
+}
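+
+// Illustrative example of the delta calculation above, mirroring the range
+// tests in cache_test.go: if the cache holds bytes 1-9 of a 62-byte object
+// and the client requests bytes 5-20, the returned delta is 10-20 with a
+// partial-hit status; if the cached ranges already cover the request, the
+// delta is empty and the lookup is a full hit.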
+
+func stripConditionalHeaders(h http.Header) {
+ h.Del(headers.NameIfMatch)
+ h.Del(headers.NameIfUnmodifiedSince)
+ h.Del(headers.NameIfNoneMatch)
+ h.Del(headers.NameIfModifiedSince)
+}
+
+// WriteCache writes an HTTPDocument to the cache
+func WriteCache(c cache.Cache, key string, d *HTTPDocument, ttl time.Duration, compressTypes map[string]bool) error {
+
+ h := http.Header(d.Headers)
+ h.Del(headers.NameDate)
+ h.Del(headers.NameTransferEncoding)
+ h.Del(headers.NameContentRange)
+ h.Del(headers.NameTricksterResult)
+
+ var bytes []byte
+
+ var compress bool
+
+ if ce := http.Header(d.Headers).Get(headers.NameContentEncoding); (ce == "" || ce == "identity") &&
+ (d.CachingPolicy == nil || !d.CachingPolicy.NoTransform) {
+ if mt, _, err := mime.ParseMediaType(d.ContentType); err == nil {
+ if _, ok := compressTypes[mt]; ok {
+ compress = true
+ }
+ }
+ }
+
+ // for memory cache, don't serialize the document, since we can retrieve it by reference.
+ if c.Configuration().CacheType == "memory" {
+ mc := c.(cache.MemoryCache)
+
+ if d != nil {
+ // during unmarshal these would come back as false, so let's set them that way here too for direct (by-reference) access
+ d.rangePartsLoaded = false
+ d.isFulfillment = false
+ d.isLoaded = false
+ d.RangeParts = nil
+
+ if d.CachingPolicy != nil {
+ d.CachingPolicy.ResetClientConditionals()
+ }
+ }
+
+ return mc.StoreReference(key, d, ttl)
+ }
+
+ // for non-memory caches, we have to serialize the document to a byte slice to store
+ bytes, _ = d.MarshalMsg(nil)
+
+ if compress {
+ log.Debug("compressing cache data", log.Pairs{"cacheKey": key})
+ bytes = append([]byte{1}, snappy.Encode(nil, bytes)...)
+ } else {
+ bytes = append([]byte{0}, bytes...)
+ }
+ return c.Store(key, bytes, ttl)
+
+}
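+
+// Note on the storage layout used above: for non-memory caches the serialized
+// document is prefixed with a single flag byte (1 when the payload is
+// snappy-compressed, 0 when stored uncompressed), and QueryCache strips and
+// interprets that byte on retrieval.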
+
+// DocumentFromHTTPResponse returns an HTTPDocument from the provided HTTP Response and Body
+func DocumentFromHTTPResponse(resp *http.Response, body []byte, cp *CachingPolicy) *HTTPDocument {
+ d := &HTTPDocument{}
+ d.StatusCode = resp.StatusCode
+ d.Status = resp.Status
+ d.CachingPolicy = cp
+ d.ContentLength = resp.ContentLength
+
+ if resp.Header != nil {
+ d.Headers = resp.Header.Clone()
+ }
+
+ ct := http.Header(d.Headers).Get(headers.NameContentType)
+ if !strings.HasPrefix(ct, headers.ValueMultipartByteRanges) {
+ d.ContentType = ct
+ }
+
+ if d.StatusCode == http.StatusPartialContent && body != nil && len(body) > 0 {
+ d.ParsePartialContentBody(resp, body)
+ d.FulfillContentBody()
+ } else {
+ d.SetBody(body)
+ }
+
+ return d
+}
diff --git a/internal/proxy/engines/cache_test.go b/internal/proxy/engines/cache_test.go
new file mode 100644
index 000000000..ab8787b89
--- /dev/null
+++ b/internal/proxy/engines/cache_test.go
@@ -0,0 +1,408 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+)
+
+const testRangeBody = "This is a test file, to see how the byte range requests work.\n"
+
+func newRangeRequestTestServer() *httptest.Server {
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.ServeContent(w, r, "", time.Now(),
+ strings.NewReader(testRangeBody))
+ })
+ s := httptest.NewServer(handler)
+ return s
+}
+
+func TestInvalidContentRange(t *testing.T) {
+ _, _, err := byterange.ParseContentRangeHeader("blah")
+ if err == nil {
+ t.Errorf("expected error: %s", `invalid input format`)
+ }
+}
+
+func TestMultiPartByteRange(t *testing.T) {
+
+ // TODO Make this work functionally
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, "62")
+ resp2.Header.Add(headers.NameContentRange, "bytes 0-10/62")
+ resp2.Header.Add("Content-Type", "multipart/byteranges; boundary=ddffee123")
+ resp2.StatusCode = 200
+ d := DocumentFromHTTPResponse(resp2, []byte("This is a t"), nil)
+
+ ranges := make(byterange.Ranges, 1)
+ ranges[0] = byterange.Range{Start: 5, End: 10}
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error("Expected multi part byte range request to pass, but failed with ", err.Error())
+ }
+}
+
+func TestCacheHitRangeRequest(t *testing.T) {
+ expected := "is a "
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, strconv.Itoa(len(testRangeBody)))
+ resp2.StatusCode = 200
+ d := DocumentFromHTTPResponse(resp2, []byte(testRangeBody), nil)
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ ranges := byterange.Ranges{byterange.Range{Start: 5, End: 10}}
+ d2, _, deltas, err := QueryCache(cache, "testKey", ranges)
+ if err != nil {
+ t.Error(err)
+ }
+ if (string(d2.Body[5:10])) != expected {
+ t.Errorf("expected %s got %s", expected, string(d2.Body[5:10]))
+ }
+ if deltas != nil {
+ t.Errorf("updated query range was expected to be empty")
+ }
+}
+
+func TestCacheHitRangeRequest2(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ have := byterange.Range{Start: 1, End: 20}
+ cl := int64(len(testRangeBody))
+ rl := (have.End - have.Start) + 1
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, strconv.FormatInt(rl, 10))
+ resp2.ContentLength = int64(rl)
+ resp2.Header.Add(headers.NameContentRange, have.ContentRangeHeader(cl))
+ resp2.StatusCode = 206
+ d := DocumentFromHTTPResponse(resp2, []byte(testRangeBody[have.Start:have.End+1]), nil)
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ ranges := byterange.Ranges{byterange.Range{Start: 5, End: 10}}
+ d2, _, deltas, err := QueryCache(cache, "testKey", ranges)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if deltas != nil && len(deltas) > 0 {
+ t.Errorf("updated query range was expected to be empty: %v", deltas)
+ }
+ if d2.Ranges[0].Start != 1 || d2.Ranges[0].End != 20 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 1, 20, d2.Ranges[0].Start, d2.Ranges[0].End)
+ }
+}
+
+func TestCacheHitRangeRequest3(t *testing.T) {
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ have := byterange.Range{Start: 1, End: 20}
+ cl := int64(len(testRangeBody))
+ rl := (have.End - have.Start) + 1
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, strconv.FormatInt(rl, 10))
+ resp2.ContentLength = int64(rl)
+ resp2.Header.Add(headers.NameContentRange, have.ContentRangeHeader(cl))
+ resp2.StatusCode = 206
+ d := DocumentFromHTTPResponse(resp2, []byte(testRangeBody[have.Start:have.End+1]), nil)
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ qrange := byterange.Ranges{byterange.Range{Start: 5, End: 10}}
+ _, _, deltas, err := QueryCache(cache, "testKey", qrange)
+ if err != nil {
+ t.Error(err)
+ }
+ if deltas != nil && len(deltas) > 0 {
+ t.Error("Expected empty query range got non empty response ", deltas)
+ }
+}
+
+func TestPartialCacheMissRangeRequest(t *testing.T) {
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ have := byterange.Range{Start: 1, End: 9}
+ cl := int64(len(testRangeBody))
+ rl := (have.End - have.Start) + 1
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, strconv.FormatInt(rl, 10))
+ resp2.ContentLength = int64(rl)
+ resp2.Header.Add(headers.NameContentRange, have.ContentRangeHeader(cl))
+ resp2.StatusCode = 206
+ d := DocumentFromHTTPResponse(resp2, []byte(testRangeBody[have.Start:have.End+1]), nil)
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ ranges := byterange.Ranges{byterange.Range{Start: 5, End: 20}}
+ _, _, deltas, err := QueryCache(cache, "testKey", ranges)
+ if err != nil {
+ t.Error(err)
+ }
+ if deltas == nil || len(deltas) < 1 {
+ t.Errorf("invalid deltas: %v", deltas)
+ } else if deltas[0].Start != 10 ||
+ deltas[0].End != 20 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 10, 20, deltas[0].Start, deltas[0].End)
+ }
+}
+
+func TestFullCacheMissRangeRequest(t *testing.T) {
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ have := byterange.Range{Start: 1, End: 9}
+ cl := int64(len(testRangeBody))
+ rl := (have.End - have.Start) + 1
+ resp2 := &http.Response{}
+ resp2.Header = make(http.Header)
+ resp2.Header.Add(headers.NameContentLength, strconv.FormatInt(rl, 10))
+ resp2.ContentLength = int64(rl)
+ resp2.Header.Add(headers.NameContentRange, have.ContentRangeHeader(cl))
+ resp2.StatusCode = 206
+ d := DocumentFromHTTPResponse(resp2, []byte(testRangeBody[have.Start:have.End+1]), nil)
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ ranges := byterange.Ranges{byterange.Range{Start: 15, End: 20}}
+ _, _, deltas, err := QueryCache(cache, "testKey", ranges)
+ if err != nil {
+ t.Error(err)
+ }
+ if deltas[0].Start != 15 ||
+ deltas[0].End != 20 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 15, 20, deltas[0].Start, deltas[0].End)
+ }
+}
+
+func TestRangeRequestFromClient(t *testing.T) {
+
+ want := byterange.Ranges{byterange.Range{Start: 15, End: 20}}
+ haves := byterange.Ranges{byterange.Range{Start: 10, End: 25}}
+
+ s := newRangeRequestTestServer()
+ defer s.Close()
+ client := &http.Client{}
+ request, err := http.NewRequest(http.MethodGet, s.URL, nil)
+
+ if err != nil {
+ log.Fatalln(err)
+ }
+ request.Header.Set(headers.NameRange, haves.String())
+ resp, err := client.Do(request)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bytes, _ := ioutil.ReadAll(resp.Body)
+
+ //--------------------------------------
+ err = config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, e2 := cr.GetCache("default")
+ if e2 != nil {
+ t.Error(e2)
+ }
+
+ d := DocumentFromHTTPResponse(resp, bytes, nil)
+ err = WriteCache(cache, "testKey2", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+ _, _, deltas, err := QueryCache(cache, "testKey2", want)
+ if err != nil {
+ t.Error(err)
+ }
+ if deltas != nil && len(deltas) > 0 {
+ t.Errorf("expected cache hit but got cache miss: %s", deltas)
+ }
+ want[0].Start = 20
+ want[0].End = 35
+ _, _, deltas, err = QueryCache(cache, "testKey2", want)
+ if err != nil {
+ t.Error(err)
+ }
+ if deltas == nil {
+ t.Fatal("expected cache miss but got cache hit")
+ }
+ if deltas[0].Start != 26 || deltas[0].End != 35 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 26, 35, deltas[0].Start, deltas[0].End)
+ }
+}
+
+func TestQueryCache(t *testing.T) {
+
+ expected := "1234"
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ resp := &http.Response{}
+ resp.Header = make(http.Header)
+ resp.StatusCode = 200
+ resp.Header.Add(headers.NameContentLength, "4")
+ d := DocumentFromHTTPResponse(resp, []byte(expected), nil)
+ d.ContentType = "text/plain"
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ d2, _, _, err := QueryCache(cache, "testKey", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(d2.Body) != string(expected) {
+ t.Errorf("expected %s got %s", string(expected), string(d2.Body))
+ }
+
+ if d2.StatusCode != 200 {
+ t.Errorf("expected %d got %d", 200, d2.StatusCode)
+ }
+
+ _, _, _, err = QueryCache(cache, "testKey2", nil)
+ if err == nil {
+ t.Errorf("expected error")
+ }
+
+ // test marshaling route by making our cache not appear to be a memory cache
+ cache.Remove("testKey")
+ cache.Configuration().CacheType = "test"
+
+ _, _, _, err = QueryCache(cache, "testKey", byterange.Ranges{{Start: 0, End: 1}})
+ if err == nil {
+ t.Errorf("expected error")
+ }
+
+ err = WriteCache(cache, "testKey", d, time.Duration(60)*time.Second, map[string]bool{"text/plain": true})
+ if err != nil {
+ t.Error(err)
+ }
+
+ d2, _, _, err = QueryCache(cache, "testKey", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(d2.Body) != string(expected) {
+ t.Errorf("expected %s got %s", string(expected), string(d2.Body))
+ }
+
+ if d2.StatusCode != 200 {
+ t.Errorf("expected %d got %d", 200, d2.StatusCode)
+ }
+
+}
diff --git a/internal/proxy/engines/caching_policy.go b/internal/proxy/engines/caching_policy.go
new file mode 100644
index 000000000..b9ee6e8ba
--- /dev/null
+++ b/internal/proxy/engines/caching_policy.go
@@ -0,0 +1,419 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+//go:generate msgp
+
+// CachingPolicy defines the attributes for determining the cachability of an HTTP object
+type CachingPolicy struct {
+ IsFresh bool `msg:"is_fresh"`
+ NoCache bool `msg:"nocache"`
+ NoTransform bool `msg:"notransform"`
+ FreshnessLifetime int `msg:"freshness_lifetime"`
+ CanRevalidate bool `msg:"can_revalidate"`
+ MustRevalidate bool `msg:"must_revalidate"`
+ LastModified time.Time `msg:"last_modified"`
+ Expires time.Time `msg:"expires"`
+ Date time.Time `msg:"date"`
+ LocalDate time.Time `msg:"local_date"`
+ ETag string `msg:"etag"`
+
+ IsNegativeCache bool `msg:"is_negative_cache"`
+
+ IfNoneMatchValue string `msg:"-"`
+ IfModifiedSinceTime time.Time `msg:"-"`
+ IfUnmodifiedSinceTime time.Time `msg:"-"`
+
+ IsClientConditional bool `msg:"-"`
+ IsClientFresh bool `msg:"-"`
+ HasIfModifiedSince bool `msg:"-"`
+ HasIfUnmodifiedSince bool `msg:"-"`
+ HasIfNoneMatch bool `msg:"-"`
+
+ IfNoneMatchResult bool `msg:"-"`
+}
+
+// Clone returns an exact copy of the Caching Policy
+func (cp *CachingPolicy) Clone() *CachingPolicy {
+ return &CachingPolicy{
+ IsFresh: cp.IsFresh,
+ NoCache: cp.NoCache,
+ NoTransform: cp.NoTransform,
+ FreshnessLifetime: cp.FreshnessLifetime,
+ CanRevalidate: cp.CanRevalidate,
+ MustRevalidate: cp.MustRevalidate,
+ LastModified: cp.LastModified,
+ Expires: cp.Expires,
+ Date: cp.Date,
+ LocalDate: cp.LocalDate,
+ ETag: cp.ETag,
+ IsNegativeCache: cp.IsNegativeCache,
+ IfNoneMatchValue: cp.IfNoneMatchValue,
+ IfModifiedSinceTime: cp.IfModifiedSinceTime,
+ IfUnmodifiedSinceTime: cp.IfUnmodifiedSinceTime,
+ IsClientConditional: cp.IsClientConditional,
+ IsClientFresh: cp.IsClientFresh,
+ HasIfModifiedSince: cp.HasIfModifiedSince,
+ HasIfUnmodifiedSince: cp.HasIfUnmodifiedSince,
+ HasIfNoneMatch: cp.HasIfNoneMatch,
+ IfNoneMatchResult: cp.IfNoneMatchResult,
+ }
+}
+
+// ResetClientConditionals sets the request-specific conditional values of the subject
+// caching policy to false, so as to facilitate reuse of the policy with subsequent requests
+// for the same cache object
+func (cp *CachingPolicy) ResetClientConditionals() {
+ cp.IfNoneMatchValue = ""
+ cp.IfModifiedSinceTime = time.Time{}
+ cp.IfUnmodifiedSinceTime = time.Time{}
+ cp.IsClientConditional = false
+ cp.IsClientFresh = false
+ cp.HasIfModifiedSince = false
+ cp.HasIfUnmodifiedSince = false
+ cp.HasIfNoneMatch = false
+ cp.IfNoneMatchResult = false
+}
+
+// Merge merges the source CachingPolicy into the subject CachingPolicy
+func (cp *CachingPolicy) Merge(src *CachingPolicy) {
+
+ if src == nil {
+ return
+ }
+
+ cp.NoCache = cp.NoCache || src.NoCache
+ cp.NoTransform = cp.NoTransform || src.NoTransform
+
+ cp.IsClientConditional = cp.IsClientConditional || src.IsClientConditional
+ cp.IsClientFresh = cp.IsClientFresh || src.IsClientFresh
+ cp.IsNegativeCache = cp.IsNegativeCache || src.IsNegativeCache
+
+ cp.IsFresh = src.IsFresh
+ cp.FreshnessLifetime = src.FreshnessLifetime
+ cp.CanRevalidate = src.CanRevalidate
+ cp.MustRevalidate = src.MustRevalidate
+ cp.LastModified = src.LastModified
+ cp.Expires = src.Expires
+ cp.Date = src.Date
+ cp.LocalDate = src.LocalDate
+ cp.ETag = src.ETag
+
+ // request policies (e.g., IfModifiedSince) are intentionally omitted,
+ // assuming a response policy is always merged into a request policy
+
+}
+
+// TTL returns a TTL based on the subject caching policy and the provided multiplier and max values
+func (cp *CachingPolicy) TTL(multiplier float64, max time.Duration) time.Duration {
+ var ttl time.Duration = time.Duration(cp.FreshnessLifetime) * time.Second
+ if cp.CanRevalidate {
+ ttl *= time.Duration(multiplier)
+ }
+ if ttl > max {
+ ttl = max
+ }
+ return ttl
+}
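+
+// Worked example with assumed values: for FreshnessLifetime = 300 and
+// CanRevalidate = true, TTL(2, 24*time.Hour) returns 600 seconds; results
+// larger than max are clamped to max. Note that the multiplier is converted
+// to a time.Duration, so any fractional part is truncated.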
+
+func (cp *CachingPolicy) String() string {
+ return fmt.Sprintf(`{ "is_fresh":%t, "no_cache":%t, "no_transform":%t, "freshness_lifetime":%d, "can_revalidate":%t, "must_revalidate":%t,`+
+ ` "last_modified":%d, "expires":%d, "date":%d, "local_date":%d, "etag":"%s", "if_none_match":"%s",`+
+ ` "if_modified_since":%d, "if_unmodified_since":%d, "is_negative_cache":%t }`,
+ cp.IsFresh, cp.NoCache, cp.NoTransform, cp.FreshnessLifetime, cp.CanRevalidate, cp.MustRevalidate, cp.LastModified.Unix(), cp.Expires.Unix(), cp.Date.Unix(), cp.LocalDate.Unix(),
+ cp.ETag, cp.IfNoneMatchValue, cp.IfModifiedSinceTime.Unix(), cp.IfUnmodifiedSinceTime.Unix(), cp.IsNegativeCache)
+}
+
+// GetResponseCachingPolicy examines HTTP response headers for caching headers
+// and returns a CachingPolicy reference
+func GetResponseCachingPolicy(code int, negativeCache map[int]time.Duration, h http.Header) *CachingPolicy {
+
+ cp := &CachingPolicy{LocalDate: time.Now()}
+
+ if d, ok := negativeCache[code]; ok {
+ cp.FreshnessLifetime = int(d.Seconds())
+ cp.Expires = cp.LocalDate.Add(d)
+ cp.IsNegativeCache = true
+ return cp
+ }
+
+ // Do not cache content that includes a Set-Cookie header.
+ // Trickster can use PathConfig rules to strip Set-Cookie if cacheability is needed.
+ if v := h.Get(headers.NameSetCookie); v != "" {
+ cp.NoCache = true
+ cp.FreshnessLifetime = -1
+ return cp
+ }
+
+ // Cache-Control has first precedence
+ if v := h.Get(headers.NameCacheControl); v != "" {
+ cp.parseCacheControlDirectives(v)
+ }
+
+ if cp.NoCache {
+ cp.FreshnessLifetime = -1
+ return cp
+ }
+
+ lastModifiedHeader := h.Get(headers.NameLastModified)
+ hasLastModified := lastModifiedHeader != ""
+ expiresHeader := h.Get(headers.NameExpires)
+ hasExpires := expiresHeader != ""
+ eTagHeader := h.Get(headers.NameETag)
+ hasETag := eTagHeader != ""
+
+ if !hasLastModified && !hasExpires && !hasETag && cp.FreshnessLifetime == 0 {
+ cp.NoCache = true
+ cp.FreshnessLifetime = -1
+ return cp
+ }
+
+ // Get the date header or, if it is not found or parsed, set it
+ if v := h.Get(headers.NameDate); v != "" {
+ if date, err := time.Parse(time.RFC1123, v); err != nil {
+ cp.Date = cp.LocalDate
+ h.Set(headers.NameDate, cp.Date.Format(time.RFC1123))
+ } else {
+ cp.Date = date
+ }
+ } else {
+ cp.Date = cp.LocalDate
+ h.Set(headers.NameDate, cp.Date.Format(time.RFC1123))
+ }
+
+ // no Max-Age provided yet, look for expires
+ if cp.FreshnessLifetime == 0 && !cp.MustRevalidate {
+ // if there is an Expires header, respect it
+ if hasExpires {
+ expires, err := time.Parse(time.RFC1123, expiresHeader)
+ if err == nil {
+ cp.Expires = expires
+ if expires.Before(cp.Date) {
+ cp.FreshnessLifetime = -1
+ cp.MustRevalidate = true
+ } else {
+ cp.FreshnessLifetime = int(cp.Expires.Sub(cp.Date).Seconds())
+ }
+ } else {
+ cp.FreshnessLifetime = -1
+ cp.MustRevalidate = true
+ }
+ }
+ }
+
+ if !hasETag && !hasLastModified {
+ cp.CanRevalidate = false
+ return cp
+ }
+
+ cp.CanRevalidate = true
+
+ if hasETag {
+ cp.ETag = eTagHeader
+ }
+
+ if hasLastModified {
+ lm, err := time.Parse(time.RFC1123, lastModifiedHeader)
+ if err != nil {
+ cp.CanRevalidate = false
+ cp.FreshnessLifetime = -1
+ } else {
+ cp.LastModified = lm
+ }
+ }
+
+ // else, if there is a Last-Modified header, set FreshnessLifetime to 20% of age
+ if cp.CanRevalidate && cp.FreshnessLifetime == 0 && !cp.LastModified.IsZero() &&
+ cp.LastModified.Before(cp.Date) && !cp.MustRevalidate {
+ objectAge := int(cp.Date.Sub(cp.LastModified).Seconds())
+ if objectAge > 0 {
+ cp.FreshnessLifetime = objectAge / 5
+ }
+ }
+
+ return cp
+}
+
+var supportedCCD = map[string]bool{
+ headers.ValuePrivate: true,
+ headers.ValueNoCache: true,
+ headers.ValueNoStore: true,
+ headers.ValueMaxAge: false,
+ headers.ValueSharedMaxAge: false,
+ headers.ValueMustRevalidate: false,
+ headers.ValueProxyRevalidate: false,
+}
+
+func (cp *CachingPolicy) parseCacheControlDirectives(directives string) {
+ dl := strings.Split(strings.Replace(strings.ToLower(directives), " ", "", -1), ",")
+ var noCache bool
+ var hasSharedMaxAge bool
+ var foundFreshnessDirective bool
+ for _, d := range dl {
+ var dsub string
+ if i := strings.Index(d, "="); i > 0 {
+ dsub = d[i+1:]
+ d = d[:i]
+ }
+ if v, ok := supportedCCD[d]; ok {
+ noCache = noCache || v
+ }
+ if noCache {
+ cp.NoCache = true
+ cp.FreshnessLifetime = -1
+ return
+ }
+ if d == headers.ValueSharedMaxAge && dsub != "" {
+ foundFreshnessDirective = true
+ secs, err := strconv.Atoi(dsub)
+ if err == nil {
+ hasSharedMaxAge = true
+ cp.FreshnessLifetime = secs
+ }
+ }
+ if (!hasSharedMaxAge) && d == headers.ValueMaxAge && dsub != "" {
+ foundFreshnessDirective = true
+ secs, err := strconv.Atoi(dsub)
+ if err == nil {
+ cp.FreshnessLifetime = secs
+ }
+ }
+ if (d == headers.ValueMustRevalidate || d == headers.ValueProxyRevalidate) || (cp.FreshnessLifetime == 0 && foundFreshnessDirective) {
+ cp.MustRevalidate = true
+ cp.FreshnessLifetime = 0
+ }
+ if d == headers.ValueNoTransform {
+ cp.NoTransform = true
+ }
+ }
+
+}
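+
+// For illustration (these mirror cases in caching_policy_test.go):
+// "private, max-age=300" marks the policy NoCache, since private responses
+// are not cacheable by a shared cache; "public, max-age=300" yields a
+// FreshnessLifetime of 300 seconds; and "must-revalidate" sets MustRevalidate
+// with a FreshnessLifetime of 0.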
+
+func hasPragmaNoCache(h http.Header) bool {
+ if v := h.Get(headers.NamePragma); v != "" {
+ return v == headers.ValueNoCache
+ }
+ return false
+}
+
+// GetRequestCachingPolicy examines HTTP request headers for caching headers
+// and returns a CachingPolicy reference indicating whether the corresponding response is OK to cache
+func GetRequestCachingPolicy(h http.Header) *CachingPolicy {
+ cp := &CachingPolicy{LocalDate: time.Now()}
+
+ if hasPragmaNoCache(h) {
+ cp.NoCache = true
+ return cp
+ }
+
+ // Cache-Control has first precedence
+ if v := h.Get(headers.NameCacheControl); v != "" {
+ cp.parseCacheControlDirectives(v)
+ if cp.NoCache {
+ return cp
+ }
+ }
+
+ if v := h.Get(headers.NameIfModifiedSince); v != "" {
+ if date, err := time.Parse(time.RFC1123, v); err == nil {
+ cp.IfModifiedSinceTime = date
+ }
+ }
+
+ if v := h.Get(headers.NameIfUnmodifiedSince); v != "" {
+ if date, err := time.Parse(time.RFC1123, v); err == nil {
+ cp.IfUnmodifiedSinceTime = date
+ }
+ }
+
+ if v := h.Get(headers.NameIfNoneMatch); v != "" {
+ cp.IfNoneMatchValue = v
+ }
+
+ return cp
+}
+
+// ResolveClientConditionals ensures any client conditionals are handled before
+// responding to the client request
+func (cp *CachingPolicy) ResolveClientConditionals(ls status.LookupStatus) {
+
+ cp.IsClientFresh = false
+ if !cp.IsClientConditional {
+ return
+ }
+
+ isClientFresh := true
+ if cp.HasIfNoneMatch {
+ cp.IfNoneMatchResult = CheckIfNoneMatch(cp.ETag, cp.IfNoneMatchValue, ls)
+ isClientFresh = isClientFresh && !cp.IfNoneMatchResult
+ }
+ if cp.HasIfModifiedSince {
+ isClientFresh = isClientFresh && !cp.LastModified.After(cp.IfModifiedSinceTime)
+ }
+ if cp.HasIfUnmodifiedSince {
+ isClientFresh = isClientFresh && cp.LastModified.After(cp.IfUnmodifiedSinceTime)
+ }
+ cp.IsClientFresh = isClientFresh
+}
+
+// ParseClientConditionals inspects the client http request to determine if it includes any conditions
+func (cp *CachingPolicy) ParseClientConditionals() {
+ cp.HasIfNoneMatch = cp.IfNoneMatchValue != ""
+ cp.HasIfModifiedSince = !cp.IfModifiedSinceTime.IsZero()
+ cp.HasIfUnmodifiedSince = !cp.IfUnmodifiedSinceTime.IsZero()
+ cp.IsClientConditional = cp.HasIfNoneMatch || cp.HasIfModifiedSince || cp.HasIfUnmodifiedSince
+}
+
+// CheckIfNoneMatch determines if the provided match value satisfies an "If-None-Match"
+// condition against the cached object. As Trickster is a cache, matching is always weak.
+func CheckIfNoneMatch(etag string, headerValue string, ls status.LookupStatus) bool {
+
+ if etag == "" || headerValue == "" {
+ return etag == headerValue
+ }
+
+ if headerValue == "*" {
+ if ls == status.LookupStatusHit || ls == status.LookupStatusRevalidated {
+ return false
+ }
+ return true
+ }
+
+ parts := strings.Split(headerValue, ",")
+ for _, p := range parts {
+ p = strings.Trim(p, " ")
+ if len(p) > 3 && p[1:2] == "/" {
+ p = p[2:]
+ }
+ if len(p) > 1 && strings.HasPrefix(p, `"`) && strings.HasSuffix(p, `"`) {
+ p = p[1 : len(p)-1]
+ }
+ if p == etag {
+ return false
+ }
+ }
+
+ return true
+}
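+
+// Examples of the matching behavior above, with an assumed cached ETag of
+// "abc": an If-None-Match value of `"abc"` or `W/"abc"` returns false
+// because the cached entity matches (matching is weak), while `"xyz"`
+// returns true; a value of "*" returns false only when the lookup status is
+// a hit or a revalidation.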
diff --git a/internal/proxy/engines/caching_policy_gen.go b/internal/proxy/engines/caching_policy_gen.go
new file mode 100644
index 000000000..f64408aad
--- /dev/null
+++ b/internal/proxy/engines/caching_policy_gen.go
@@ -0,0 +1,409 @@
+package engines
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *CachingPolicy) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "is_fresh":
+ z.IsFresh, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "nocache":
+ z.NoCache, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "notransform":
+ z.NoTransform, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "freshness_lifetime":
+ z.FreshnessLifetime, err = dc.ReadInt()
+ if err != nil {
+ return
+ }
+ case "can_revalidate":
+ z.CanRevalidate, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "must_revalidate":
+ z.MustRevalidate, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "last_modified":
+ z.LastModified, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "expires":
+ z.Expires, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "date":
+ z.Date, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "local_date":
+ z.LocalDate, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "etag":
+ z.ETag, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "if_none_match_value":
+ z.IfNoneMatchValue, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "if_modified_since_time":
+ z.IfModifiedSinceTime, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "if_unmodified_since_time":
+ z.IfUnmodifiedSinceTime, err = dc.ReadTime()
+ if err != nil {
+ return
+ }
+ case "is_negative_cache":
+ z.IsNegativeCache, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *CachingPolicy) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 15
+ // write "is_fresh"
+ err = en.Append(0x8f, 0xa8, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x65, 0x73, 0x68)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.IsFresh)
+ if err != nil {
+ return
+ }
+ // write "nocache"
+ err = en.Append(0xa7, 0x6e, 0x6f, 0x63, 0x61, 0x63, 0x68, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.NoCache)
+ if err != nil {
+ return
+ }
+ // write "notransform"
+ err = en.Append(0xab, 0x6e, 0x6f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.NoTransform)
+ if err != nil {
+ return
+ }
+ // write "freshness_lifetime"
+ err = en.Append(0xb2, 0x66, 0x72, 0x65, 0x73, 0x68, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt(z.FreshnessLifetime)
+ if err != nil {
+ return
+ }
+ // write "can_revalidate"
+ err = en.Append(0xae, 0x63, 0x61, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.CanRevalidate)
+ if err != nil {
+ return
+ }
+ // write "must_revalidate"
+ err = en.Append(0xaf, 0x6d, 0x75, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.MustRevalidate)
+ if err != nil {
+ return
+ }
+ // write "last_modified"
+ err = en.Append(0xad, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.LastModified)
+ if err != nil {
+ return
+ }
+ // write "expires"
+ err = en.Append(0xa7, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.Expires)
+ if err != nil {
+ return
+ }
+ // write "date"
+ err = en.Append(0xa4, 0x64, 0x61, 0x74, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.Date)
+ if err != nil {
+ return
+ }
+ // write "local_date"
+ err = en.Append(0xaa, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.LocalDate)
+ if err != nil {
+ return
+ }
+ // write "etag"
+ err = en.Append(0xa4, 0x65, 0x74, 0x61, 0x67)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ETag)
+ if err != nil {
+ return
+ }
+ // write "if_none_match_value"
+ err = en.Append(0xb3, 0x69, 0x66, 0x5f, 0x6e, 0x6f, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.IfNoneMatchValue)
+ if err != nil {
+ return
+ }
+ // write "if_modified_since_time"
+ err = en.Append(0xb6, 0x69, 0x66, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.IfModifiedSinceTime)
+ if err != nil {
+ return
+ }
+ // write "if_unmodified_since_time"
+ err = en.Append(0xb8, 0x69, 0x66, 0x5f, 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteTime(z.IfUnmodifiedSinceTime)
+ if err != nil {
+ return
+ }
+ // write "is_negative_cache"
+ err = en.Append(0xb1, 0x69, 0x73, 0x5f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.IsNegativeCache)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *CachingPolicy) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 15
+ // string "is_fresh"
+ o = append(o, 0x8f, 0xa8, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x65, 0x73, 0x68)
+ o = msgp.AppendBool(o, z.IsFresh)
+ // string "nocache"
+ o = append(o, 0xa7, 0x6e, 0x6f, 0x63, 0x61, 0x63, 0x68, 0x65)
+ o = msgp.AppendBool(o, z.NoCache)
+ // string "notransform"
+ o = append(o, 0xab, 0x6e, 0x6f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d)
+ o = msgp.AppendBool(o, z.NoTransform)
+ // string "freshness_lifetime"
+ o = append(o, 0xb2, 0x66, 0x72, 0x65, 0x73, 0x68, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65)
+ o = msgp.AppendInt(o, z.FreshnessLifetime)
+ // string "can_revalidate"
+ o = append(o, 0xae, 0x63, 0x61, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65)
+ o = msgp.AppendBool(o, z.CanRevalidate)
+ // string "must_revalidate"
+ o = append(o, 0xaf, 0x6d, 0x75, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65)
+ o = msgp.AppendBool(o, z.MustRevalidate)
+ // string "last_modified"
+ o = append(o, 0xad, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64)
+ o = msgp.AppendTime(o, z.LastModified)
+ // string "expires"
+ o = append(o, 0xa7, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73)
+ o = msgp.AppendTime(o, z.Expires)
+ // string "date"
+ o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x65)
+ o = msgp.AppendTime(o, z.Date)
+ // string "local_date"
+ o = append(o, 0xaa, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x65)
+ o = msgp.AppendTime(o, z.LocalDate)
+ // string "etag"
+ o = append(o, 0xa4, 0x65, 0x74, 0x61, 0x67)
+ o = msgp.AppendString(o, z.ETag)
+ // string "if_none_match_value"
+ o = append(o, 0xb3, 0x69, 0x66, 0x5f, 0x6e, 0x6f, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65)
+ o = msgp.AppendString(o, z.IfNoneMatchValue)
+ // string "if_modified_since_time"
+ o = append(o, 0xb6, 0x69, 0x66, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65)
+ o = msgp.AppendTime(o, z.IfModifiedSinceTime)
+ // string "if_unmodified_since_time"
+ o = append(o, 0xb8, 0x69, 0x66, 0x5f, 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65)
+ o = msgp.AppendTime(o, z.IfUnmodifiedSinceTime)
+ // string "is_negative_cache"
+ o = append(o, 0xb1, 0x69, 0x73, 0x5f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65)
+ o = msgp.AppendBool(o, z.IsNegativeCache)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *CachingPolicy) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "is_fresh":
+ z.IsFresh, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "nocache":
+ z.NoCache, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "notransform":
+ z.NoTransform, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "freshness_lifetime":
+ z.FreshnessLifetime, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ return
+ }
+ case "can_revalidate":
+ z.CanRevalidate, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "must_revalidate":
+ z.MustRevalidate, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "last_modified":
+ z.LastModified, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "expires":
+ z.Expires, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "date":
+ z.Date, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "local_date":
+ z.LocalDate, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "etag":
+ z.ETag, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "if_none_match_value":
+ z.IfNoneMatchValue, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "if_modified_since_time":
+ z.IfModifiedSinceTime, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "if_unmodified_since_time":
+ z.IfUnmodifiedSinceTime, bts, err = msgp.ReadTimeBytes(bts)
+ if err != nil {
+ return
+ }
+ case "is_negative_cache":
+ z.IsNegativeCache, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *CachingPolicy) Msgsize() (s int) {
+ s = 1 + 9 + msgp.BoolSize + 8 + msgp.BoolSize + 12 + msgp.BoolSize + 19 + msgp.IntSize + 15 + msgp.BoolSize + 16 + msgp.BoolSize + 14 + msgp.TimeSize + 8 + msgp.TimeSize + 5 + msgp.TimeSize + 11 + msgp.TimeSize + 5 + msgp.StringPrefixSize + len(z.ETag) + 20 + msgp.StringPrefixSize + len(z.IfNoneMatchValue) + 23 + msgp.TimeSize + 25 + msgp.TimeSize + 18 + msgp.BoolSize
+ return
+}
diff --git a/internal/proxy/engines/caching_policy_gen_test.go b/internal/proxy/engines/caching_policy_gen_test.go
new file mode 100644
index 000000000..fcd37b378
--- /dev/null
+++ b/internal/proxy/engines/caching_policy_gen_test.go
@@ -0,0 +1,123 @@
+package engines
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalCachingPolicy(t *testing.T) {
+ v := CachingPolicy{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgCachingPolicy(b *testing.B) {
+ v := CachingPolicy{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgCachingPolicy(b *testing.B) {
+ v := CachingPolicy{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalCachingPolicy(b *testing.B) {
+ v := CachingPolicy{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeCachingPolicy(t *testing.T) {
+ v := CachingPolicy{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := CachingPolicy{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeCachingPolicy(b *testing.B) {
+ v := CachingPolicy{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeCachingPolicy(b *testing.B) {
+ v := CachingPolicy{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/internal/proxy/engines/caching_policy_test.go b/internal/proxy/engines/caching_policy_test.go
new file mode 100644
index 000000000..f1dc4557b
--- /dev/null
+++ b/internal/proxy/engines/caching_policy_test.go
@@ -0,0 +1,313 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "net/http"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+func TestCachingPolicyClone(t *testing.T) {
+ cp := &CachingPolicy{
+ IsClientFresh: true,
+ }
+ v := cp.Clone().IsClientFresh
+ if !v {
+ t.Errorf("expected %t got %t", true, v)
+ }
+}
+
+func TestMerge(t *testing.T) {
+
+ cp := &CachingPolicy{
+ IsClientFresh: true,
+ }
+
+ cp.Merge(nil)
+ if !cp.IsClientFresh {
+ t.Errorf("expected %t got %t", true, cp.IsClientFresh)
+ }
+
+}
+
+func TestGetResponseCachingPolicy(t *testing.T) {
+
+ now := time.Now().Truncate(time.Second)
+
+ tests := []struct {
+ a http.Header
+ expectedTTL time.Duration
+ }{
+ { // 0 - Cache-Control: no-store
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueNoStore},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 1 - Cache-Control: no-cache
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueNoCache},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 2 - Cache-Control: max-age=300
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueMaxAge + "=300"},
+ },
+ expectedTTL: time.Minute * time.Duration(5),
+ },
+ { // 3 - Cache-Control: max-age= should come back as -1 ttl
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueMaxAge + "="},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 4 - Cache-Control: max-age (no =anything) should come back as -1 ttl
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueMaxAge},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 5 - Cache-Control: private,max-age=300 should be treated as non-cacheable by proxy
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValuePrivate + "," + headers.ValueMaxAge + "=300"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 6 - Cache-Control: public,max-age=300 should be treated as cacheable by proxy
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValuePublic + "," + headers.ValueMaxAge + "=300"},
+ },
+ expectedTTL: time.Minute * time.Duration(5),
+ },
+ { // 7 - Cache-Control and Expires, Cache-Control should win
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValuePublic + "," + headers.ValueMaxAge + "=300"},
+ headers.NameExpires: []string{"-1"},
+ },
+ expectedTTL: time.Minute * time.Duration(5),
+ },
+ { // 8 - Cache-Control and LastModified, Cache-Control should win
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValuePublic + "," + headers.ValueMaxAge + "=300"},
+ headers.NameLastModified: []string{"Sun, 16 Jun 2019 14:19:04 GMT"},
+ },
+ expectedTTL: time.Minute * time.Duration(5),
+ },
+ { // 9 - Already Expired (could not parse)
+ a: http.Header{
+ headers.NameExpires: []string{"-1"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 10 - Already Expired (parseable in the past)
+ a: http.Header{
+ headers.NameExpires: []string{"Sun, 16 Jun 2019 14:19:04 GMT"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 11 - Expires in an hour
+ a: http.Header{
+ headers.NameDate: []string{now.Format(time.RFC1123)},
+ headers.NameExpires: []string{now.Add(time.Hour * time.Duration(1)).Format(time.RFC1123)},
+ },
+ expectedTTL: 1 * time.Hour,
+ },
+ { // 12 - Synthesized TTL from Last Modified
+ a: http.Header{
+ headers.NameDate: []string{now.Format(time.RFC1123)},
+ headers.NameLastModified: []string{now.Add(-time.Hour * time.Duration(5)).Format(time.RFC1123)},
+ },
+ expectedTTL: 1 * time.Hour,
+ },
+ { // 13 - No Cache Control Response Headers
+ a: http.Header{
+ headers.NameDate: []string{now.Format(time.RFC1123)},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 14 - Invalid Date Header Format
+ a: http.Header{
+ headers.NameDate: []string{"1571338193"},
+ headers.NameExpires: []string{"-1"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+		{ // 15 - ETag only, no explicit freshness information
+ a: http.Header{
+ headers.NameETag: []string{"etag-test"},
+ },
+ expectedTTL: 0,
+ },
+ { // 16 - Invalid Last Modified Date Header Format
+ a: http.Header{
+ headers.NameLastModified: []string{"1571338193"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 17 - Must Revalidate
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueMustRevalidate},
+ headers.NameLastModified: []string{"Sun, 16 Jun 2019 14:19:04 GMT"},
+ },
+ expectedTTL: 0,
+ },
+ { // 18 - NoTransform
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueNoTransform},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ { // 19 - Set-Cookie
+ a: http.Header{
+ headers.NameSetCookie: []string{"some-fake-value-for-testing"},
+ },
+ expectedTTL: -1 * time.Second,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ p := GetResponseCachingPolicy(200, nil, test.a)
+ d := time.Duration(p.FreshnessLifetime) * time.Second
+ if test.expectedTTL != d {
+ t.Errorf("expected ttl of %d got %d", test.expectedTTL, d)
+ }
+ })
+ }
+}
+
+func TestResolveClientConditionalsIUS(t *testing.T) {
+
+ cp := &CachingPolicy{
+ IsClientConditional: true,
+ HasIfUnmodifiedSince: true,
+ LastModified: time.Unix(5, 0),
+ IfUnmodifiedSinceTime: time.Unix(4, 0),
+ }
+ cp.ResolveClientConditionals(status.LookupStatusHit)
+
+ if !cp.IsClientFresh {
+ t.Errorf("expected %t got %t", true, cp.IsClientFresh)
+ }
+
+}
+
+func TestGetResponseCachingPolicyNegativeCache(t *testing.T) {
+ p := GetResponseCachingPolicy(400, map[int]time.Duration{400: 300 * time.Second}, nil)
+ if p.FreshnessLifetime != 300 {
+ t.Errorf("expected ttl of %d got %d", 300, p.FreshnessLifetime)
+ }
+}
+
+func TestGetRequestCacheability(t *testing.T) {
+
+ tests := []struct {
+ a http.Header
+ isCacheable bool
+ }{
+ { // 0 - Cache-Control: no-store
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueNoStore},
+ },
+ isCacheable: false,
+ },
+ { // 1 - Cache-Control: no-cache
+ a: http.Header{
+ headers.NameCacheControl: []string{headers.ValueNoCache},
+ },
+ isCacheable: false,
+ },
+ { // 2 - No Cache Control Request Headers
+ a: http.Header{},
+ isCacheable: true,
+ },
+ { // 3 - Pragma: NoCache
+ a: http.Header{
+ headers.NamePragma: []string{headers.ValueNoCache},
+ },
+ isCacheable: false,
+ },
+ { // 4 - IMS
+ a: http.Header{
+ headers.NameIfModifiedSince: []string{"Sun, 16 Jun 2019 14:19:04 GMT"},
+ },
+ isCacheable: true,
+ },
+ { // 5 - IUS
+ a: http.Header{
+ headers.NameIfUnmodifiedSince: []string{"Sun, 16 Jun 2019 14:19:04 GMT"},
+ },
+ isCacheable: true,
+ },
+ { // 6 - INM
+ a: http.Header{
+ headers.NameIfNoneMatch: []string{"test-string"},
+ },
+ isCacheable: true,
+ },
+ { // 7 - IM
+ a: http.Header{
+ headers.NameIfMatch: []string{"test-string"},
+ },
+ isCacheable: true,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ p := GetRequestCachingPolicy(test.a)
+ ic := !p.NoCache
+ if ic != test.isCacheable {
+ t.Errorf("mismatch isCacheable expected %v got %v", test.isCacheable, ic)
+ }
+ })
+ }
+
+}
+
+func TestCheckIfNoneMatch(t *testing.T) {
+
+ res := CheckIfNoneMatch("", "", status.LookupStatusHit)
+ if !res {
+ t.Errorf("expected %t got %t", true, res)
+ }
+
+ res = CheckIfNoneMatch("test", "*", status.LookupStatusHit)
+ if res {
+ t.Errorf("expected %t got %t", false, res)
+ }
+
+ res = CheckIfNoneMatch("test", "*", status.LookupStatusKeyMiss)
+ if !res {
+ t.Errorf("expected %t got %t", true, res)
+ }
+
+ res = CheckIfNoneMatch("test", "test", status.LookupStatusHit)
+ if res {
+ t.Errorf("expected %t got %t", false, res)
+ }
+
+ res = CheckIfNoneMatch("test", "w/test", status.LookupStatusHit)
+ if res {
+ t.Errorf("expected %t got %t", false, res)
+ }
+
+}
diff --git a/internal/proxy/engines/client_test.go b/internal/proxy/engines/client_test.go
new file mode 100644
index 000000000..7abdc9f88
--- /dev/null
+++ b/internal/proxy/engines/client_test.go
@@ -0,0 +1,833 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ tt "github.com/Comcast/trickster/internal/proxy/timeconv"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+
+ "github.com/prometheus/common/model"
+)
+
+// Prometheus API
+const (
+ APIPath = "/api/v1/"
+ mnQueryRange = "query_range"
+ mnQuery = "query"
+ mnLabels = "labels"
+ mnLabel = "label"
+ mnSeries = "series"
+ mnTargets = "targets"
+ mnRules = "rules"
+ mnAlerts = "alerts"
+ mnAlertManagers = "alertmanagers"
+ mnStatus = "status"
+)
+
+// Common URL Parameter Names
+const (
+ upQuery = "query"
+ upStart = "start"
+ upEnd = "end"
+ upStep = "step"
+ upTime = "time"
+ upMatch = "match[]"
+)
+
+// TestClient implements the Proxy Client interface for testing purposes
+type TestClient struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+
+ fftime time.Time
+ InstantCacheKey string
+ RangeCacheKey string
+
+ handlers map[string]http.Handler
+ handlersRegistered bool
+}
+
+func (c *TestClient) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+	// This is the registry of handlers that Trickster supports for Prometheus,
+	// which can be referenced by name (map key) in configuration files
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers[mnQueryRange] = http.HandlerFunc(c.QueryRangeHandler)
+ c.handlers[mnQuery] = http.HandlerFunc(c.QueryHandler)
+ c.handlers[mnSeries] = http.HandlerFunc(c.SeriesHandler)
+ c.handlers["proxycache"] = http.HandlerFunc(c.QueryHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *TestClient) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *TestClient) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+
+ paths := map[string]*config.PathConfig{
+
+ APIPath + mnQueryRange: {
+ Path: APIPath + mnQueryRange,
+ HandlerName: mnQueryRange,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upQuery, upStep},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ ResponseHeaders: map[string]string{headers.NameCacheControl: fmt.Sprintf("%s=%d", headers.ValueSharedMaxAge, 86400)},
+ },
+
+ APIPath + mnQuery: {
+ Path: APIPath + mnQuery,
+ HandlerName: mnQuery,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upQuery, upTime},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ ResponseHeaders: map[string]string{headers.NameCacheControl: fmt.Sprintf("%s=%d", headers.ValueSharedMaxAge, 30)},
+ },
+
+ APIPath + mnSeries: {
+ Path: APIPath + mnSeries,
+ HandlerName: mnSeries,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upMatch, upStart, upEnd},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnLabels: {
+ Path: APIPath + mnLabels,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnLabel: {
+ Path: APIPath + mnLabel,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnTargets: {
+ Path: APIPath + mnTargets,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnRules: {
+ Path: APIPath + mnRules,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnAlerts: {
+ Path: APIPath + mnAlerts,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnAlertManagers: {
+ Path: APIPath + mnAlertManagers,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath + mnStatus: {
+ Path: APIPath + mnStatus,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{headers.NameAuthorization},
+ },
+
+ APIPath: {
+ Path: APIPath,
+ HandlerName: "proxy",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ },
+
+ "/opc": {
+ Path: "/opc",
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ },
+
+ "/": {
+ Path: "/",
+ HandlerName: "proxy",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ },
+ }
+
+ oc.Paths = paths
+ oc.FastForwardPath = paths[APIPath+mnQuery]
+
+ return paths
+
+}
+
+// Configuration returns the upstream Configuration for this Client
+func (c *TestClient) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// SetCache sets the cache object the client will use for caching origin data
+func (c *TestClient) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
+
+// HTTPClient returns the HTTP Client for this origin
+func (c *TestClient) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Name returns the name of the upstream Configuration proxied by the Client
+func (c *TestClient) Name() string {
+ return c.name
+}
+
+// Cache returns a handle to the Cache instance used by the Client
+func (c *TestClient) Cache() cache.Cache {
+ return c.cache
+}
+
+// parseTime converts a query time URL parameter to time.Time.
+// Copied from https://github.com/prometheus/prometheus/blob/master/web/api/v1/api.go
+func parseTime(s string) (time.Time, error) {
+ if t, err := strconv.ParseFloat(s, 64); err == nil {
+ s, ns := math.Modf(t)
+ ns = math.Round(ns*1000) / 1000
+ return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
+ }
+ if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
+ return t, nil
+ }
+ return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
+}
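+
+// For illustration (not part of the original source): parseTime accepts either a
+// Unix epoch value, optionally fractional (e.g. "1560694744.5"), or an
+// RFC3339Nano timestamp (e.g. "2019-06-16T14:19:04Z").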
+
+// parseDuration parses Prometheus step parameters, which can be a float64 of seconds or a duration string like 1d, 5m, etc.
+// tt.ParseDuration handles the duration strings; float64 values are handled here
+func parseDuration(input string) (time.Duration, error) {
+ v, err := strconv.ParseFloat(input, 64)
+ if err != nil {
+ return tt.ParseDuration(input)
+ }
+ // assume v is in seconds
+ return time.Duration(int64(v)) * time.Second, nil
+}
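+
+// For illustration (not part of the original source): parseDuration("300") and
+// parseDuration("5m") both yield five minutes; bare numbers are interpreted as seconds.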
+
+// ParseTimeRangeQuery parses the key parts of a TimeRangeQuery from the inbound HTTP Request
+func (c *TestClient) ParseTimeRangeQuery(r *http.Request) (*timeseries.TimeRangeQuery, error) {
+
+	trq := &timeseries.TimeRangeQuery{Extent: timeseries.Extent{}}
+ qp := r.URL.Query()
+
+ trq.Statement = qp.Get(upQuery)
+ if trq.Statement == "" {
+ return nil, errors.MissingURLParam(upQuery)
+ }
+
+ if p := qp.Get(upStart); p != "" {
+ t, err := parseTime(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Extent.Start = t
+ } else {
+ return nil, errors.MissingURLParam(upStart)
+ }
+
+ if p := qp.Get(upEnd); p != "" {
+ t, err := parseTime(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Extent.End = t
+ } else {
+ return nil, errors.MissingURLParam(upEnd)
+ }
+
+ if p := qp.Get(upStep); p != "" {
+ step, err := parseDuration(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Step = step
+ } else {
+ return nil, errors.MissingURLParam(upStep)
+ }
+
+ if strings.Contains(trq.Statement, " offset ") {
+ trq.IsOffset = true
+ trq.FastForwardDisable = true
+ }
+
+ return trq, nil
+}
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy configuration
+func (c *TestClient) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to construct the full upstream URL
+func (c *TestClient) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/", "/", 1)
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
+
+// SetExtent will change the upstream request query to use the provided Extent
+func (c *TestClient) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery, extent *timeseries.Extent) {
+ params := r.URL.Query()
+ params.Set(upStart, strconv.FormatInt(extent.Start.Unix(), 10))
+ params.Set(upEnd, strconv.FormatInt(extent.End.Unix(), 10))
+ r.URL.RawQuery = params.Encode()
+}
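+
+// For illustration (not part of the original source): given an Extent whose Start
+// and End fall at 1500 and 1800 seconds into the epoch, SetExtent rewrites the
+// request query string so that start=1500 and end=1800 replace any previous values.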
+
+// FastForwardURL returns the URL used to fetch the Fast Forward value, derived from the time range URL
+func (c *TestClient) FastForwardURL(r *http.Request) (*url.URL, error) {
+
+ u := urls.Clone(r.URL)
+
+ if strings.HasSuffix(u.Path, "/query_range") {
+ u.Path = u.Path[0 : len(u.Path)-6]
+ }
+
+ // let the test client have a way to throw an error
+ if strings.Contains(u.RawQuery, "throw_ffurl_error=1") {
+ return nil, fmt.Errorf("This is an intentional test error: %s", ":)")
+ }
+
+ p := u.Query()
+ p.Del(upStart)
+ p.Del(upEnd)
+ p.Del(upStep)
+
+ if c.fftime.IsZero() {
+ c.fftime = time.Now()
+ }
+ p.Set("time", strconv.FormatInt(c.fftime.Unix(), 10))
+
+ u.RawQuery = p.Encode()
+
+ return u, nil
+}
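+
+// For illustration (not part of the original source): a request to
+// /api/v1/query_range?query=up&start=1&end=2&step=15 fast-forwards to
+// /api/v1/query?query=up&time=<now>, because the "_range" path suffix is trimmed
+// and the range parameters are replaced with a single instantaneous time.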
+
+// VectorEnvelope represents a Vector response object from the Prometheus HTTP API
+type VectorEnvelope struct {
+ Status string `json:"status"`
+ Data VectorData `json:"data"`
+}
+
+// VectorData represents the Data body of a Vector response object from the Prometheus HTTP API
+type VectorData struct {
+ ResultType string `json:"resultType"`
+ Result model.Vector `json:"result"`
+}
+
+// MatrixEnvelope represents a Matrix response object from the Prometheus HTTP API
+type MatrixEnvelope struct {
+ Status string `json:"status"`
+ Data MatrixData `json:"data"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ StepDuration time.Duration `json:"step,omitempty"`
+
+ timestamps map[time.Time]bool // tracks unique timestamps in the matrix data
+ tslist times.Times
+ isSorted bool // tracks if the matrix data is currently sorted
+ isCounted bool // tracks if timestamps slice is up-to-date
+}
+
+// MatrixData represents the Data body of a Matrix response object from the Prometheus HTTP API
+type MatrixData struct {
+ ResultType string `json:"resultType"`
+ Result model.Matrix `json:"result"`
+}
+
+// MarshalTimeseries converts a Timeseries into a JSON blob
+func (c *TestClient) MarshalTimeseries(ts timeseries.Timeseries) ([]byte, error) {
+ // Marshal the Envelope back to a json object for Cache Storage
+ if c.RangeCacheKey == "failkey" {
+ return nil, fmt.Errorf("generic failure for testing purposes (key: %s)", c.RangeCacheKey)
+ }
+ return json.Marshal(ts)
+}
+
+// UnmarshalTimeseries converts a JSON blob into a Timeseries
+func (c *TestClient) UnmarshalTimeseries(data []byte) (timeseries.Timeseries, error) {
+ me := &MatrixEnvelope{}
+ err := json.Unmarshal(data, &me)
+ return me, err
+}
+
+// UnmarshalInstantaneous converts a JSON blob into an Instantaneous Data Point
+func (c *TestClient) UnmarshalInstantaneous(data []byte) (timeseries.Timeseries, error) {
+ ve := &VectorEnvelope{}
+ err := json.Unmarshal(data, &ve)
+ if err != nil {
+ return nil, err
+ }
+ return ve.ToMatrix(), nil
+}
+
+// ToMatrix converts a VectorEnvelope to a MatrixEnvelope
+func (ve *VectorEnvelope) ToMatrix() *MatrixEnvelope {
+ me := &MatrixEnvelope{}
+ me.Status = ve.Status
+ me.Data = MatrixData{
+ ResultType: "matrix",
+ Result: make(model.Matrix, 0, len(ve.Data.Result)),
+ }
+ var ts time.Time
+ for _, v := range ve.Data.Result {
+ v.Timestamp = model.TimeFromUnix(v.Timestamp.Unix()) // Round to nearest Second
+ ts = v.Timestamp.Time()
+ me.Data.Result = append(me.Data.Result, &model.SampleStream{Metric: v.Metric, Values: []model.SamplePair{{Timestamp: v.Timestamp, Value: v.Value}}})
+ }
+ me.ExtentList = timeseries.ExtentList{timeseries.Extent{Start: ts, End: ts}}
+ return me
+}
+
+// Step returns the step for the Timeseries
+func (me *MatrixEnvelope) Step() time.Duration {
+ return me.StepDuration
+}
+
+// SetStep sets the step for the Timeseries
+func (me *MatrixEnvelope) SetStep(step time.Duration) {
+ me.StepDuration = step
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the order provided) and optionally sorts the merged Timeseries
+func (me *MatrixEnvelope) Merge(sort bool, collection ...timeseries.Timeseries) {
+ meMetrics := make(map[string]*model.SampleStream)
+ for _, s := range me.Data.Result {
+ meMetrics[s.Metric.String()] = s
+ }
+ for _, ts := range collection {
+ if ts != nil {
+ me2 := ts.(*MatrixEnvelope)
+ for _, s := range me2.Data.Result {
+ name := s.Metric.String()
+ if _, ok := meMetrics[name]; !ok {
+ meMetrics[name] = s
+ me.Data.Result = append(me.Data.Result, s)
+ continue
+ }
+ meMetrics[name].Values = append(meMetrics[name].Values, s.Values...)
+ }
+ me.ExtentList = append(me.ExtentList, me2.ExtentList...)
+ }
+ }
+ me.ExtentList = me.ExtentList.Compress(me.StepDuration)
+ me.isSorted = false
+ me.isCounted = false
+ if sort {
+ me.Sort()
+ }
+}
+
+// Clone returns a perfect copy of the base Timeseries
+func (me *MatrixEnvelope) Clone() timeseries.Timeseries {
+ resMe := &MatrixEnvelope{
+ isCounted: me.isCounted,
+ isSorted: me.isSorted,
+ tslist: make(times.Times, len(me.tslist)),
+ timestamps: make(map[time.Time]bool),
+ Status: me.Status,
+ Data: MatrixData{
+ ResultType: me.Data.ResultType,
+ Result: make(model.Matrix, 0, len(me.Data.Result)),
+ },
+ StepDuration: me.StepDuration,
+ ExtentList: make(timeseries.ExtentList, len(me.ExtentList)),
+ }
+ copy(resMe.ExtentList, me.ExtentList)
+ copy(resMe.tslist, me.tslist)
+
+ for k, v := range me.timestamps {
+ resMe.timestamps[k] = v
+ }
+
+ for _, ss := range me.Data.Result {
+ newSS := &model.SampleStream{Metric: ss.Metric}
+ newSS.Values = ss.Values[:]
+ resMe.Data.Result = append(resMe.Data.Result, newSS)
+ }
+ return resMe
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided count, by evicting elements
+// using a least-recently-used methodology. Any timestamps newer than the provided time are removed before
+// sizing, in order to support backfill tolerance. The provided extent will be marked as used during crop.
+func (me *MatrixEnvelope) CropToSize(sz int, t time.Time, lur timeseries.Extent) {
+ me.isCounted = false
+ me.isSorted = false
+ x := len(me.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed
+ if me.ExtentList[x-1].End.After(t) {
+ me.CropToRange(timeseries.Extent{Start: me.ExtentList[0].Start, End: t})
+ }
+
+ tc := me.TimestampCount()
+ if len(me.Data.Result) == 0 || tc <= sz {
+ return
+ }
+
+ el := timeseries.ExtentListLRU(me.ExtentList).UpdateLastUsed(lur, me.StepDuration)
+ sort.Sort(el)
+
+	rc := tc - sz // number of timestamps we must delete to meet the retention policy
+ removals := make(map[time.Time]bool)
+ done := false
+ var ok bool
+
+ for _, x := range el {
+ for ts := x.Start; !x.End.Before(ts) && !done; ts = ts.Add(me.StepDuration) {
+ if _, ok = me.timestamps[ts]; ok {
+ removals[ts] = true
+ done = len(removals) >= rc
+ }
+ }
+ if done {
+ break
+ }
+ }
+
+ for _, s := range me.Data.Result {
+ tmp := s.Values[:0]
+ for _, r := range s.Values {
+ t = r.Timestamp.Time()
+ if _, ok := removals[t]; !ok {
+ tmp = append(tmp, r)
+ }
+ }
+ s.Values = tmp
+ }
+
+ tl := times.FromMap(removals)
+ sort.Sort(tl)
+ for _, t := range tl {
+ for i, e := range el {
+ if e.StartsAt(t) {
+ el[i].Start = e.Start.Add(me.StepDuration)
+ }
+ }
+ }
+
+ me.ExtentList = timeseries.ExtentList(el).Compress(me.StepDuration)
+ me.Sort()
+}
+
+// CropToRange reduces the Timeseries down to timestamps contained within the provided Extents (inclusive).
+// CropToRange assumes the base Timeseries is already sorted, and will corrupt an unsorted Timeseries
+func (me *MatrixEnvelope) CropToRange(e timeseries.Extent) {
+ me.isCounted = false
+ x := len(me.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the extent of the series is entirely outside the extent of the crop range, return empty set and bail
+ if me.ExtentList.OutsideOf(e) {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the series extent is entirely inside the extent of the crop range, simply adjust down its ExtentList
+ if me.ExtentList.InsideOf(e) {
+ if me.ValueCount() == 0 {
+ me.Data.Result = model.Matrix{}
+ }
+ me.ExtentList = me.ExtentList.Crop(e)
+ return
+ }
+
+ if len(me.Data.Result) == 0 {
+ me.ExtentList = me.ExtentList.Crop(e)
+ return
+ }
+
+ deletes := make(map[int]bool)
+
+ for i, s := range me.Data.Result {
+ start := -1
+ end := -1
+ for j, val := range s.Values {
+ t := val.Timestamp.Time()
+ if t.Equal(e.End) {
+ // for cases where the first element is the only qualifying element,
+ // start must be incremented or an empty response is returned
+ if j == 0 || t.Equal(e.Start) || start == -1 {
+ start = j
+ }
+ end = j + 1
+ break
+ }
+ if t.After(e.End) {
+ end = j
+ break
+ }
+ if t.Before(e.Start) {
+ continue
+ }
+ if start == -1 && (t.Equal(e.Start) || (e.End.After(t) && t.After(e.Start))) {
+ start = j
+ }
+ }
+ if start != -1 && len(s.Values) > 0 {
+ if end == -1 {
+ end = len(s.Values)
+ }
+ me.Data.Result[i].Values = s.Values[start:end]
+ } else {
+ deletes[i] = true
+ }
+ }
+ if len(deletes) > 0 {
+ tmp := me.Data.Result[:0]
+ for i, r := range me.Data.Result {
+ if _, ok := deletes[i]; !ok {
+ tmp = append(tmp, r)
+ }
+ }
+ me.Data.Result = tmp
+ }
+ me.ExtentList = me.ExtentList.Crop(e)
+}
+
+// Sort sorts all Values in each Series chronologically by their timestamp
+func (me *MatrixEnvelope) Sort() {
+
+ if me.isSorted {
+ return
+ }
+
+ tsm := map[time.Time]bool{}
+
+ for i, s := range me.Data.Result { // []SampleStream
+ m := make(map[time.Time]model.SamplePair)
+ for _, v := range s.Values { // []SamplePair
+ t := v.Timestamp.Time()
+ tsm[t] = true
+ m[t] = v
+ }
+ keys := make(times.Times, 0, len(m))
+ for key := range m {
+ keys = append(keys, key)
+ }
+ sort.Sort(keys)
+ sm := make([]model.SamplePair, 0, len(keys))
+ for _, key := range keys {
+ sm = append(sm, m[key])
+ }
+ me.Data.Result[i].Values = sm
+ }
+
+ sort.Sort(me.ExtentList)
+
+ me.timestamps = tsm
+ me.tslist = times.FromMap(tsm)
+ me.isCounted = true
+ me.isSorted = true
+}
+
+func (me *MatrixEnvelope) updateTimestamps() {
+ if me.isCounted {
+ return
+ }
+ m := make(map[time.Time]bool)
+ for _, s := range me.Data.Result { // []SampleStream
+ for _, v := range s.Values { // []SamplePair
+ t := v.Timestamp.Time()
+ m[t] = true
+ }
+ }
+ me.timestamps = m
+ me.tslist = times.FromMap(m)
+ me.isCounted = true
+}
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent list
+func (me *MatrixEnvelope) SetExtents(extents timeseries.ExtentList) {
+ me.isCounted = false
+ me.ExtentList = extents
+}
+
+// Extents returns the Timeseries's ExtentList
+func (me *MatrixEnvelope) Extents() timeseries.ExtentList {
+ return me.ExtentList
+}
+
+// TimestampCount returns the number of unique timestamps across the timeseries
+func (me *MatrixEnvelope) TimestampCount() int {
+ me.updateTimestamps()
+ return len(me.timestamps)
+}
+
+// SeriesCount returns the number of individual Series in the Timeseries object
+func (me *MatrixEnvelope) SeriesCount() int {
+ return len(me.Data.Result)
+}
+
+// ValueCount returns the count of all values across all Series in the Timeseries object
+func (me *MatrixEnvelope) ValueCount() int {
+ c := 0
+ for i := range me.Data.Result {
+ c += len(me.Data.Result[i].Values)
+ }
+ return c
+}
+
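+// HealthHandler is the test stub for the origin health check; it simply proxies the request to the origin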
+func (c *TestClient) HealthHandler(w http.ResponseWriter, r *http.Request) {
+ u := c.BaseURL()
+ u.Path += APIPath + mnLabels
+ DoProxy(w, r)
+}
+
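+// QueryRangeHandler routes a Prometheus query_range request through the DeltaProxyCache engine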
+func (c *TestClient) QueryRangeHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ DeltaProxyCacheRequest(w, r)
+}
+
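+// QueryHandler routes an instantaneous query through the ObjectProxyCache engine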
+func (c *TestClient) QueryHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ ObjectProxyCacheRequest(w, r)
+}
+
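+// SeriesHandler routes a series request through the ObjectProxyCache engine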
+func (c *TestClient) SeriesHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ ObjectProxyCacheRequest(w, r)
+}
+
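+// ProxyHandler passes the request through to the origin without caching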
+func (c *TestClient) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ DoProxy(w, r)
+}
+
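+// testResultHeaderPartMatch verifies that each expected key=value pair appears in the X-Trickster-Result response header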
+func testResultHeaderPartMatch(header http.Header, kvp map[string]string) error {
+ if len(kvp) == 0 {
+ return nil
+ }
+	if len(header) == 0 {
+		return fmt.Errorf("missing response headers")
+ }
+
+ if h, ok := header["X-Trickster-Result"]; ok {
+ res := strings.Join(h, "; ")
+ for k, v := range kvp {
+ if !strings.Contains(res, fmt.Sprintf("; %s=%s", k, v)) && strings.Index(res, fmt.Sprintf("%s=%s", k, v)) != 0 {
+ return fmt.Errorf("invalid status, expected %s=%s in %s", k, v, h)
+ }
+ }
+ } else {
+		return fmt.Errorf("missing X-Trickster-Result header")
+ }
+
+ return nil
+}
+
+func testStatusCodeMatch(have, expected int) error {
+ if have != expected {
+ return fmt.Errorf("expected http status %d got %d", expected, have)
+ }
+ return nil
+}
+
+func testStringMatch(have, expected string) error {
+ if have != expected {
+ return fmt.Errorf("expected string `%s` got `%s`", expected, have)
+ }
+ return nil
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
+func (me *MatrixEnvelope) Size() int {
+
+ c := 0
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range me.Data.Result {
+ wg.Add(1)
+ go func(s *model.SampleStream) {
+ mtx.Lock()
+ c += (len(s.Values) * 16) + len(s.Metric.String())
+ mtx.Unlock()
+ wg.Done()
+ }(me.Data.Result[i])
+ }
+ wg.Wait()
+ return c
+}
diff --git a/internal/proxy/engines/deltaproxycache.go b/internal/proxy/engines/deltaproxycache.go
new file mode 100644
index 000000000..9c4c4ceef
--- /dev/null
+++ b/internal/proxy/engines/deltaproxycache.go
@@ -0,0 +1,382 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ tctx "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ "github.com/Comcast/trickster/pkg/locks"
+)
+
+// DeltaProxyCache is used for Time Series Acceleration, and not used for normal HTTP Object Caching
+
+// DeltaProxyCacheRequest identifies the gaps between the cache and a new timeseries request,
+// requests the gaps from the origin server and returns the reconstituted dataset to the downstream request
+// while caching the results for subsequent requests of the same data
+func DeltaProxyCacheRequest(w http.ResponseWriter, r *http.Request) {
+
+ rsc := request.GetResources(r)
+
+ oc := rsc.OriginConfig
+ pc := rsc.PathConfig
+ cache := rsc.CacheClient
+ cc := rsc.CacheConfig
+
+ client := rsc.OriginClient.(origins.TimeseriesClient)
+
+ trq, err := client.ParseTimeRangeQuery(r)
+ if err != nil {
+ // err may simply mean incompatible query (e.g., non-select), so just proxy
+ DoProxy(w, r)
+ return
+ }
+
+ var cacheStatus status.LookupStatus
+
+ pr := newProxyRequest(r, w)
+ trq.FastForwardDisable = oc.FastForwardDisable || trq.FastForwardDisable
+ trq.NormalizeExtent()
+
+ // this is used to ensure the head of the cache respects the BackFill Tolerance
+ bf := timeseries.Extent{Start: time.Unix(0, 0), End: trq.Extent.End}
+
+ if !trq.IsOffset && oc.BackfillTolerance > 0 {
+ bf.End = bf.End.Add(-oc.BackfillTolerance)
+ }
+
+ now := time.Now()
+
+ OldestRetainedTimestamp := time.Time{}
+ if oc.TimeseriesEvictionMethod == config.EvictionMethodOldest {
+ OldestRetainedTimestamp = now.Truncate(trq.Step).Add(-(trq.Step * oc.TimeseriesRetention))
+ if trq.Extent.End.Before(OldestRetainedTimestamp) {
+ log.Debug("timerange end is too early to consider caching", log.Pairs{"oldestRetainedTimestamp": OldestRetainedTimestamp, "step": trq.Step, "retention": oc.TimeseriesRetention})
+ DoProxy(w, r)
+ return
+ }
+ if trq.Extent.Start.After(bf.End) {
+ log.Debug("timerange is too new to cache due to backfill tolerance", log.Pairs{"backFillToleranceSecs": oc.BackfillToleranceSecs, "newestRetainedTimestamp": bf.End, "queryStart": trq.Extent.Start})
+ DoProxy(w, r)
+ return
+ }
+ }
+
+ client.SetExtent(r, trq, &trq.Extent)
+ key := oc.CacheKeyPrefix + "." + pr.DeriveCacheKey(trq.TemplateURL, "")
+
+ locks.Acquire(key)
+
+ // this is used to determine if Fast Forward should be activated for this request
+	normalizedNow := &timeseries.TimeRangeQuery{
+ Extent: timeseries.Extent{Start: time.Unix(0, 0), End: now},
+ Step: trq.Step,
+ }
+ normalizedNow.NormalizeExtent()
+
+ var cts timeseries.Timeseries
+ var doc *HTTPDocument
+ var elapsed time.Duration
+
+ coReq := GetRequestCachingPolicy(r.Header)
+ if coReq.NoCache {
+ cacheStatus = status.LookupStatusPurge
+ cache.Remove(key)
+ cts, doc, elapsed, err = fetchTimeseries(pr, trq, client)
+ if err != nil {
+ recordDPCResult(r, status.LookupStatusProxyError, doc.StatusCode, r.URL.Path, "", elapsed.Seconds(), nil, doc.Headers)
+ Respond(w, doc.StatusCode, doc.Headers, doc.Body)
+ locks.Release(key)
+ return // fetchTimeseries logs the error
+ }
+ } else {
+ doc, cacheStatus, _, err = QueryCache(cache, key, nil)
+ if cacheStatus == status.LookupStatusKeyMiss {
+ cts, doc, elapsed, err = fetchTimeseries(pr, trq, client)
+ if err != nil {
+ recordDPCResult(r, status.LookupStatusProxyError, doc.StatusCode, r.URL.Path, "", elapsed.Seconds(), nil, doc.Headers)
+ Respond(w, doc.StatusCode, doc.Headers, doc.Body)
+ locks.Release(key)
+ return // fetchTimeseries logs the error
+ }
+ } else {
+ // Load the Cached Timeseries
+ if doc == nil {
+ err = errors.New("empty document body")
+ } else {
+ if cc.CacheType == "memory" {
+ cts = doc.timeseries
+ } else {
+ cts, err = client.UnmarshalTimeseries(doc.Body)
+ }
+ }
+ if err != nil {
+ log.Error("cache object unmarshaling failed", log.Pairs{"key": key, "originName": client.Name()})
+ cache.Remove(key)
+ cts, doc, elapsed, err = fetchTimeseries(pr, trq, client)
+ if err != nil {
+ recordDPCResult(r, status.LookupStatusProxyError, doc.StatusCode, r.URL.Path, "", elapsed.Seconds(), nil, doc.Headers)
+ Respond(w, doc.StatusCode, doc.Headers, doc.Body)
+ locks.Release(key)
+ return // fetchTimeseries logs the error
+ }
+ } else {
+ if oc.TimeseriesEvictionMethod == config.EvictionMethodLRU {
+ el := cts.Extents()
+ tsc := cts.TimestampCount()
+ if tsc > 0 &&
+ tsc >= oc.TimeseriesRetentionFactor {
+ if trq.Extent.End.Before(el[0].Start) {
+ log.Debug("timerange end is too early to consider caching", log.Pairs{"step": trq.Step, "retention": oc.TimeseriesRetention})
+ locks.Release(key)
+ DoProxy(w, r)
+ return
+ }
+ if trq.Extent.Start.After(el[len(el)-1].End) {
+ log.Debug("timerange is too new to cache due to backfill tolerance", log.Pairs{"backFillToleranceSecs": oc.BackfillToleranceSecs, "newestRetainedTimestamp": bf.End, "queryStart": trq.Extent.Start})
+ locks.Release(key)
+ DoProxy(w, r)
+ return
+ }
+ }
+ }
+ cacheStatus = status.LookupStatusPartialHit
+ }
+ }
+ }
+
+ // Find the ranges that we want, but which are not currently cached
+ var missRanges timeseries.ExtentList
+ if cacheStatus == status.LookupStatusPartialHit {
+ missRanges = trq.CalculateDeltas(cts.Extents())
+ }
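+	// For illustration (not part of the original source): if the cached series covers
+	// [10:00-11:00] and the request spans [09:00-12:00], the deltas are the uncovered
+	// ranges on either side; only those gaps are fetched from the origin below and
+	// then merged with the cached data.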
+
+ if len(missRanges) == 0 && cacheStatus == status.LookupStatusPartialHit {
+ // on full cache hit, elapsed records the time taken to query the cache and definitively conclude that it is a full cache hit
+ elapsed = time.Since(now)
+ cacheStatus = status.LookupStatusHit
+ } else if len(missRanges) == 1 && missRanges[0].Start.Equal(trq.Extent.Start) && missRanges[0].End.Equal(trq.Extent.End) {
+ cacheStatus = status.LookupStatusRangeMiss
+ }
+
+ ffStatus := "off"
+
+ var ffURL *url.URL
+ // if the step resolution <= Fast Forward TTL, then no need to even try Fast Forward
+ if !trq.FastForwardDisable {
+ if trq.Step > oc.FastForwardTTL {
+ ffURL, err = client.FastForwardURL(r)
+ if err != nil || ffURL == nil {
+ ffStatus = "err"
+ trq.FastForwardDisable = true
+ }
+ } else {
+ trq.FastForwardDisable = true
+ }
+ }
+
+ dpStatus := log.Pairs{"cacheKey": key, "cacheStatus": cacheStatus, "reqStart": trq.Extent.Start.Unix(), "reqEnd": trq.Extent.End.Unix()}
+ if len(missRanges) > 0 {
+ dpStatus["extentsFetched"] = timeseries.ExtentList(missRanges).String()
+ }
+
+ // maintain a list of timeseries to merge into the main timeseries
+ mts := make([]timeseries.Timeseries, 0, len(missRanges))
+ wg := sync.WaitGroup{}
+ appendLock := sync.Mutex{}
+ uncachedValueCount := 0
+
+ // iterate each time range that the client needs and fetch from the upstream origin
+ for i := range missRanges {
+ wg.Add(1)
+ // This fetches the gaps from the origin and adds their datasets to the merge list
+ go func(e *timeseries.Extent, rq *proxyRequest) {
+ rq.Request = rq.WithContext(tctx.WithResources(r.Context(), request.NewResources(oc, pc, cc, cache, client)))
+ client.SetExtent(rq.Request, trq, e)
+ body, resp, _ := rq.Fetch()
+ if resp.StatusCode == http.StatusOK && len(body) > 0 {
+ nts, err := client.UnmarshalTimeseries(body)
+ if err != nil {
+ log.Error("proxy object unmarshaling failed", log.Pairs{"body": string(body)})
+ wg.Done()
+ return
+ }
+				nts.SetStep(trq.Step)
+				nts.SetExtents([]timeseries.Extent{*e})
+				appendLock.Lock()
+				// guard both the shared counter and the merge list, since multiple
+				// fetch goroutines may update them concurrently
+				uncachedValueCount += nts.ValueCount()
+				mts = append(mts, nts)
+				appendLock.Unlock()
+ }
+ wg.Done()
+ }(&missRanges[i], pr.Clone())
+ }
+
+ var hasFastForwardData bool
+ var ffts timeseries.Timeseries
+ // Only fast forward if configured and the user request is for the absolute latest datapoint
+ if (!trq.FastForwardDisable) && (trq.Extent.End.Equal(normalizedNow.Extent.End)) && ffURL.Scheme != "" {
+ wg.Add(1)
+ rs := request.NewResources(oc, oc.FastForwardPath, cc, cache, client)
+ rs.AlternateCacheTTL = oc.FastForwardTTL
+ req := r.Clone(tctx.WithResources(context.Background(), rs))
+ go func() {
+ // create a new context that uses the fast forward path config instead of the time series path config
+ req.URL = ffURL
+ body, resp, isHit := FetchViaObjectProxyCache(req)
+ if resp.StatusCode == http.StatusOK && len(body) > 0 {
+ ffts, err = client.UnmarshalInstantaneous(body)
+ if err != nil {
+ ffStatus = "err"
+ log.Error("proxy object unmarshaling failed", log.Pairs{"body": string(body)})
+ wg.Done()
+ return
+ }
+ ffts.SetStep(trq.Step)
+ x := ffts.Extents()
+ if isHit {
+ ffStatus = "hit"
+ } else {
+ ffStatus = "miss"
+ }
+ hasFastForwardData = len(x) > 0 && x[0].End.After(trq.Extent.End)
+ } else {
+ ffStatus = "err"
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+
+ // Merge the new delta timeseries into the cached timeseries
+ if len(mts) > 0 {
+ // on a partial hit, elapsed should record the amount of time waiting for all upstream requests to complete
+ elapsed = time.Since(now)
+ cts.Merge(true, mts...)
+ }
+
+ // cts is the cacheable time series, rts is the user's response timeseries
+ rts := cts.Clone()
+
+ // if it was a cache key miss, there is no need to undergo Crop since the extents are identical
+ if cacheStatus != status.LookupStatusKeyMiss {
+ rts.CropToRange(trq.Extent)
+ }
+ cachedValueCount := rts.ValueCount() - uncachedValueCount
+
+ if uncachedValueCount > 0 {
+ metrics.ProxyRequestElements.WithLabelValues(oc.Name, oc.OriginType, "uncached", r.URL.Path).Add(float64(uncachedValueCount))
+ }
+
+ if cachedValueCount > 0 {
+ metrics.ProxyRequestElements.WithLabelValues(oc.Name, oc.OriginType, "cached", r.URL.Path).Add(float64(cachedValueCount))
+ }
+
+	// Merge Fast Forward data if present. This must be done after the downstream crop, since
+	// the cropped extent was normalized to step boundaries and would remove fast forward data.
+	// If the fast forward data point is older (e.g., cached) than the last datapoint in the returned time series, it is not merged
+ if hasFastForwardData && len(ffts.Extents()) == 1 && ffts.Extents()[0].Start.Truncate(time.Second).After(normalizedNow.Extent.End) {
+ rts.Merge(false, ffts)
+ }
+ rts.SetExtents(nil) // so they are not included in the client response json
+ rts.SetStep(0)
+ rdata, err := client.MarshalTimeseries(rts)
+ rh := http.Header(doc.Headers).Clone()
+
+ switch cacheStatus {
+ case status.LookupStatusKeyMiss, status.LookupStatusPartialHit, status.LookupStatusRangeMiss:
+ wg.Add(1)
+ // Write the newly-merged object back to the cache
+ go func() {
+ defer wg.Done()
+ // Crop the Cache Object down to the Sample Size or Age Retention Policy and the Backfill Tolerance before storing to cache
+ switch oc.TimeseriesEvictionMethod {
+ case config.EvictionMethodLRU:
+ cts.CropToSize(oc.TimeseriesRetentionFactor, bf.End, trq.Extent)
+ default:
+ cts.CropToRange(timeseries.Extent{End: bf.End, Start: OldestRetainedTimestamp})
+ }
+ // Don't cache datasets with empty extents (everything was cropped so there is nothing to cache)
+ if len(cts.Extents()) > 0 {
+ if cc.CacheType == "memory" {
+ doc.timeseries = cts
+ } else {
+ cdata, err := client.MarshalTimeseries(cts)
+ if err != nil {
+ locks.Release(key)
+ return
+ }
+ doc.Body = cdata
+ }
+ WriteCache(cache, key, doc, oc.TimeseriesTTL, oc.CompressableTypes)
+ }
+ }()
+ }
+
+	// Respond to the user using the response headers from the delta response, to avoid a header map conflict with cacheData in WriteCache
+ logDeltaRoutine(dpStatus)
+ recordDPCResult(r, cacheStatus, doc.StatusCode, r.URL.Path, ffStatus, elapsed.Seconds(), missRanges, rh)
+ Respond(w, doc.StatusCode, rh, rdata)
+
+ wg.Wait()
+ locks.Release(key)
+}
+
+func logDeltaRoutine(p log.Pairs) { log.Debug("delta routine completed", p) }
+
+func fetchTimeseries(pr *proxyRequest, trq *timeseries.TimeRangeQuery, client origins.TimeseriesClient) (timeseries.Timeseries, *HTTPDocument, time.Duration, error) {
+
+ body, resp, elapsed := pr.Fetch()
+
+ d := &HTTPDocument{
+ Status: resp.Status,
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: body,
+ }
+
+	if resp.StatusCode != http.StatusOK {
+		log.Error("unexpected upstream response", log.Pairs{"statusCode": resp.StatusCode})
+		return nil, d, time.Duration(0), fmt.Errorf("unexpected upstream response")
+ }
+
+ ts, err := client.UnmarshalTimeseries(body)
+ if err != nil {
+ log.Error("proxy object unmarshaling failed", log.Pairs{"body": string(body)})
+ return nil, d, time.Duration(0), err
+ }
+
+ ts.SetExtents([]timeseries.Extent{trq.Extent})
+ ts.SetStep(trq.Step)
+
+ return ts, d, elapsed, nil
+}
+
+func recordDPCResult(r *http.Request, cacheStatus status.LookupStatus, httpStatus int, path, ffStatus string, elapsed float64, needed []timeseries.Extent, header http.Header) {
+ recordResults(r, "DeltaProxyCache", cacheStatus, httpStatus, path, ffStatus, elapsed, timeseries.ExtentList(needed), header)
+}
diff --git a/internal/proxy/engines/deltaproxycache_test.go b/internal/proxy/engines/deltaproxycache_test.go
new file mode 100644
index 000000000..fe6ec6d35
--- /dev/null
+++ b/internal/proxy/engines/deltaproxycache_test.go
@@ -0,0 +1,1577 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+ "github.com/Comcast/trickster/pkg/promsim"
+)
+
+// test queries
+const (
+ queryReturnsOKNoLatency = "some_query_here{latency_ms=0,range_latency_ms=0}"
+ queryReturnsBadPayload = "some_query_here{invalid_response_body=1,latency_ms=0,range_latency_ms=0}"
+ queryReturnsBadRequest = "some_query_here{status_code=400,latency_ms=0,range_latency_ms=0}"
+ queryReturnsBadGateway = "some_query_here{status_code=502,latency_ms=0,range_latency_ms=0}"
+)
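+
+// The label values embedded in the test queries above act as promsim simulation
+// directives (e.g. status_code=400 forces a 400 response, latency_ms=0 disables
+// artificial latency), which control how the simulated origin responds.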
+
+var testConfigFile string
+
+func setupTestHarnessDPC() (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *request.Resources, error) {
+
+ client := &TestClient{}
+ ts, w, r, hc, err := tu.NewTestInstance(testConfigFile, client.DefaultPathConfigs, 200, "", nil, "promsim", "/api/v1/query_range", "debug")
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ pc := rsc.PathConfig
+
+ if pc == nil {
+ return nil, nil, nil, nil, fmt.Errorf("could not find path %s", "/api/v1/query_range")
+ }
+
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+ oc.HTTPClient = hc
+
+ client.cache = cc
+ client.webClient = hc
+ client.config = oc
+
+ pc.CacheKeyParams = []string{"rangeKey"}
+ pc.CacheKeyParams = []string{"instantKey"}
+
+ return ts, w, r, rsc, nil
+}
+
+func TestDeltaProxyCacheRequestMissThenHit(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ oc.FastForwardDisable = true
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // get cache hit coverage too by repeating:
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "hit"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDeltaProxyCacheRequestAllItemsTooNew(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+ oc.BackfillToleranceSecs = 600
+ oc.BackfillTolerance = time.Second * time.Duration(oc.BackfillToleranceSecs)
+
+ step := time.Duration(300) * time.Second
+ end := time.Now()
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(5) * time.Minute), End: end}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extr.Start, extr.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if resp.Header.Get("status") != "" {
+		t.Errorf("status header should not be present. Found with value %s", resp.Header.Get("status"))
+ }
+
+ // ensure the request was sent through the proxy instead of the DeltaProxyCache
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestRemoveStale(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // get cache hit coverage too by repeating:
+
+ oc.TimeseriesRetention = 10
+
+ extr = timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: now}
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestRemoveStaleLRU(t *testing.T) {
+
+ testConfigFile = "../../../testdata/test.cache-lru.conf"
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ testConfigFile = ""
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // get cache hit coverage too by repeating:
+
+ oc.TimeseriesRetention = 10
+
+ extr = timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: now}
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ w = httptest.NewRecorder()
+
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestMarshalFailure(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ rsc.CacheConfig.CacheType = "test"
+ client.RangeCacheKey = "failkey"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), "")
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func normalizeTime(t time.Time, d time.Duration) time.Time {
+ return time.Unix((t.Unix()/int64(d.Seconds()))*int64(d.Seconds()), 0)
+ //return t.Truncate(d)
+}
+
+func TestDeltaProxyCacheRequestPartialHit(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ client.RangeCacheKey = "test-range-key-phit"
+ client.InstantCacheKey = "test-instant-key-phit"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: normalizeTime(extr.Start, step), End: normalizeTime(extr.End, step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s&rk=%s&ik=%s", int(step.Seconds()),
+ extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency, client.RangeCacheKey, client.InstantCacheKey)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test partial hit (needing an upper fragment)
+ phitStart := normalizeTime(extr.End.Add(step), step)
+ extr.End = extr.End.Add(time.Duration(1) * time.Hour) // Extend the top by 1 hour to generate partial hit
+ extn.End = normalizeTime(extr.End, step)
+
+ expectedFetched := fmt.Sprintf("[%d:%d]", phitStart.Unix(), extn.End.Unix())
+ expected, _, _ = promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s&rk=%s&ik=%s", int(step.Seconds()),
+ extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency, client.RangeCacheKey, client.InstantCacheKey)
+
+ r.URL = u
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "phit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test partial hit (needing a lower fragment)
+ phitEnd := extn.Start.Add(-step)
+ extr.Start = extr.Start.Add(time.Duration(-1) * time.Hour)
+ extn.Start = normalizeTime(extr.Start, step)
+
+ expectedFetched = fmt.Sprintf("[%d:%d]", extn.Start.Unix(), phitEnd.Unix())
+ expected, _, _ = promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s&rk=%s&ik=%s", int(step.Seconds()),
+ extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency, client.RangeCacheKey, client.InstantCacheKey)
+
+ r.URL = u
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "phit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test partial hit (needing both upper and lower fragments)
+ phitEnd = normalizeTime(extr.Start.Add(-step), step)
+ phitStart = normalizeTime(extr.End.Add(step), step)
+
+ extr.Start = extr.Start.Add(time.Duration(-1) * time.Hour)
+ extn.Start = normalizeTime(extr.Start, step)
+ extr.End = extr.End.Add(time.Duration(1) * time.Hour) // Extend the top by 1 hour to generate partial hit
+ extn.End = normalizeTime(extr.End, step)
+
+ expectedFetched = fmt.Sprintf("[%d:%d,%d:%d]",
+ extn.Start.Unix(), phitEnd.Unix(), phitStart.Unix(), extn.End.Unix())
+
+ expected, _, _ = promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s&rk=%s&ik=%s", int(step.Seconds()),
+ extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency, client.RangeCacheKey, client.InstantCacheKey)
+
+ r.URL = u
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "phit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDeltaProxyCacheRequestDeltaFetchError(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ client.RangeCacheKey = "testkey"
+ client.InstantCacheKey = "testInstantKey"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: normalizeTime(extr.Start, step), End: normalizeTime(extr.End, step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test partial hit (needing an upper fragment)
+ //phitStart := extr.End.Add(step)
+ extr.End = extr.End.Add(time.Duration(1) * time.Hour) // Extend the top by 1 hour to generate partial hit
+ extn.End = extr.End.Truncate(step)
+
+ //expectedFetched := fmt.Sprintf("[%d:%d]", phitStart.Truncate(step).Unix(), extn.End.Unix())
+ promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ client.InstantCacheKey = "foo1"
+ client.RangeCacheKey = "foo2"
+
+ // Switch to the failed query.
+ u.RawQuery = fmt.Sprintf("instantKey=foo1&step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadGateway)
+
+ r.URL = u
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadGateway)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "proxy-error"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ // if err != nil {
+ // t.Error(err)
+ // }
+
+}
+
+func TestDeltaProxyCacheRequestRangeMiss(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(3600) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Test Range Miss Low End
+
+ extr.Start = extr.Start.Add(time.Duration(-3) * time.Hour)
+ extn.Start = extr.Start.Truncate(step)
+ extr.End = extr.Start.Add(time.Duration(1) * time.Hour)
+ extn.End = extr.End.Truncate(step)
+
+ expectedFetched := fmt.Sprintf("[%d:%d]", extn.Start.Unix(), extn.End.Unix())
+ expected, _, _ = promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ r.URL = u
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "rmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Test Range Miss High End
+
+ extr.Start = now.Add(time.Duration(-10) * time.Hour)
+ extn.Start = extr.Start.Truncate(step)
+ extr.End = now.Add(time.Duration(-8) * time.Hour)
+ extn.End = extr.End.Truncate(step)
+
+ expectedFetched = fmt.Sprintf("[%d:%d", extn.Start.Unix(), extn.End.Unix())
+ expected, _, _ = promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+ r.URL = u
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "rmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDeltaProxyCacheRequestFastForward(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+ rsc.CacheConfig.CacheType = "test"
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ client.InstantCacheKey = "test-dpc-ff-key-instant"
+ client.RangeCacheKey = "test-dpc-ff-key-range"
+
+ oc.FastForwardDisable = false
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ client.fftime = now.Truncate(oc.FastForwardTTL)
+
+ extr := timeseries.Extent{Start: now.Add(-time.Duration(12) * time.Hour), End: now}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("instantKey=%s&rangeKey=%s&step=%d&start=%d&end=%d&query=%s",
+ client.InstantCacheKey, client.RangeCacheKey, int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ expectedMatrix, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+ em, err := client.UnmarshalTimeseries([]byte(expectedMatrix))
+ if err != nil {
+ t.Error(err)
+ }
+ em.SetExtents(timeseries.ExtentList{extn})
+
+ expectedVector, _, _ := promsim.GetInstantData(queryReturnsOKNoLatency, client.fftime)
+ ev, err := client.UnmarshalInstantaneous([]byte(expectedVector))
+ if err != nil {
+ t.Error(err)
+ }
+ ev.SetStep(step)
+
+ if len(ev.Extents()) == 1 && len(em.Extents()) > 0 && ev.Extents()[0].Start.Truncate(time.Second).After(em.Extents()[0].End) {
+ em.Merge(false, ev)
+ }
+
+ em.SetExtents(nil)
+ b, err := client.MarshalTimeseries(em)
+ if err != nil {
+ t.Error(err)
+ }
+
+ expected := string(b)
+
+ cr.LoadCachesFromConfig()
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"ffstatus": "miss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // do it again and look for a cache hit on the timeseries and fast forward
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "hit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"ffstatus": "hit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // instantKey := oc.Host + "." + md5.Checksum(strings.Replace(u.Path, "_range", "", -1)+client.InstantCacheKey) + ".sz"
+ // client.cache.Remove(instantKey)
+
+ // u.RawQuery = fmt.Sprintf("instantKey=%s&rangeKey=%s&step=%d&start=%d&end=%d&query=%s",
+ // client.InstantCacheKey, client.RangeCacheKey, int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadPayload)
+
+ // w = httptest.NewRecorder()
+ // client.QueryRangeHandler(w, r)
+ // resp = w.Result()
+
+ // err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ // if err != nil {
+ // t.Error(err)
+ // }
+
+ // err = testResultHeaderPartMatch(resp.Header, map[string]string{"ffstatus": "err"})
+ // if err != nil {
+ // t.Error(err)
+ // }
+
+ // // Now test a Response Code error
+
+ // u.RawQuery = fmt.Sprintf("instantKey=%s&rangeKey=%s&step=%d&start=%d&end=%d&query=%s",
+ // client.InstantCacheKey+"1", client.RangeCacheKey+"1", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadRequest)
+
+ // w = httptest.NewRecorder()
+ // client.QueryRangeHandler(w, r)
+ // resp = w.Result()
+
+ // err = testStatusCodeMatch(resp.StatusCode, http.StatusBadRequest)
+ // if err != nil {
+ // t.Error(err)
+ // }
+
+ // err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "proxy-error"})
+ // if err != nil {
+ // t.Error(err)
+ // }
+
+}
+
+func TestDeltaProxyCacheRequestFastForwardUrlError(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("throw_ffurl_error=1&step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ oc.FastForwardDisable = false
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"ffstatus": "err"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestWithRefresh(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "purge"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDeltaProxyCacheRequestWithRefreshError(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadRequest)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadRequest)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestWithUnmarshalAndUpstreamErrors(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test" // disable direct-memory and force marshaling
+
+ client.RangeCacheKey = "testkey"
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ key := oc.Host + ".409d551e3653f5ad5aa9acbdac8d4ac3"
+
+ _, _, err = client.cache.Retrieve(key, false)
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.cache.Store(key, []byte("foo"), time.Duration(30)*time.Second)
+
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadRequest)
+ client.cache.Store(key, []byte("foo"), time.Duration(30)*time.Second)
+
+ r.URL = u
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadRequest)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequest_BadParams(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ const query = "some_query_here{}"
+ step := time.Duration(300) * time.Second
+ end := time.Now()
+ start := end.Add(-time.Duration(6) * time.Hour)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ // Intentional typo &q instead of &query to force a proxied request due to ParseTimeRangeQuery() error
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&q=%s", int(step.Seconds()), start.Unix(), end.Unix(), query)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadRequest)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // ensure the request was sent through the proxy instead of the DeltaProxyCache
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestCacheMissUnmarshalFailed(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test" // disable direct-memory and force marshaling
+
+ oc.FastForwardDisable = true
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadRequest)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadRequest)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadPayload)
+ r.URL = u
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = client.UnmarshalTimeseries(body)
+ if err == nil {
+ t.Errorf("expected unmarshaling error for %s", string(body))
+ }
+
+}
+
+func TestDeltaProxyCacheRequestOutOfWindow(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ oc.FastForwardDisable = true
+
+ query := "some_query_here{}"
+ step := time.Duration(300) * time.Second
+ // Times are out-of-window for being cacheable
+ start := time.Unix(0, 0)
+ end := time.Unix(1800, 0)
+
+ // we still expect the same results
+ expected, _, _ := promsim.GetTimeSeriesData(query, start, end, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), start.Unix(), end.Unix(), query)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Fully Out-of-Window Requests should be proxied and not cached
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // do it again to ensure another cache miss
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Fully Out-of-Window Requests should be proxied and not cached
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDeltaProxyCacheRequestBadGateway(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+ rsc.CacheConfig.CacheType = "test"
+
+ oc.FastForwardDisable = true
+
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsBadGateway)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadGateway)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequest_BackfillTolerance(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ oc.BackfillTolerance = time.Duration(300) * time.Second
+ oc.FastForwardDisable = true
+
+ query := "some_query_here{}"
+ step := time.Duration(300) * time.Second
+
+ now := time.Now()
+ x := timeseries.Extent{Start: now.Add(-time.Duration(6) * time.Hour), End: now}
+ xn := timeseries.Extent{Start: now.Add(-time.Duration(6) * time.Hour).Truncate(step), End: now.Truncate(step)}
+
+ // We can predict which slice will need to be fetched and ensure that only that slice is requested upstream
+ expectedFetched := fmt.Sprintf("[%d:%d]", xn.End.Unix(), xn.End.Unix())
+ expected, _, _ := promsim.GetTimeSeriesData(query, xn.Start, xn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), x.Start.Unix(), x.End.Unix(), query)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // get cache partial hit coverage too by repeating:
+ w = httptest.NewRecorder()
+ client.QueryRangeHandler(w, r)
+ resp = w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "phit"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"fetched": expectedFetched})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDeltaProxyCacheRequestFFTTLBiggerThanStep(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessDPC()
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ client := rsc.OriginClient.(*TestClient)
+ oc := rsc.OriginConfig
+
+ oc.FastForwardDisable = false
+
+ step := time.Duration(300) * time.Second
+ oc.FastForwardTTL = step + 1
+
+ now := time.Now()
+ end := now.Add(-time.Duration(12) * time.Hour)
+
+ extr := timeseries.Extent{Start: end.Add(-time.Duration(18) * time.Hour), End: end}
+ extn := timeseries.Extent{Start: extr.Start.Truncate(step), End: extr.End.Truncate(step)}
+
+ expected, _, _ := promsim.GetTimeSeriesData(queryReturnsOKNoLatency, extn.Start, extn.End, step)
+
+ u := r.URL
+ u.Path = "/api/v1/query_range"
+ u.RawQuery = fmt.Sprintf("step=%d&start=%d&end=%d&query=%s", int(step.Seconds()), extr.Start.Unix(), extr.End.Unix(), queryReturnsOKNoLatency)
+
+ client.QueryRangeHandler(w, r)
+ resp := w.Result()
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), expected)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"status": "kmiss"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"ffstatus": "off"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
diff --git a/internal/proxy/engines/document.go b/internal/proxy/engines/document.go
new file mode 100644
index 000000000..2547a6f7c
--- /dev/null
+++ b/internal/proxy/engines/document.go
@@ -0,0 +1,203 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+
+ txe "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+//go:generate msgp
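+// running "go generate" with the msgp tool installed regenerates document_gen.go from the msg tags below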
+
+// HTTPDocument represents a full HTTP Response/Cache Document with unbuffered body
+type HTTPDocument struct {
+ StatusCode int `msg:"status_code"`
+ Status string `msg:"status"`
+ Headers map[string][]string `msg:"headers"`
+ Body []byte `msg:"body"`
+ ContentLength int64 `msg:"content_length"`
+ ContentType string `msg:"content_type"`
+ CachingPolicy *CachingPolicy `msg:"caching_policy"`
+ // Ranges is the list of Byte Ranges contained in the body of this document
+ Ranges byterange.Ranges `msg:"ranges"`
+ RangeParts byterange.MultipartByteRanges `msg:"-"`
+ // StoredRangeParts is a version of RangeParts that can be exported to MessagePack
+ StoredRangeParts map[string]*byterange.MultipartByteRange `msg:"range_parts"`
+
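+ // unexported fields hold runtime state and are not serialized to the cache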
+ rangePartsLoaded bool
+ isFulfillment bool
+ isLoaded bool
+ timeseries timeseries.Timeseries
+ isCompressed bool
+}
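+
+// For orientation, a typical document lifecycle looks roughly like this sketch
+// (names are from this package and its generated msgp code; error handling omitted):
+//
+//	d := DocumentFromHTTPResponse(resp, body, cp) // build from an upstream response
+//	b, _ := d.MarshalMsg(nil)                     // serialize for the cache (msgp)
+//	var d2 HTTPDocument
+//	d2.UnmarshalMsg(b)                            // re-hydrate on a cache hit
+//	d2.LoadRangeParts()                           // rebuild RangeParts from StoredRangeParts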
+
+// Size returns the size of the HTTPDocument's headers, CachingPolicy, RangeParts, Body and timeseries data
+func (d *HTTPDocument) Size() int {
+ var i int
+ i += len(headers.String(http.Header(d.Headers)))
+ i += len(d.Body)
+ if d.RangeParts != nil {
+ for _, p := range d.RangeParts {
+ i += p.Msgsize()
+ }
+ }
+ if d.CachingPolicy != nil {
+ i += d.CachingPolicy.Msgsize()
+ }
+ if d.timeseries != nil {
+ i += d.timeseries.Size()
+ }
+ return i
+}
+
+// SetBody sets the Document Body as well as the Content Length, based on the length of body.
+// This assumes that the caller has checked that the request is not a Range request
+func (d *HTTPDocument) SetBody(body []byte) {
+ if body == nil {
+ return
+ }
+ d.Body = body
+ bl := int64(len(d.Body))
+ if d.ContentLength == -1 || d.ContentLength != bl {
+ d.ContentLength = bl
+ }
+ if d.Headers == nil {
+ d.Headers = make(http.Header)
+ }
+ http.Header(d.Headers).Set(headers.NameContentLength, strconv.Itoa(len(body)))
+}
+
+// LoadRangeParts converts the document's StoredRangeParts into RangeParts
+func (d *HTTPDocument) LoadRangeParts() {
+
+ if d.rangePartsLoaded {
+ return
+ }
+
+ if d.StoredRangeParts != nil && len(d.StoredRangeParts) > 0 {
+ d.RangeParts = make(byterange.MultipartByteRanges)
+ for _, p := range d.StoredRangeParts {
+ d.RangeParts[p.Range] = p
+ }
+ d.Ranges = d.RangeParts.Ranges()
+ }
+ d.rangePartsLoaded = true
+}
+
+// ParsePartialContentBody parses a Partial Content response body into 0 or more discrete parts
+func (d *HTTPDocument) ParsePartialContentBody(resp *http.Response, body []byte) {
+
+ ct := resp.Header.Get(headers.NameContentType)
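+ // single-range response: a Content-Range header describes the one part carried in the body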
+ if cr := resp.Header.Get(headers.NameContentRange); cr != "" {
+ if !strings.HasPrefix(ct, headers.ValueMultipartByteRanges) {
+ d.ContentType = ct
+ }
+ r, cl, err := byterange.ParseContentRangeHeader(cr)
+ d.ContentLength = int64(cl)
+ if err == nil && (r.Start >= 0 || r.End >= 0) {
+ mpbr := &byterange.MultipartByteRange{Range: r, Content: body}
+ if d.RangeParts == nil {
+ d.RangeParts = byterange.MultipartByteRanges{r: mpbr}
+ } else {
+ d.RangeParts[r] = mpbr
+ }
+ }
+ if d.RangeParts != nil {
+ byterange.MultipartByteRanges(d.RangeParts).Compress()
+ d.Ranges = d.RangeParts.Ranges()
+
+ if d.RangeParts != nil {
+ d.StoredRangeParts = d.RangeParts.PackableMultipartByteRanges()
+ }
+ }
+ } else if strings.HasPrefix(ct, headers.ValueMultipartByteRanges) {
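+ // multipart/byteranges response: parse each part and merge it into RangeParts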
+ p, ct, r, cl, err := byterange.ParseMultipartRangeResponseBody(ioutil.NopCloser(bytes.NewBuffer(body)), ct)
+ if err == nil {
+ if d.RangeParts == nil {
+ d.Ranges = r
+ d.RangeParts = p
+ } else {
+ d.RangeParts.Merge(p)
+ d.Ranges = d.RangeParts.Ranges()
+ }
+ d.StoredRangeParts = d.RangeParts.PackableMultipartByteRanges()
+ d.ContentLength = int64(cl)
+ if !strings.HasPrefix(ct, headers.ValueMultipartByteRanges) {
+ d.ContentType = ct
+ }
+ d.RangeParts.Compress()
+ d.Ranges = d.RangeParts.Ranges()
+ } else {
+ log.Error("unable to parse multipart range response body", log.Pairs{"detail": err.Error})
+ }
+ } else {
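+ // full-content response: no ranges involved, so store the body directly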
+ if !strings.HasPrefix(ct, headers.ValueMultipartByteRanges) {
+ d.ContentType = ct
+ }
+ d.SetBody(body)
+ }
+
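+ // if the single cached part spans the entire content length, promote it to a full body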
+ if d.ContentLength > 0 && len(d.RangeParts) == 1 &&
+ d.RangeParts[d.RangeParts.Ranges()[0]].Range.Start == 0 &&
+ d.RangeParts[d.RangeParts.Ranges()[0]].Range.End == d.ContentLength-1 {
+ d.FulfillContentBody()
+ }
+
+ http.Header(d.Headers).Del(headers.NameContentType)
+
+}
+
+// FulfillContentBody concatenates the document's Range parts into a single, full content body.
+// The caller must ensure the document's multipart ranges cover the full content length before calling this method.
+func (d *HTTPDocument) FulfillContentBody() error {
+
+ if d.RangeParts == nil || len(d.RangeParts) == 0 {
+ d.SetBody(nil)
+ return txe.ErrNoRanges
+ }
+
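+ // Compress merges contiguous parts; if the cached parts comprise the full body they collapse to a single range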
+ d.RangeParts.Compress()
+ d.Ranges = d.RangeParts.Ranges()
+
+ if len(d.RangeParts) != 1 {
+ d.SetBody(nil)
+ return errors.New("cached parts do not comprise the full body")
+ }
+
+ p := d.RangeParts[d.Ranges[0]]
+ r := p.Range
+
+ if r.Start != 0 || r.End != d.ContentLength-1 {
+ d.SetBody(nil)
+ return errors.New("cached parts do not comprise the full body")
+ }
+
+ d.StatusCode = http.StatusOK
+
+ d.Ranges = nil
+ d.RangeParts = nil
+ d.StoredRangeParts = nil
+ d.SetBody(p.Content)
+ return nil
+}
diff --git a/internal/proxy/engines/document_gen.go b/internal/proxy/engines/document_gen.go
new file mode 100644
index 000000000..44c752728
--- /dev/null
+++ b/internal/proxy/engines/document_gen.go
@@ -0,0 +1,529 @@
+package engines
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *HTTPDocument) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "status_code":
+ z.StatusCode, err = dc.ReadInt()
+ if err != nil {
+ return
+ }
+ case "status":
+ z.Status, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "headers":
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ if z.Headers == nil {
+ z.Headers = make(map[string][]string, zb0002)
+ } else if len(z.Headers) > 0 {
+ for key := range z.Headers {
+ delete(z.Headers, key)
+ }
+ }
+ for zb0002 > 0 {
+ zb0002--
+ var za0001 string
+ var za0002 []string
+ za0001, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ var zb0003 uint32
+ zb0003, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(za0002) >= int(zb0003) {
+ za0002 = (za0002)[:zb0003]
+ } else {
+ za0002 = make([]string, zb0003)
+ }
+ for za0003 := range za0002 {
+ za0002[za0003], err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ }
+ z.Headers[za0001] = za0002
+ }
+ case "body":
+ z.Body, err = dc.ReadBytes(z.Body)
+ if err != nil {
+ return
+ }
+ case "content_length":
+ z.ContentLength, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "content_type":
+ z.ContentType, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "caching_policy":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.CachingPolicy = nil
+ } else {
+ if z.CachingPolicy == nil {
+ z.CachingPolicy = new(CachingPolicy)
+ }
+ err = z.CachingPolicy.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ case "ranges":
+ err = z.Ranges.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ case "range_parts":
+ var zb0004 uint32
+ zb0004, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ if z.StoredRangeParts == nil {
+ z.StoredRangeParts = make(map[string]*byterange.MultipartByteRange, zb0004)
+ } else if len(z.StoredRangeParts) > 0 {
+ for key := range z.StoredRangeParts {
+ delete(z.StoredRangeParts, key)
+ }
+ }
+ for zb0004 > 0 {
+ zb0004--
+ var za0004 string
+ var za0005 *byterange.MultipartByteRange
+ za0004, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ za0005 = nil
+ } else {
+ if za0005 == nil {
+ za0005 = new(byterange.MultipartByteRange)
+ }
+ err = za0005.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ z.StoredRangeParts[za0004] = za0005
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *HTTPDocument) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 9
+ // write "status_code"
+ err = en.Append(0x89, 0xab, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt(z.StatusCode)
+ if err != nil {
+ return
+ }
+ // write "status"
+ err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Status)
+ if err != nil {
+ return
+ }
+ // write "headers"
+ err = en.Append(0xa7, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Headers)))
+ if err != nil {
+ return
+ }
+ for za0001, za0002 := range z.Headers {
+ err = en.WriteString(za0001)
+ if err != nil {
+ return
+ }
+ err = en.WriteArrayHeader(uint32(len(za0002)))
+ if err != nil {
+ return
+ }
+ for za0003 := range za0002 {
+ err = en.WriteString(za0002[za0003])
+ if err != nil {
+ return
+ }
+ }
+ }
+ // write "body"
+ err = en.Append(0xa4, 0x62, 0x6f, 0x64, 0x79)
+ if err != nil {
+ return
+ }
+ err = en.WriteBytes(z.Body)
+ if err != nil {
+ return
+ }
+ // write "content_length"
+ err = en.Append(0xae, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.ContentLength)
+ if err != nil {
+ return
+ }
+ // write "content_type"
+ err = en.Append(0xac, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ContentType)
+ if err != nil {
+ return
+ }
+ // write "caching_policy"
+ err = en.Append(0xae, 0x63, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79)
+ if err != nil {
+ return
+ }
+ if z.CachingPolicy == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z.CachingPolicy.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ // write "ranges"
+ err = en.Append(0xa6, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73)
+ if err != nil {
+ return
+ }
+ err = z.Ranges.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ // write "range_parts"
+ err = en.Append(0xab, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.StoredRangeParts)))
+ if err != nil {
+ return
+ }
+ for za0004, za0005 := range z.StoredRangeParts {
+ err = en.WriteString(za0004)
+ if err != nil {
+ return
+ }
+ if za0005 == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = za0005.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *HTTPDocument) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 9
+ // string "status_code"
+ o = append(o, 0x89, 0xab, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65)
+ o = msgp.AppendInt(o, z.StatusCode)
+ // string "status"
+ o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
+ o = msgp.AppendString(o, z.Status)
+ // string "headers"
+ o = append(o, 0xa7, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73)
+ o = msgp.AppendMapHeader(o, uint32(len(z.Headers)))
+ for za0001, za0002 := range z.Headers {
+ o = msgp.AppendString(o, za0001)
+ o = msgp.AppendArrayHeader(o, uint32(len(za0002)))
+ for za0003 := range za0002 {
+ o = msgp.AppendString(o, za0002[za0003])
+ }
+ }
+ // string "body"
+ o = append(o, 0xa4, 0x62, 0x6f, 0x64, 0x79)
+ o = msgp.AppendBytes(o, z.Body)
+ // string "content_length"
+ o = append(o, 0xae, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68)
+ o = msgp.AppendInt64(o, z.ContentLength)
+ // string "content_type"
+ o = append(o, 0xac, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65)
+ o = msgp.AppendString(o, z.ContentType)
+ // string "caching_policy"
+ o = append(o, 0xae, 0x63, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79)
+ if z.CachingPolicy == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = z.CachingPolicy.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ // string "ranges"
+ o = append(o, 0xa6, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73)
+ o, err = z.Ranges.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ // string "range_parts"
+ o = append(o, 0xab, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x73)
+ o = msgp.AppendMapHeader(o, uint32(len(z.StoredRangeParts)))
+ for za0004, za0005 := range z.StoredRangeParts {
+ o = msgp.AppendString(o, za0004)
+ if za0005 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = za0005.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *HTTPDocument) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "status_code":
+ z.StatusCode, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ return
+ }
+ case "status":
+ z.Status, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "headers":
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if z.Headers == nil {
+ z.Headers = make(map[string][]string, zb0002)
+ } else if len(z.Headers) > 0 {
+ for key := range z.Headers {
+ delete(z.Headers, key)
+ }
+ }
+ for zb0002 > 0 {
+ var za0001 string
+ var za0002 []string
+ zb0002--
+ za0001, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ var zb0003 uint32
+ zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(za0002) >= int(zb0003) {
+ za0002 = (za0002)[:zb0003]
+ } else {
+ za0002 = make([]string, zb0003)
+ }
+ for za0003 := range za0002 {
+ za0002[za0003], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ z.Headers[za0001] = za0002
+ }
+ case "body":
+ z.Body, bts, err = msgp.ReadBytesBytes(bts, z.Body)
+ if err != nil {
+ return
+ }
+ case "content_length":
+ z.ContentLength, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "content_type":
+ z.ContentType, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "caching_policy":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.CachingPolicy = nil
+ } else {
+ if z.CachingPolicy == nil {
+ z.CachingPolicy = new(CachingPolicy)
+ }
+ bts, err = z.CachingPolicy.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ }
+ case "ranges":
+ bts, err = z.Ranges.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ case "range_parts":
+ var zb0004 uint32
+ zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if z.StoredRangeParts == nil {
+ z.StoredRangeParts = make(map[string]*byterange.MultipartByteRange, zb0004)
+ } else if len(z.StoredRangeParts) > 0 {
+ for key := range z.StoredRangeParts {
+ delete(z.StoredRangeParts, key)
+ }
+ }
+ for zb0004 > 0 {
+ var za0004 string
+ var za0005 *byterange.MultipartByteRange
+ zb0004--
+ za0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ za0005 = nil
+ } else {
+ if za0005 == nil {
+ za0005 = new(byterange.MultipartByteRange)
+ }
+ bts, err = za0005.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ }
+ z.StoredRangeParts[za0004] = za0005
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *HTTPDocument) Msgsize() (s int) {
+ s = 1 + 12 + msgp.IntSize + 7 + msgp.StringPrefixSize + len(z.Status) + 8 + msgp.MapHeaderSize
+ if z.Headers != nil {
+ for za0001, za0002 := range z.Headers {
+ _ = za0002
+ s += msgp.StringPrefixSize + len(za0001) + msgp.ArrayHeaderSize
+ for za0003 := range za0002 {
+ s += msgp.StringPrefixSize + len(za0002[za0003])
+ }
+ }
+ }
+ s += 5 + msgp.BytesPrefixSize + len(z.Body) + 15 + msgp.Int64Size + 13 + msgp.StringPrefixSize + len(z.ContentType) + 15
+ if z.CachingPolicy == nil {
+ s += msgp.NilSize
+ } else {
+ s += z.CachingPolicy.Msgsize()
+ }
+ s += 7 + z.Ranges.Msgsize() + 12 + msgp.MapHeaderSize
+ if z.StoredRangeParts != nil {
+ for za0004, za0005 := range z.StoredRangeParts {
+ _ = za0005
+ s += msgp.StringPrefixSize + len(za0004)
+ if za0005 == nil {
+ s += msgp.NilSize
+ } else {
+ s += za0005.Msgsize()
+ }
+ }
+ }
+ return
+}
diff --git a/internal/proxy/engines/document_gen_test.go b/internal/proxy/engines/document_gen_test.go
new file mode 100644
index 000000000..563d6ccfa
--- /dev/null
+++ b/internal/proxy/engines/document_gen_test.go
@@ -0,0 +1,123 @@
+package engines
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalHTTPDocument(t *testing.T) {
+ v := HTTPDocument{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgHTTPDocument(b *testing.B) {
+ v := HTTPDocument{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgHTTPDocument(b *testing.B) {
+ v := HTTPDocument{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalHTTPDocument(b *testing.B) {
+ v := HTTPDocument{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeHTTPDocument(t *testing.T) {
+ v := HTTPDocument{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := HTTPDocument{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeHTTPDocument(b *testing.B) {
+ v := HTTPDocument{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeHTTPDocument(b *testing.B) {
+ v := HTTPDocument{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/internal/proxy/engines/document_test.go b/internal/proxy/engines/document_test.go
new file mode 100644
index 000000000..f81b2d4e5
--- /dev/null
+++ b/internal/proxy/engines/document_test.go
@@ -0,0 +1,225 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "net/http"
+ "strings"
+ "testing"
+
+ txe "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+)
+
+func TestDocumentFromHTTPResponse(t *testing.T) {
+
+ expected := []byte("1234")
+
+ resp := &http.Response{}
+ resp.Header = http.Header{headers.NameContentRange: []string{"bytes 1-4/8"}}
+ resp.StatusCode = 206
+ d := DocumentFromHTTPResponse(resp, []byte("1234"), nil)
+
+ if len(d.Ranges) != 1 {
+ t.Errorf("expected 1 got %d", len(d.Ranges))
+ } else if string(d.RangeParts[d.Ranges[0]].Content) != string(expected) {
+ t.Errorf("expected %s got %s", string(expected), string(d.Body))
+ }
+
+ if d.StatusCode != 206 {
+ t.Errorf("expected %d got %d", 206, d.StatusCode)
+ }
+
+}
+
+func TestCachingPolicyString(t *testing.T) {
+
+ cp := &CachingPolicy{NoTransform: true}
+ s := cp.String()
+
+ i := strings.Index(s, `"no_transform":true`)
+ if i < 1 {
+ t.Errorf("expected value > 1, got %d", i)
+ }
+
+}
+
+func TestSetBody(t *testing.T) {
+
+ r := byterange.Range{Start: 0, End: 10}
+ d := &HTTPDocument{ContentLength: -1, RangeParts: byterange.MultipartByteRanges{r: &byterange.MultipartByteRange{Range: r, Content: []byte("01234567890")}}}
+ d.SetBody([]byte("testing"))
+
+ if d.ContentLength < 0 {
+ t.Errorf("expected value > 0, got %d", d.ContentLength)
+ }
+}
+
+func TestSize(t *testing.T) {
+ r := byterange.Range{Start: 0, End: 10}
+ d := &HTTPDocument{ContentLength: -1, RangeParts: byterange.MultipartByteRanges{r: &byterange.MultipartByteRange{Range: r, Content: []byte("01234567890")}}}
+
+ i := d.Size()
+
+ if i != 62 {
+ t.Errorf("expected %d got %d", 62, i)
+ }
+
+}
+
+func TestFulfillContentBody(t *testing.T) {
+ d := &HTTPDocument{}
+ err := d.FulfillContentBody()
+ if err != txe.ErrNoRanges {
+ if err != nil {
+ t.Error(err)
+ } else {
+ t.Errorf("expected error: %s", txe.ErrNoRanges.Error())
+ }
+ }
+}
+
+func TestParsePartialContentBodyNoRanges(t *testing.T) {
+
+ d := &HTTPDocument{}
+ resp := &http.Response{Header: make(http.Header)}
+ d.ParsePartialContentBody(resp, []byte("test"))
+
+ if string(d.Body) != "test" {
+ t.Errorf("expected %s got %s", "test", string(d.Body))
+ }
+
+}
+
+func TestParsePartialContentBodySingleRange(t *testing.T) {
+ d := &HTTPDocument{}
+ d.Ranges = make(byterange.Ranges, 0)
+ d.RangeParts = make(byterange.MultipartByteRanges)
+ d.StoredRangeParts = make(map[string]*byterange.MultipartByteRange)
+
+ resp := &http.Response{Header: http.Header{
+ headers.NameContentRange: []string{"bytes 0-10/1222"},
+ }}
+
+ d.ParsePartialContentBody(resp, []byte("Lorem ipsum"))
+
+ if string(d.Body) != "" {
+ t.Errorf("expected %s got %s", "", string(d.Body))
+ }
+
+ if len(d.RangeParts) != 1 {
+ t.Errorf("expected %d got %d", 1, len(d.RangeParts))
+ }
+}
+
+func TestParsePartialContentBodyMultipart(t *testing.T) {
+ d := &HTTPDocument{}
+ d.Ranges = make(byterange.Ranges, 0)
+ d.RangeParts = make(byterange.MultipartByteRanges)
+ d.StoredRangeParts = make(map[string]*byterange.MultipartByteRange)
+
+ resp := &http.Response{
+ StatusCode: http.StatusPartialContent,
+ Header: http.Header{},
+ }
+
+ resp.Header.Set(headers.NameContentType, "multipart/byteranges; boundary=c4fb8e6049a6fdb126d32fa0b15c21e3")
+ resp.Header.Set(headers.NameContentLength, "271")
+
+ d.ParsePartialContentBody(resp, []byte(`
+--c4fb8e6049a6fdb126d32fa0b15c21e3
+Content-Range: bytes 0-6/1222
+Content-Type: text/plain; charset=utf-8
+
+Lorem i
+--c4fb8e6049a6fdb126d32fa0b15c21e3
+Content-Range: bytes 10-20/1222
+Content-Type: text/plain; charset=utf-8
+
+m dolor sit
+--c4fb8e6049a6fdb126d32fa0b15c21e3--`))
+
+ if string(d.Body) != "" {
+ t.Errorf("expected %s got %s", "", string(d.Body))
+ }
+
+ if len(d.RangeParts) != 2 {
+ t.Errorf("expected %d got %d", 2, len(d.RangeParts))
+ }
+}
+
+func TestParsePartialContentBodyMultipartBadBody(t *testing.T) {
+ d := &HTTPDocument{}
+ d.Ranges = make(byterange.Ranges, 0)
+ d.RangeParts = make(byterange.MultipartByteRanges)
+ d.StoredRangeParts = make(map[string]*byterange.MultipartByteRange)
+
+ resp := &http.Response{
+ StatusCode: http.StatusPartialContent,
+ Header: http.Header{},
+ }
+
+ resp.Header.Set(headers.NameContentType, "multipart/byteranges; boundary=c4fb8e6049a6fdb126d32fa0b15c21e3")
+ resp.Header.Set(headers.NameContentLength, "271")
+
+ d.ParsePartialContentBody(resp, []byte(`
+--c4fb8e6049a6fdb126d32fa0b15c21e3
+Content-Range: bytes 0-6/1222
+Content-Type: text/plain; charset=utf-8
+
+Lorem i
+--c4fb8e6049a6fdb126d32fa0b15c21e3
+Content-Range: baytes 1s0-20/12s22x
+Content-Type: text/plain; charset=utf-8
+
+m dolor sit
+--c4fb8e6049a6fdb126d32fa0b15c21e3--`))
+
+ if string(d.Body) != "" {
+ t.Errorf("expected %s got %s", "", string(d.Body))
+ }
+
+ if len(d.RangeParts) != 0 {
+ t.Errorf("expected %d got %d", 0, len(d.RangeParts))
+ }
+
+}
+
+func TestLoadRangeParts(t *testing.T) {
+
+ d := &HTTPDocument{
+ rangePartsLoaded: true,
+ StoredRangeParts: map[string]*byterange.MultipartByteRange{
+ "range": {
+ Range: byterange.Range{Start: 0, End: 8},
+ Content: []byte("trickster"),
+ },
+ },
+ }
+
+ // test the short circuit
+ d.LoadRangeParts()
+ if d.Ranges != nil {
+ t.Errorf("expected nil got %s", d.Ranges.String())
+ }
+
+ // and now the main functionality
+ d.rangePartsLoaded = false
+ d.LoadRangeParts()
+ if len(d.Ranges) != 1 {
+ t.Errorf("expected %d got %d", 1, len(d.Ranges))
+ }
+
+}
diff --git a/internal/proxy/engines/httpproxy.go b/internal/proxy/engines/httpproxy.go
new file mode 100644
index 000000000..9835ba655
--- /dev/null
+++ b/internal/proxy/engines/httpproxy.go
@@ -0,0 +1,217 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/params"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+// Reqs tracks in-flight origin requests, keyed by cache key, for Progressive Collapsed Forwarding
+var Reqs sync.Map
+
+// HTTPBlockSize is a 32KB block size
+const HTTPBlockSize = 32 * 1024
+
+// DoProxy proxies an inbound request to its corresponding upstream origin with no caching features
+func DoProxy(w io.Writer, r *http.Request) *http.Response {
+
+ rsc := request.GetResources(r)
+ pc := rsc.PathConfig
+ oc := rsc.OriginConfig
+
+ start := time.Now()
+ var elapsed time.Duration
+ var cacheStatusCode status.LookupStatus
+ var resp *http.Response
+ var reader io.Reader
+ if pc == nil || pc.CollapsedForwardingType != config.CFTypeProgressive {
+ reader, resp, _ = PrepareFetchReader(r)
+ cacheStatusCode = setStatusHeader(resp.StatusCode, resp.Header)
+ writer := PrepareResponseWriter(w, resp.StatusCode, resp.Header)
+ if writer != nil && reader != nil {
+ io.Copy(writer, reader)
+ }
+ } else {
+ pr := newProxyRequest(r, w)
+ key := oc.CacheKeyPrefix + "." + pr.DeriveCacheKey(nil, "")
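+ // see if another request is already progressively downloading this object; if so, attach to it instead of fetching again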
+ result, ok := Reqs.Load(key)
+ if !ok {
+ var contentLength int64
+ reader, resp, contentLength = PrepareFetchReader(r)
+ cacheStatusCode = setStatusHeader(resp.StatusCode, resp.Header)
+ writer := PrepareResponseWriter(w, resp.StatusCode, resp.Header)
+ // Check if we know the content length and if it is less than our max object size.
+ if contentLength != 0 && contentLength < int64(oc.MaxObjectSizeBytes) {
+ pcf := NewPCF(resp, contentLength)
+ Reqs.Store(key, pcf)
+ // io.Copy blocks until the origin response is fully read, so run it in a goroutine and clean up when done
+ go func() {
+ io.Copy(pcf, reader)
+ pcf.Close()
+ Reqs.Delete(key)
+ }()
+ pcf.AddClient(writer)
+ }
+ } else {
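+ // an identical request is already in flight; replay what has been buffered so far and stream the rest as it arrives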
+ pcf, _ := result.(ProgressiveCollapseForwarder)
+ resp = pcf.GetResp()
+ writer := PrepareResponseWriter(w, resp.StatusCode, resp.Header)
+ pcf.AddClient(writer)
+ }
+ }
+ elapsed = time.Since(start)
+ recordResults(r, "HTTPProxy", cacheStatusCode, resp.StatusCode, r.URL.Path, "", elapsed.Seconds(), nil, resp.Header)
+ return resp
+}
+
+// PrepareResponseWriter prepares a response and returns an io.Writer for the data to be written to.
+// Used in Respond.
+func PrepareResponseWriter(w io.Writer, code int, header http.Header) io.Writer {
+ if rw, ok := w.(http.ResponseWriter); ok {
+ h := rw.Header()
+ headers.Merge(h, header)
+ headers.AddResponseHeaders(h)
+ rw.WriteHeader(code)
+ return rw
+ }
+ return w
+}
+
+// PrepareFetchReader prepares an HTTP response and returns an io.ReadCloser that provides
+// the response data, along with the response object and the content length.
+// Used in Fetch.
+func PrepareFetchReader(r *http.Request) (io.ReadCloser, *http.Response, int64) {
+
+ rsc := request.GetResources(r)
+ pc := rsc.PathConfig
+ oc := rsc.OriginConfig
+
+ var rc io.ReadCloser
+
+ if r != nil && r.Header != nil {
+ headers.AddProxyHeaders(r.RemoteAddr, r.Header)
+ }
+
+ headers.RemoveClientHeaders(r.Header)
+
+ if pc != nil {
+ headers.UpdateHeaders(r.Header, pc.RequestHeaders)
+ params.UpdateParams(r.URL.Query(), pc.RequestParams)
+ }
+
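+ // RequestURI must be cleared; http.Client.Do rejects outbound requests that have it set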
+ r.RequestURI = ""
+ resp, err := oc.HTTPClient.Do(r)
+ if err != nil {
+ log.Error("error downloading url", log.Pairs{"url": r.URL.String(), "detail": err.Error()})
+ // if there is an err and the response is nil, the server could not be reached; make a 502 for the downstream response
+ if resp == nil {
+ resp = &http.Response{StatusCode: http.StatusBadGateway, Request: r, Header: make(http.Header)}
+ }
+ if pc != nil {
+ headers.UpdateHeaders(resp.Header, pc.ResponseHeaders)
+ }
+ return nil, resp, 0
+ }
+
+ originalLen := int64(-1)
+ if v, ok := resp.Header[headers.NameContentLength]; ok {
+ originalLen, err = strconv.ParseInt(strings.Join(v, ""), 10, 64)
+ if err != nil {
+ originalLen = -1
+ }
+ resp.ContentLength = originalLen
+ }
+ rc = resp.Body
+
+ // warn if the clock between trickster and the origin is off by more than 1 minute
+ if date := resp.Header.Get(headers.NameDate); date != "" {
+ d, err := http.ParseTime(date)
+ if err == nil {
+ if offset := time.Since(d); time.Duration(math.Abs(float64(offset))) > time.Minute {
+ log.WarnOnce("clockoffset."+oc.Name,
+ "clock offset between trickster host and origin is high and may cause data anomalies",
+ log.Pairs{
+ "originName": oc.Name,
+ "tricksterTime": strconv.FormatInt(d.Add(offset).Unix(), 10),
+ "originTime": strconv.FormatInt(d.Unix(), 10),
+ "offset": strconv.FormatInt(int64(offset.Seconds()), 10) + "s",
+ })
+ }
+ }
+ }
+
+ hasCustomResponseBody := false
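+ // drop the upstream Content-Length header; the body may be replaced below with a configured custom response body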
+ resp.Header.Del(headers.NameContentLength)
+
+ if pc != nil {
+ headers.UpdateHeaders(resp.Header, pc.ResponseHeaders)
+ hasCustomResponseBody = pc.HasCustomResponseBody
+ }
+
+ if hasCustomResponseBody {
+ rc = ioutil.NopCloser(bytes.NewBuffer(pc.ResponseBodyBytes))
+ }
+
+ return rc, resp, originalLen
+}
+
+// Respond sends an HTTP Response down to the requesting client
+func Respond(w http.ResponseWriter, code int, header http.Header, body []byte) {
+ PrepareResponseWriter(w, code, header)
+ w.Write(body)
+}
+
+func setStatusHeader(httpStatus int, header http.Header) status.LookupStatus {
+ st := status.LookupStatusProxyOnly
+ if httpStatus >= http.StatusBadRequest {
+ st = status.LookupStatusProxyError
+ }
+ headers.SetResultsHeader(header, "HTTPProxy", st.String(), "", nil)
+ return st
+}
+
+func recordResults(r *http.Request, engine string, cacheStatus status.LookupStatus, statusCode int, path, ffStatus string, elapsed float64, extents timeseries.ExtentList, header http.Header) {
+
+ rsc := request.GetResources(r)
+ pc := rsc.PathConfig
+ oc := rsc.OriginConfig
+
+ status := cacheStatus.String()
+
+ if pc != nil && !pc.NoMetrics {
+ httpStatus := strconv.Itoa(statusCode)
+ metrics.ProxyRequestStatus.WithLabelValues(oc.Name, oc.OriginType, r.Method, status, httpStatus, path).Inc()
+ if elapsed > 0 {
+ metrics.ProxyRequestDuration.WithLabelValues(oc.Name, oc.OriginType, r.Method, status, httpStatus, path).Observe(elapsed)
+ }
+ }
+ headers.SetResultsHeader(header, engine, status, ffStatus, extents)
+}
diff --git a/internal/proxy/engines/httpproxy_test.go b/internal/proxy/engines/httpproxy_test.go
new file mode 100644
index 000000000..11be4e86d
--- /dev/null
+++ b/internal/proxy/engines/httpproxy_test.go
@@ -0,0 +1,298 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ tc "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestDoProxy(t *testing.T) {
+
+ es := tu.NewTestServer(http.StatusOK, "test", nil)
+ defer es.Close()
+
+ err := config.Load("trickster", "test", []string{"-origin-url", es.URL, "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ pc := &config.PathConfig{
+ Path: "/",
+ RequestHeaders: map[string]string{},
+ ResponseHeaders: map[string]string{},
+ ResponseBody: "test",
+ ResponseBodyBytes: []byte("test"),
+ HasCustomResponseBody: true,
+ }
+
+ oc.HTTPClient = http.DefaultClient
+ br := bytes.NewBuffer([]byte("test"))
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", es.URL, br)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, pc, nil, nil, nil)))
+
+ //req := model.NewRequest("TestProxyRequest", r.Method, r.URL, http.Header{"testHeaderName": []string{"testHeaderValue"}}, time.Duration(30)*time.Second, r, tu.NewTestWebClient())
+ DoProxy(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), "test")
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestProxyRequestBadGateway(t *testing.T) {
+
+ const badUpstream = "http://127.0.0.1:64389"
+
+ // assume nothing listens on badUpstream, so this should force the proxy to generate a 502 Bad Gateway
+ err := config.Load("trickster", "test", []string{"-origin-url", badUpstream, "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ pc := &config.PathConfig{
+ Path: "/",
+ RequestHeaders: map[string]string{},
+ ResponseHeaders: map[string]string{},
+ }
+
+ oc.HTTPClient = http.DefaultClient
+ br := bytes.NewBuffer([]byte("test"))
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", badUpstream, br)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, pc, nil, nil, nil)))
+
+ //req := model.NewRequest("TestProxyRequest", r.Method, r.URL, make(http.Header), time.Duration(30)*time.Second, r, tu.NewTestWebClient())
+ DoProxy(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusBadGateway)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestClockOffsetWarning(t *testing.T) {
+
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add(headers.NameDate, time.Now().Add(-1*time.Hour).Format(http.TimeFormat))
+ w.WriteHeader(200)
+ }
+ s := httptest.NewServer(http.HandlerFunc(handler))
+
+ err := config.Load("trickster", "test", []string{"-origin-url", s.URL, "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ pc := &config.PathConfig{
+ Path: "/",
+ RequestHeaders: map[string]string{},
+ ResponseHeaders: map[string]string{},
+ }
+
+ oc.HTTPClient = http.DefaultClient
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", s.URL, nil)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, pc, nil, nil, nil)))
+
+ if log.HasWarnedOnce("clockoffset.default") {
+ t.Errorf("expected %t got %t", false, true)
+ }
+
+ //req := model.NewRequest("TestProxyRequest", http.MethodGet, r.URL, make(http.Header), time.Duration(30)*time.Second, r, tu.NewTestWebClient())
+ DoProxy(w, r)
+ resp := w.Result()
+
+ if !log.HasWarnedOnce("clockoffset.default") {
+ t.Errorf("expected %t got %t", true, false)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestDoProxyWithPCF(t *testing.T) {
+
+ es := tu.NewTestServer(http.StatusOK, "test", nil)
+ defer es.Close()
+
+ err := config.Load("trickster", "test", []string{"-origin-url", es.URL, "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ pc := &config.PathConfig{
+ Path: "/",
+ RequestHeaders: map[string]string{},
+ ResponseHeaders: map[string]string{},
+ ResponseBody: "test",
+ ResponseBodyBytes: []byte("test"),
+ HasCustomResponseBody: true,
+ CollapsedForwardingName: "progressive",
+ CollapsedForwardingType: config.CFTypeProgressive,
+ }
+
+ oc.HTTPClient = http.DefaultClient
+ br := bytes.NewBuffer([]byte("test"))
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", es.URL, br)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, pc, nil, nil, nil)))
+
+ //req := model.NewRequest("TestProxyRequest", r.Method, r.URL, http.Header{"testHeaderName": []string{"testHeaderValue"}}, time.Duration(30)*time.Second, r, tu.NewTestWebClient())
+ DoProxy(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), "test")
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestProxyRequestWithPCFMultipleClients(t *testing.T) {
+
+ es := tu.NewTestServer(http.StatusOK, "test", nil)
+ defer es.Close()
+
+ err := config.Load("trickster", "test", []string{"-origin-url", es.URL, "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ pc := &config.PathConfig{
+ Path: "/",
+ RequestHeaders: map[string]string{},
+ ResponseHeaders: map[string]string{},
+ ResponseBody: "test",
+ ResponseBodyBytes: []byte("test"),
+ HasCustomResponseBody: true,
+ CollapsedForwardingName: "progressive",
+ CollapsedForwardingType: config.CFTypeProgressive,
+ }
+
+ oc.HTTPClient = http.DefaultClient
+ br := bytes.NewBuffer([]byte("test"))
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", es.URL, br)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, pc, nil, nil, nil)))
+
+ //req := model.NewRequest("TestProxyRequest", r.Method, r.URL, http.Header{"testHeaderName": []string{"testHeaderValue"}}, time.Duration(30)*time.Second, r, tu.NewTestWebClient())
+ DoProxy(w, r)
+ resp := w.Result()
+
+ err = testStatusCodeMatch(resp.StatusCode, http.StatusOK)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testStringMatch(string(bodyBytes), "test")
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, map[string]string{"engine": "HTTPProxy"})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPrepareFetchReaderErr(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://example.com/", "-origin-type", "test", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ oc.HTTPClient = http.DefaultClient
+
+ r := httptest.NewRequest("GET", "http://example.com/", nil)
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(oc, nil, nil, nil, nil)))
+ r.Method = "\t"
+ _, _, i := PrepareFetchReader(r)
+ if i != 0 {
+ t.Errorf("expected 0 got %d", i)
+ }
+}
diff --git a/internal/proxy/engines/key.go b/internal/proxy/engines/key.go
new file mode 100644
index 000000000..e3673692a
--- /dev/null
+++ b/internal/proxy/engines/key.go
@@ -0,0 +1,165 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/md5"
+)
+
+var methodsWithBody = map[string]bool{http.MethodPut: true, http.MethodPost: true, http.MethodPatch: true}
+
+// DeriveCacheKey calculates a request-specific cache key from the configured cache key parameters, headers, and form fields of the inbound request
+func (pr *proxyRequest) DeriveCacheKey(templateURL *url.URL, extra string) string {
+
+ rsc := request.GetResources(pr.Request)
+ pc := rsc.PathConfig
+
+ if pc == nil {
+ return md5.Checksum(pr.URL.Path + extra)
+ }
+
+ var params url.Values
+ r := pr.Request
+
+ if pr.upstreamRequest != nil {
+ r = pr.upstreamRequest
+ }
+
+ if templateURL != nil {
+ params = templateURL.Query()
+ } else if r.URL != nil {
+ params = r.URL.Query()
+ } else {
+ params = pr.URL.Query()
+ }
+
+ if pc.KeyHasher != nil && len(pc.KeyHasher) == 1 {
+ return pc.KeyHasher[0](pr.URL.Path, params, pr.Header, pr.Body, extra)
+ }
+
+ vals := make([]string, 0, (len(pc.CacheKeyParams) + len(pc.CacheKeyHeaders) + len(pc.CacheKeyFormFields)*2))
+
+ if v := pr.Header.Get(headers.NameAuthorization); v != "" {
+ vals = append(vals, fmt.Sprintf("%s.%s.", headers.NameAuthorization, v))
+ }
+
+ // Append the http method to the slice for creating the derived cache key
+ vals = append(vals, fmt.Sprintf("%s.%s.", "method", pr.Method))
+
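+ // a CacheKeyParams value of ["*"] means every query parameter participates in the key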
+ if len(pc.CacheKeyParams) == 1 && pc.CacheKeyParams[0] == "*" {
+ for p := range params {
+ vals = append(vals, fmt.Sprintf("%s.%s.", p, params.Get(p)))
+ }
+ } else {
+ for _, p := range pc.CacheKeyParams {
+ if v := params.Get(p); v != "" {
+ vals = append(vals, fmt.Sprintf("%s.%s.", p, v))
+ }
+ }
+ }
+
+ for _, p := range pc.CacheKeyHeaders {
+ if v := pr.Header.Get(p); v != "" {
+ vals = append(vals, fmt.Sprintf("%s.%s.", p, v))
+ }
+ }
+
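+ // for methods that carry a body, optionally fold configured form fields (urlencoded, multipart, or JSON) into the key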
+ if _, ok := methodsWithBody[pr.Method]; ok && len(pc.CacheKeyFormFields) > 0 {
+ ct := pr.Header.Get(headers.NameContentType)
+ if ct == headers.ValueXFormURLEncoded || strings.HasPrefix(ct, headers.ValueMultipartFormData) || ct == headers.ValueApplicationJSON {
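+ // buffer the body so it can be inspected here and still be replayed to the upstream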
+ b, _ := ioutil.ReadAll(pr.Body)
+ pr.Body = ioutil.NopCloser(bytes.NewReader(b))
+ if ct == headers.ValueXFormURLEncoded {
+ pr.ParseForm()
+ } else if strings.HasPrefix(ct, headers.ValueMultipartFormData) {
+ pr.ParseMultipartForm(1024 * 1024)
+ } else if ct == headers.ValueApplicationJSON {
+ var document map[string]interface{}
+ err := json.Unmarshal(b, &document)
+ if err == nil {
+ for _, f := range pc.CacheKeyFormFields {
+ v, err := deepSearch(document, f)
+ if err == nil {
+ if pr.Form == nil {
+ pr.Form = url.Values{}
+ }
+ pr.Form.Set(f, v)
+ }
+ }
+ }
+ }
+ pr.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+
+ for _, f := range pc.CacheKeyFormFields {
+ if _, ok := pr.Form[f]; ok {
+ if v := pr.FormValue(f); v != "" {
+ vals = append(vals, fmt.Sprintf("%s.%s.", f, v))
+ }
+ }
+ }
+ }
+
+ sort.Strings(vals)
+ return md5.Checksum(pr.URL.Path + "." + strings.Join(vals, "") + extra)
+}
+
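+// deepSearch walks a parsed JSON document along a slash-delimited key path and returns the matching leaf value (string, number, or boolean) as a string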
+func deepSearch(document map[string]interface{}, key string) (string, error) {
+
+ if key == "" {
+ return "", fmt.Errorf("invalid key name: %s", key)
+ }
+ parts := strings.Split(key, "/")
+ m := document
+ l := len(parts) - 1
+ for i, p := range parts {
+ v, ok := m[p]
+ if !ok {
+ return "", fmt.Errorf("could not find key: %s", key)
+ }
+ if l != i {
+ m, ok = v.(map[string]interface{})
+ if !ok {
+ return "", fmt.Errorf("could not find key: %s", key)
+ }
+ continue
+ }
+
+ if s, ok := v.(string); ok {
+ return s, nil
+ }
+
+ if f, ok := v.(float64); ok {
+ return strconv.FormatFloat(f, 'f', 4, 64), nil
+ }
+
+ if b, ok := v.(bool); ok {
+ return fmt.Sprintf("%t", b), nil
+ }
+
+ }
+ return "", fmt.Errorf("could not find key: %s", key)
+}
diff --git a/internal/proxy/engines/key_test.go b/internal/proxy/engines/key_test.go
new file mode 100644
index 000000000..913ba6504
--- /dev/null
+++ b/internal/proxy/engines/key_test.go
@@ -0,0 +1,244 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ ct "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+)
+
+const testMultipartBoundary = `; boundary=------------------------d0509edbe55938c0`
+const testMultipartBody = `--------------------------d0509edbe55938c0
+Content-Disposition: form-data; name="field1"
+
+value1
+--------------------------d0509edbe55938c0
+Content-Disposition: form-data; name="field2"
+
+value2
+--------------------------d0509edbe55938c0--
+`
+
+const testJSONDocument = `
+{
+ "requestType": "query",
+ "query": {
+ "table": "movies",
+ "fields": "eidr,title",
+ "filter": "year=1979",
+ "options": {
+ "batchSize": 20,
+ "someArray": [ "test" ],
+ "booleanHere": true
+ }
+ },
+ "field1": "value1"
+}
+`
+
+func TestDeepSearch(t *testing.T) {
+
+ var document map[string]interface{}
+ json.Unmarshal([]byte(testJSONDocument), &document)
+
+ val, err := deepSearch(document, "query/table")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if val != "movies" {
+ t.Errorf("expected %s got %s", "movies", val)
+ }
+
+ _, err = deepSearch(document, "")
+ if err == nil {
+ t.Errorf("expected error: %s", "could not find key")
+ }
+
+ _, err = deepSearch(document, "missingKey")
+ if err == nil {
+ t.Errorf("expected error: %s", "could not find key")
+ }
+
+ _, err = deepSearch(document, "query/filter/nottamap")
+ if err == nil {
+ t.Errorf("expected error: %s", "could not find key")
+ }
+
+ _, err = deepSearch(document, "query/options/batchSize")
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = deepSearch(document, "query/options/booleanHere")
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = deepSearch(document, "query/options/someArray")
+ if err == nil {
+ t.Errorf("expected error: %s", "could not find key")
+ }
+}
+
+func TestDeriveCacheKey(t *testing.T) {
+
+ rpath := &config.PathConfig{
+ Path: "/",
+ CacheKeyParams: []string{"query", "step", "time"},
+ CacheKeyHeaders: []string{},
+ CacheKeyFormFields: []string{"field1"},
+ }
+
+ cfg := &config.OriginConfig{
+ Paths: map[string]*config.PathConfig{
+ "root": rpath,
+ },
+ }
+
+ tr := httptest.NewRequest("GET", "http://127.0.0.1/?query=12345&start=0&end=0&step=300&time=0", nil)
+ tr = tr.WithContext(ct.WithResources(context.Background(), request.NewResources(cfg, cfg.Paths["root"], nil, nil, nil)))
+
+ pr := newProxyRequest(tr, nil)
+ key := pr.DeriveCacheKey(nil, "extra")
+
+ if key != "52dc11456c84506d3444e53ee4c99777" {
+ t.Errorf("expected %s got %s", "52dc11456c84506d3444e53ee4c99777", key)
+ }
+
+ cfg.Paths["root"].CacheKeyParams = []string{"*"}
+
+ pr = newProxyRequest(tr, nil)
+ key = pr.DeriveCacheKey(pr.URL, "extra")
+ if key != "407aba34f02c87f6898a6d80b01f38a4" {
+ t.Errorf("expected %s got %s", "407aba34f02c87f6898a6d80b01f38a4", key)
+ }
+
+ const expected = "cb84ad010abb4d0f864470540a46f137"
+
+ tr = httptest.NewRequest(http.MethodPost, "http://127.0.0.1/", bytes.NewReader([]byte("field1=value1")))
+ tr = tr.WithContext(ct.WithResources(context.Background(), request.NewResources(cfg, cfg.Paths["root"], nil, nil, nil)))
+ tr.Header.Set(headers.NameContentType, headers.ValueXFormURLEncoded)
+ pr = newProxyRequest(tr, nil)
+ key = pr.DeriveCacheKey(nil, "extra")
+ if key != expected {
+ t.Errorf("expected %s got %s", expected, key)
+ }
+
+ tr = httptest.NewRequest(http.MethodPut, "http://127.0.0.1/", bytes.NewReader([]byte(testMultipartBody)))
+ tr = tr.WithContext(ct.WithResources(context.Background(), request.NewResources(cfg, cfg.Paths["root"], nil, nil, nil)))
+ tr.Header.Set(headers.NameContentType, headers.ValueMultipartFormData+testMultipartBoundary)
+ tr.Header.Set(headers.NameContentLength, strconv.Itoa(len(testMultipartBody)))
+ pr = newProxyRequest(tr, nil)
+ key = pr.DeriveCacheKey(nil, "extra")
+ if key != "4766201eee9ef1916f57309deae22f90" {
+ t.Errorf("expected %s got %s", "4766201eee9ef1916f57309deae22f90", key)
+ }
+
+ tr = httptest.NewRequest(http.MethodPost, "http://127.0.0.1/", bytes.NewReader([]byte(testJSONDocument)))
+ tr = tr.WithContext(ct.WithResources(context.Background(), request.NewResources(cfg, cfg.Paths["root"], nil, nil, nil)))
+ tr.Header.Set(headers.NameContentType, headers.ValueApplicationJSON)
+ tr.Header.Set(headers.NameContentLength, strconv.Itoa(len(testJSONDocument)))
+ pr = newProxyRequest(tr, nil)
+ pr.upstreamRequest.URL = nil
+ key = pr.DeriveCacheKey(nil, "extra")
+ if key != expected {
+ t.Errorf("expected %s got %s", expected, key)
+ }
+
+ // Test Custom KeyHasher Integration
+ rpath.KeyHasher = []config.KeyHasherFunc{exampleKeyHasher}
+ key = pr.DeriveCacheKey(nil, "extra")
+ if key != "test-key" {
+ t.Errorf("expected %s got %s", "test-key", key)
+ }
+
+}
+
+func exampleKeyHasher(path string, params url.Values, headers http.Header, body io.ReadCloser, extra string) string {
+ return "test-key"
+}
+
+func TestDeriveCacheKeyAuthHeader(t *testing.T) {
+
+ client := &TestClient{
+ config: &config.OriginConfig{
+ Paths: map[string]*config.PathConfig{
+ "root": {
+ Path: "/",
+ CacheKeyParams: []string{"query", "step", "time"},
+ CacheKeyHeaders: []string{"X-Test-Header"},
+ },
+ },
+ },
+ }
+
+ tr := httptest.NewRequest("GET", "http://127.0.0.1/?query=12345&start=0&end=0&step=300&time=0", nil)
+ tr = tr.WithContext(ct.WithResources(context.Background(),
+ request.NewResources(client.Configuration(), client.Configuration().Paths["root"], nil, nil, nil)))
+
+ tr.Header.Add("Authorization", "test")
+ tr.Header.Add("X-Test-Header", "test2")
+
+ pr := newProxyRequest(tr, nil)
+
+ //r := &model.Request{URL: u, TimeRangeQuery: ×eries.TimeRangeQuery{Step: 300000}, ClientRequest: tr}
+ //r.Headers = tr.Header
+
+ key := pr.DeriveCacheKey(nil, "extra")
+
+ if key != "60257fa6b18d6072b90a294269a8e6e1" {
+ t.Errorf("expected %s got %s", "60257fa6b18d6072b90a294269a8e6e1", key)
+ }
+
+}
+
+func TestDeriveCacheKeyNoPathConfig(t *testing.T) {
+
+ client := &TestClient{
+ config: &config.OriginConfig{
+ Paths: map[string]*config.PathConfig{
+ "root": {
+ Path: "/",
+ CacheKeyParams: []string{"query", "step", "time"},
+ CacheKeyHeaders: []string{},
+ },
+ },
+ },
+ }
+
+ tr := httptest.NewRequest("GET", "http://127.0.0.1/?query=12345&start=0&end=0&step=300&time=0", nil)
+ tr = tr.WithContext(ct.WithResources(context.Background(),
+ request.NewResources(client.Configuration(), nil, nil, nil, nil)))
+
+ pr := newProxyRequest(tr, nil)
+ key := pr.DeriveCacheKey(nil, "extra")
+
+ if key != "f53b04ce5c434a7357804ae15a64ee6c" {
+ t.Errorf("expected %s got %s", "f53b04ce5c434a7357804ae15a64ee6c", key)
+ }
+
+}
diff --git a/internal/proxy/engines/objectproxycache.go b/internal/proxy/engines/objectproxycache.go
new file mode 100644
index 000000000..7c651db28
--- /dev/null
+++ b/internal/proxy/engines/objectproxycache.go
@@ -0,0 +1,339 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/pkg/locks"
+)
+
+func handleCacheKeyHit(pr *proxyRequest) error {
+
+ d := pr.cacheDocument
+
+ if d != nil && d.StoredRangeParts != nil && len(d.StoredRangeParts) > 0 {
+ d.LoadRangeParts()
+ }
+
+ ok, err := confirmTrueCacheHit(pr)
+ if ok {
+ return handleTrueCacheHit(pr)
+ }
+
+ // if not ok, then confirmTrueCacheHit already redirected the
+ // request to the correct handler; we just return its result here.
+ return err
+}
+
+func handleCachePartialHit(pr *proxyRequest) error {
+
+ // if we already have a revalidation in progress, then we've already confirmed it's not
+ // a true cache hit on the existing cached ranges; otherwise we need to verify first.
+ if pr.revalidation == RevalStatusNone {
+ if ok, err := confirmTrueCacheHit(pr); !ok {
+ // if not ok, then confirmTrueCacheHit has already redirected the
+ // request to the correct handler; we just return its result here.
+ return err
+ }
+ }
+
+ pr.prepareUpstreamRequests()
+
+ handleUpstreamTransactions(pr)
+
+ d := pr.cacheDocument
+ resp := pr.upstreamResponse
+ if pr.isPartialResponse {
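+ // merge the newly fetched ranges with the parts already cached, then attempt to assemble the complete body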
+ b, _ := ioutil.ReadAll(pr.upstreamReader)
+ d2 := &HTTPDocument{}
+
+ d2.ParsePartialContentBody(resp, b)
+ d.LoadRangeParts()
+
+ d2.Ranges = d2.RangeParts.Ranges()
+
+ d.RangeParts.Merge(d2.RangeParts)
+ d.Ranges = d.RangeParts.Ranges()
+ d.StoredRangeParts = d.RangeParts.PackableMultipartByteRanges()
+ err := d.FulfillContentBody()
+
+ if err == nil {
+ pr.upstreamResponse.Body = ioutil.NopCloser(bytes.NewBuffer(d.Body))
+ pr.upstreamResponse.Header.Set(headers.NameContentType, d.ContentType)
+ pr.upstreamReader = pr.upstreamResponse.Body
+ } else {
+ h, b := d.RangeParts.ExtractResponseRange(pr.wantedRanges, d.ContentLength, d.ContentType, nil)
+
+ headers.Merge(pr.upstreamResponse.Header, h)
+ pr.upstreamReader = ioutil.NopCloser(bytes.NewBuffer(b))
+ }
+
+ } else {
+ if d != nil {
+ d.RangeParts = nil
+ d.Ranges = nil
+ d.StoredRangeParts = nil
+ d.StatusCode = resp.StatusCode
+ http.Header(d.Headers).Del(headers.NameContentRange)
+ }
+ }
+
+ pr.store()
+
+ return handleResponse(pr)
+
+}
+
+func confirmTrueCacheHit(pr *proxyRequest) (bool, error) {
+
+ pr.cachingPolicy.Merge(pr.cacheDocument.CachingPolicy)
+
+ if !pr.checkCacheFreshness() && pr.cachingPolicy.CanRevalidate {
+ return false, handleCacheRevalidation(pr)
+ }
+ if !pr.cachingPolicy.IsFresh {
+ pr.cacheStatus = status.LookupStatusKeyMiss
+ return false, handleCacheKeyMiss(pr)
+ }
+
+ return true, nil
+}
+
+func handleCacheRangeMiss(pr *proxyRequest) error {
+ // ultimately we can optimize range miss functionality compared to partial hit
+ // (e.g., if the object has expired, no need to revalidate on a range miss,
+ // but must dump old parts if the new range has a different etag or last-modified)
+ // for now we'll just treat it like partial hit, but it's still observed as a range miss
+ return handleCachePartialHit(pr)
+}
+
+func handleCacheRevalidation(pr *proxyRequest) error {
+
+ pr.revalidation = RevalStatusInProgress
+
+ // if it's a range miss, we don't need to remote revalidate.
+ // range miss means we have a cache key for this object, but
+ // not any of the byte ranges that the user has requested.
+ // since the needed range is 100% uncached, we can use
+ // the last-modified/etag of the new response to perform
+ // an internal revalidation of the pre-existing partial content.
+ if pr.cacheStatus == status.LookupStatusRangeMiss {
+ pr.revalidation = RevalStatusLocal
+ return handleCacheRangeMiss(pr)
+ }
+
+ // all other cache statuses that got us to this point mean
+ // we have to perform a remote revalidation; queue it up
+ pr.prepareRevalidationRequest()
+
+ if pr.cacheStatus == status.LookupStatusPartialHit {
+ // this will handle all upstream calls including prepared reval
+ return handleCachePartialHit(pr)
+ }
+
+ // all remaining cache statuses indicate there are no upstream
+ // requests other than this revalidation, so let's make the call
+ handleUpstreamTransactions(pr)
+
+ return handleCacheRevalidationResponse(pr)
+
+}
+
+func handleCacheRevalidationResponse(pr *proxyRequest) error {
+
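+ // a 304 Not Modified means the cached object is still valid; mark it fresh and serve it as a true cache hit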
+ if pr.upstreamResponse.StatusCode == http.StatusNotModified {
+ pr.revalidation = RevalStatusOK
+ pr.cachingPolicy.IsFresh = true
+ pr.cachingPolicy.LocalDate = time.Now()
+ pr.cacheStatus = status.LookupStatusRevalidated
+ pr.upstreamResponse.StatusCode = pr.cacheDocument.StatusCode
+ pr.writeToCache = true
+ pr.store()
+ pr.upstreamReader = bytes.NewBuffer(pr.cacheDocument.Body)
+ return handleTrueCacheHit(pr)
+ }
+
+ pr.revalidation = RevalStatusFailed
+ pr.cacheStatus = status.LookupStatusKeyMiss
+ return handleAllWrites(pr)
+}
+
+func handleTrueCacheHit(pr *proxyRequest) error {
+
+ d := pr.cacheDocument
+ if d == nil {
+ return errors.New("nil cacheDocument")
+ }
+
+ if pr.cachingPolicy.IsNegativeCache {
+ pr.cacheStatus = status.LookupStatusNegativeCacheHit
+ }
+
+ pr.upstreamResponse = &http.Response{StatusCode: d.StatusCode, Request: pr.Request, Header: d.Headers}
+ if pr.wantsRanges {
+ h, b := d.RangeParts.ExtractResponseRange(pr.wantedRanges, d.ContentLength, d.ContentType, d.Body)
+ headers.Merge(pr.upstreamResponse.Header, h)
+ pr.upstreamReader = bytes.NewBuffer(b)
+ } else {
+ pr.upstreamReader = bytes.NewBuffer(d.Body)
+ }
+
+ return handleResponse(pr)
+
+}
+
+func handleCacheKeyMiss(pr *proxyRequest) error {
+
+ pr.prepareUpstreamRequests()
+
+ handleUpstreamTransactions(pr)
+
+ return handleAllWrites(pr)
+}
+
+func handleUpstreamTransactions(pr *proxyRequest) error {
+
+ pr.makeUpstreamRequests()
+ pr.reconstituteResponses()
+ pr.determineCacheability()
+
+ return nil
+}
+
+func handleAllWrites(pr *proxyRequest) error {
+ handleResponse(pr)
+ if pr.writeToCache {
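+ // if no document was loaded from cache, build one from the upstream response before storing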
+ if pr.cacheDocument == nil || !pr.cacheDocument.isLoaded {
+ d := DocumentFromHTTPResponse(pr.upstreamResponse, nil, pr.cachingPolicy)
+ pr.cacheDocument = d
+ if pr.isPartialResponse {
+ d.ParsePartialContentBody(pr.upstreamResponse, pr.cacheBuffer.Bytes())
+ } else {
+ d.Body = pr.cacheBuffer.Bytes()
+ }
+ }
+ pr.store()
+ }
+ return nil
+}
+
+func handleResponse(pr *proxyRequest) error {
+
+ pr.prepareResponse()
+ pr.writeResponseHeader()
+ pr.setBodyWriter() // what about partial hit? it does not set this
+ pr.writeResponseBody()
+ return nil
+}
+
+// Cache Status Response Handler Mappings
+var cacheResponseHandlers = map[status.LookupStatus]func(*proxyRequest) error{
+ status.LookupStatusHit: handleCacheKeyHit,
+ status.LookupStatusPartialHit: handleCachePartialHit,
+ status.LookupStatusKeyMiss: handleCacheKeyMiss,
+ status.LookupStatusRangeMiss: handleCacheRangeMiss,
+}
+
+func fetchViaObjectProxyCache(w io.Writer, r *http.Request) (*http.Response, status.LookupStatus) {
+
+ rsc := request.GetResources(r)
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+
+ pr := newProxyRequest(r, w)
+ pr.parseRequestRanges()
+
+ pr.cachingPolicy = GetRequestCachingPolicy(pr.Header)
+
+ pr.key = oc.CacheKeyPrefix + "." + pr.DeriveCacheKey(nil, "")
+ pcfResult, pcfExists := Reqs.Load(pr.key)
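+ // non-range requests that can join an in-flight PCF download, and requests that explicitly bypass caching, fall through to the plain proxy path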
+ if (!pr.wantsRanges && pcfExists) || pr.cachingPolicy.NoCache {
+ if pr.cachingPolicy.NoCache {
+ locks.Acquire(pr.key)
+ cc.Remove(pr.key)
+ locks.Release(pr.key)
+ }
+ return nil, status.LookupStatusProxyOnly
+ }
+
+ if pcfExists {
+ pr.collapsedForwarder = pcfResult.(ProgressiveCollapseForwarder)
+ }
+
+ pr.cachingPolicy.ParseClientConditionals()
+
+ if !rsc.NoLock {
+ locks.Acquire(pr.key)
+ }
+
+ var err error
+ pr.cacheDocument, pr.cacheStatus, pr.neededRanges, err = QueryCache(cc, pr.key, pr.wantedRanges)
+ if err == nil || err == cache.ErrKNF {
+ if f, ok := cacheResponseHandlers[pr.cacheStatus]; ok {
+ f(pr)
+ } else {
+ log.Warn("unhandled cache lookup response", log.Pairs{"lookupStatus": pr.cacheStatus})
+ return nil, status.LookupStatusProxyOnly
+ }
+ } else {
+ log.Error("cache lookup error", log.Pairs{"detail": err.Error()})
+ pr.cacheDocument = nil
+ pr.cacheStatus = status.LookupStatusKeyMiss
+ handleCacheKeyMiss(pr)
+ }
+
+ if !rsc.NoLock {
+ locks.Release(pr.key)
+ }
+
+ // newProxyRequest sets pr.started to time.Now()
+ pr.elapsed = time.Since(pr.started)
+ el := float64(pr.elapsed.Milliseconds()) / 1000.0
+ recordOPCResult(r, pr.cacheStatus, pr.upstreamResponse.StatusCode, r.URL.Path, el, pr.upstreamResponse.Header)
+
+ return pr.upstreamResponse, pr.cacheStatus
+}
+
+// ObjectProxyCacheRequest provides a Basic HTTP Reverse Proxy/Cache
+func ObjectProxyCacheRequest(w http.ResponseWriter, r *http.Request) {
+ _, cacheStatus := fetchViaObjectProxyCache(w, r)
+ if cacheStatus == status.LookupStatusProxyOnly {
+ DoProxy(w, r)
+ }
+}
+
+// FetchViaObjectProxyCache fetches an object from the cache, or from the origin on a miss, writes the object to the cache, and returns the object to the caller
+func FetchViaObjectProxyCache(r *http.Request) ([]byte, *http.Response, bool) {
+ w := bytes.NewBuffer(nil)
+ resp, cacheStatus := fetchViaObjectProxyCache(w, r)
+ if cacheStatus == status.LookupStatusProxyOnly {
+ resp = DoProxy(w, r)
+ }
+ return w.Bytes(), resp, cacheStatus == status.LookupStatusHit
+}
+
+func recordOPCResult(r *http.Request, cacheStatus status.LookupStatus, httpStatus int, path string, elapsed float64, header http.Header) {
+ recordResults(r, "ObjectProxyCache", cacheStatus, httpStatus, path, "", elapsed, nil, header)
+}
diff --git a/internal/proxy/engines/objectproxycache_test.go b/internal/proxy/engines/objectproxycache_test.go
new file mode 100644
index 000000000..288f5e709
--- /dev/null
+++ b/internal/proxy/engines/objectproxycache_test.go
@@ -0,0 +1,1202 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ tc "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+ "github.com/Comcast/trickster/pkg/rangesim"
+)
+
+func setupTestHarnessOPC(file, body string, code int, headers map[string]string) (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *request.Resources, error) {
+ return setupTestHarnessOPCByType(file, "test", "/opc", body, code, headers)
+}
+
+func setupTestHarnessOPCRange(hdr map[string]string) (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *request.Resources, error) {
+ return setupTestHarnessOPCByType("", "rangesim", "/opc", "", 0, hdr)
+}
+
+func setupTestHarnessOPCByType(
+ file, serverType, path, body string, code int, headers map[string]string,
+) (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *request.Resources, error) {
+
+ client := &TestClient{}
+ ts, w, r, hc, err := tu.NewTestInstance(file, client.DefaultPathConfigs, code, body, headers, serverType, path, "debug")
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("Could not load configuration: %s", err.Error())
+ }
+ r.RequestURI = ""
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ pc := rsc.PathConfig
+
+ if pc == nil {
+ return nil, nil, nil, nil, fmt.Errorf("could not find path %s", "/")
+ }
+
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+ oc.HTTPClient = hc
+
+ client.cache = cc
+ client.webClient = hc
+ client.config = oc
+
+ pc.CacheKeyParams = []string{"rangeKey", "instantKey"}
+
+ return ts, w, r, rsc, nil
+}
+
+func setupTestHarnessOPCWithPCF(file, body string, code int, headers map[string]string) (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *request.Resources, error) {
+
+ client := &TestClient{}
+ ts, w, r, hc, err := tu.NewTestInstance(file, client.DefaultPathConfigs, code, body, headers, "prometheus", "/api/v1/query", "debug")
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ pc := rsc.PathConfig
+
+ if pc == nil {
+ return nil, nil, nil, nil, fmt.Errorf("could not find path %s", "/api/v1/query")
+ }
+
+ pc.CollapsedForwardingName = "progressive"
+ pc.CollapsedForwardingType = config.CFTypeProgressive
+
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+
+ oc.HTTPClient = hc
+ client.cache = cc
+ client.webClient = hc
+ client.config = oc
+
+ pc.CacheKeyParams = []string{"rangeKey", "instantKey"}
+
+ return ts, w, r, rsc, nil
+}
+
+func TestObjectProxyCacheRequest(t *testing.T) {
+
+ hdrs := map[string]string{"Cache-Control": "max-age=60"}
+ ts, _, r, rsc, err := setupTestHarnessOPC("", "test", http.StatusPartialContent, hdrs)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ r.Header.Add(headers.NameRange, "bytes=0-3")
+
+ oc := rsc.OriginConfig
+ oc.MaxTTLSecs = 15
+ oc.MaxTTL = time.Duration(oc.MaxTTLSecs) * time.Second
+
+ _, e := testFetchOPC(r, http.StatusPartialContent, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // get cache hit coverage too by repeating:
+ _, e = testFetchOPC(r, http.StatusPartialContent, "test", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Remove Cache Hit from the Response Handler Map to test unknown handler error condition
+ delete(cacheResponseHandlers, status.LookupStatusHit)
+
+ _, e = testFetchOPC(r, http.StatusPartialContent, "test", map[string]string{"status": "proxy-only"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // add cache hit back
+ cacheResponseHandlers[status.LookupStatusHit] = handleCacheKeyHit
+
+}
+
+func TestObjectProxyCachePartialHit(t *testing.T) {
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ // Cache miss on range
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+ expectedBody, err := getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e := testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Partial Hit on an overlapping range
+ r.Header.Set(headers.NameRange, "bytes=5-15")
+
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Range Miss on a separate range
+ r.Header.Set(headers.NameRange, "bytes=60-70")
+
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "rmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Partial Hit on multiple ranges
+ r.Header.Set(headers.NameRange, "bytes=10-20,50-55,60-65,69-75")
+ expectedBody, err = getExpectedRangeBody(r, "d5a5acd7eb4d3f622c62947a9904b89b")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Fulfill the cache with the remaining parts
+ r.Header.Del(headers.NameRange)
+ _, e = testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Test Articulated Upstream
+ rsc.OriginConfig.DearticulateUpstreamRanges = true
+ r.Header.Set(headers.NameRange, "bytes=10-20,50-55,60-65,69-75")
+ r.URL.Path = "/new/test/path"
+ expectedBody, err = getExpectedRangeBody(r, "d5a5acd7eb4d3f622c62947a9904b89b")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestFullArticulation(t *testing.T) {
+
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ // Test Articulated Upstream
+ rsc.OriginConfig.DearticulateUpstreamRanges = true
+ rsc.OriginConfig.RevalidationFactor = 2
+ r.Header.Set(headers.NameRange, "bytes=10-20,50-55,60-65,69-75")
+ r.URL.Path = "/new/test/path"
+ r.URL.RawQuery = "max-age=1"
+ expectedBody, err := getExpectedRangeBody(r, "d5a5acd7eb4d3f622c62947a9904b89b")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e := testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.RawQuery = "max-age=1&status=200"
+ r.URL.Path = "/new/test/path/2"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.RawQuery = "max-age=1&ims=200"
+ r.URL.Path = "/new/test/path/3"
+ r.Header.Set(headers.NameRange, "bytes=10-20")
+ expectedBody, err = getExpectedRangeBody(r, "d5a5acd7eb4d3f622c62947a9904b89b")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.Header.Set(headers.NameRange, "bytes=10-20, 25-30, 45-60")
+ expectedBody, err = getExpectedRangeBody(r, "a262725e1b8ae4967d369cff746e3924")
+ if err != nil {
+ t.Error(err)
+ }
+ r.URL.RawQuery = "max-age=1&ims=206"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.Header.Set(headers.NameRange, "bytes=9-20, 25-31, 42-65, 70-80")
+ expectedBody, err = getExpectedRangeBody(r, "34b73ea5c4c1ab5b9e34c9888119c58f")
+ if err != nil {
+ t.Error(err)
+ }
+ r.URL.RawQuery = "max-age=1&ims=206"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.Header.Set(headers.NameRange, "bytes=9-20, 90-95, 100-105")
+ expectedBody, err = getExpectedRangeBody(r, "01760208a2d6589fc9620627d561640d")
+ if err != nil {
+ t.Error(err)
+ }
+ r.URL.RawQuery = "max-age=1&ims=206"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=9-20, 90-95, 100-105")
+ expectedBody, err = getExpectedRangeBody(r, "01760208a2d6589fc9620627d561640d")
+ if err != nil {
+ t.Error(err)
+ }
+ r.URL.Path = "/new/test/path/20"
+ r.URL.RawQuery = "max-age=1"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.Header.Set(headers.NameRange, "bytes=9-20, 25-32, 41-65")
+ expectedBody, err = getExpectedRangeBody(r, "722af19813169c99d8bda37a2f244f39")
+ if err != nil {
+ t.Error(err)
+ }
+ r.URL.RawQuery = "max-age=1&ims=206&non-ims=206"
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.Header.Del(headers.NameRange)
+ r.URL.RawQuery = "max-age=1"
+ _, e = testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=9-20, 21-22")
+ r.URL.Path = "/new/test/path/21"
+ expectedBody, err = getExpectedRangeBody(r, "368b9fbcef800068a48e70fa6e040289")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-1221")
+ r.URL.Path = "/new/test/path/22"
+ r.URL.RawQuery = ""
+ expectedBody, err = getExpectedRangeBody(r, "722af19813169c99d8bda37a2f244f39")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-1220,1221-1221")
+ expectedBody, err = getExpectedRangeBody(r, "0a6d16343fbe859a10cf1ac673e23dc9")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Del(headers.NameRange)
+ _, e = testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+}
+
+func TestObjectProxyCachePartialHitNotFresh(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ pr := newProxyRequest(r, w)
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+ pr.cachingPolicy = GetRequestCachingPolicy(pr.Header)
+ pr.key = oc.Host + "." + pr.DeriveCacheKey(nil, "")
+ pr.cacheDocument, pr.cacheStatus, pr.neededRanges, _ = QueryCache(cc, pr.key, pr.wantedRanges)
+ handleCacheKeyMiss(pr)
+
+ pr.cachingPolicy.CanRevalidate = false
+ pr.cachingPolicy.IsFresh = false
+ pr.cachingPolicy.FreshnessLifetime = 0
+
+ pr.store()
+
+ handleCachePartialHit(pr)
+
+ if pr.isPartialResponse {
+ t.Errorf("Expected full response, got %t", pr.isPartialResponse)
+ }
+
+ if pr.cacheStatus != status.LookupStatusKeyMiss {
+ t.Errorf("Expected %s, got %s", status.LookupStatusKeyMiss, pr.cacheStatus)
+ }
+}
+
+func TestObjectProxyCachePartialHitFullResponse(t *testing.T) {
+
+ ts, w, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ pr := newProxyRequest(r, w)
+ oc := rsc.OriginConfig
+ cc := rsc.CacheClient
+ pr.cachingPolicy = GetRequestCachingPolicy(pr.Header)
+ pr.key = oc.Host + "." + pr.DeriveCacheKey(nil, "")
+ pr.cacheDocument, pr.cacheStatus, pr.neededRanges, _ = QueryCache(cc, pr.key, pr.wantedRanges)
+ handleCacheKeyMiss(pr)
+ handleCachePartialHit(pr)
+
+ if pr.isPartialResponse {
+ t.Errorf("Expected full response, got %t", pr.isPartialResponse)
+ }
+}
+
+func TestObjectProxyCacheRangeMiss(t *testing.T) {
+
+ ts, _, r, _, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+ expectedBody, err := getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e := testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=15-20")
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "rmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheRevalidation(t *testing.T) {
+
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ rsc.OriginConfig.RevalidationFactor = 2
+
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+ if rsc.PathConfig == nil {
+ t.Error(errors.New("nil path config"))
+ }
+
+ if rsc.PathConfig.ResponseHeaders == nil {
+ rsc.PathConfig.ResponseHeaders = make(map[string]string)
+ }
+ rsc.PathConfig.ResponseHeaders[headers.NameCacheControl] = "max-age=1"
+
+ expectedBody, err := getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e := testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1010)
+
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "rhit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1010)
+ r.Header.Set(headers.NameRange, "bytes=0-15")
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // purge the cache
+ r.Header.Del(headers.NameRange)
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusOK, expectedBody, map[string]string{"status": "proxy-only"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/10"
+
+ r.Header.Del(headers.NameCacheControl)
+
+ // now store it with an earlier last modified header
+ r.Header.Del(headers.NameCacheControl)
+ rsc.PathConfig.ResponseHeaders[headers.NameLastModified] = time.Unix(1577836799, 0).Format(time.RFC1123)
+ rsc.PathConfig.ResponseHeaders["-"+headers.NameCacheControl] = ""
+ rsc.PathConfig.ResponseHeaders[headers.NameExpires] = time.Now().Add(-1 * time.Minute).Format(time.RFC1123)
+
+ expectedBody, err = getExpectedRangeBody(r, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusOK, expectedBody, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // delete(rsc.PathConfig.ResponseHeaders, headers.NameLastModified)
+ // delete(rsc.PathConfig.ResponseHeaders, headers.NameExpires)
+
+ // expectedBody, err = getExpectedRangeBody(r, "")
+ // _, e = testFetchOPC(r, http.StatusOK, expectedBody, map[string]string{"status": "kmiss"})
+ // if e != nil {
+ // for _, err = range e {
+ // t.Error(err)
+ // }
+ // }
+}
+
+func TestObjectProxyCacheRequestWithPCF(t *testing.T) {
+
+ headers := map[string]string{"Cache-Control": "max-age=60"}
+ ts, _, r, rsc, err := setupTestHarnessOPCWithPCF("", "test", http.StatusOK, headers)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ oc := rsc.OriginConfig
+ oc.MaxTTLSecs = 15
+ oc.MaxTTL = time.Duration(oc.MaxTTLSecs) * time.Second
+
+ r.Header.Set("testHeaderName", "testHeaderValue")
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // get cache hit coverage too by repeating:
+ _, e = testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheTrueHitNoDocumentErr(t *testing.T) {
+
+ const expected = "nil cacheDocument"
+
+ pr := &proxyRequest{}
+ err := handleTrueCacheHit(pr)
+ if err.Error() != expected {
+ t.Errorf("expected %s got %s", expected, err.Error())
+ }
+}
+
+func TestObjectProxyCacheRequestClientNoCache(t *testing.T) {
+
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusOK, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "proxy-only"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestFetchViaObjectProxyCacheRequestClientNoCache(t *testing.T) {
+
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusOK, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ r.Header.Set(headers.NameCacheControl, headers.ValueNoCache)
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "proxy-only"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ _, _, b := FetchViaObjectProxyCache(r)
+ if b {
+ t.Errorf("expected %t got %t", false, b)
+ }
+}
+
+func TestObjectProxyCacheRequestOriginNoCache(t *testing.T) {
+
+ headers := map[string]string{"Cache-Control": "no-cache"}
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusOK, headers)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheIMS(t *testing.T) {
+
+ hdrs := map[string]string{"Cache-Control": "max-age=1"}
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(hdrs)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ rsc.OriginConfig.RevalidationFactor = 2
+
+ _, e := testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameIfModifiedSince, "Wed, 01 Jan 2020 00:00:00 UTC")
+
+ _, e = testFetchOPC(r, http.StatusNotModified, "", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(time.Millisecond * 1050)
+
+ r.URL.RawQuery = "status=200"
+
+ _, e = testFetchOPC(r, http.StatusNotModified, "", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ //t.Errorf("%s", "foo")
+
+}
+
+func TestObjectProxyCacheIUS(t *testing.T) {
+
+ // TODO: how does this test IUS???
+
+ headers := map[string]string{"Cache-Control": "max-age=60"}
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusOK, headers)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheINM(t *testing.T) {
+
+ rh := map[string]string{headers.NameCacheControl: "max-age=60", headers.NameETag: "test"}
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusOK, rh)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameIfNoneMatch, `"test"`)
+ _, e = testFetchOPC(r, http.StatusNotModified, "", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameIfNoneMatch, `W/"test2"`)
+ _, e = testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheNoRevalidate(t *testing.T) {
+
+ headers := map[string]string{headers.NameCacheControl: headers.ValueMaxAge + "=1"}
+ ts, _, r, rsc, err := setupTestHarnessOPC("", "test", http.StatusOK, headers)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ p := rsc.PathConfig
+ p.ResponseHeaders = headers
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(1010 * time.Millisecond)
+
+ _, e = testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheCanRevalidate(t *testing.T) {
+
+ headers := map[string]string{
+ headers.NameCacheControl: headers.ValueMaxAge + "=1",
+ headers.NameETag: "test-etag",
+ }
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ p := rsc.PathConfig
+ p.ResponseHeaders = headers
+ rsc.OriginConfig.RevalidationFactor = 2
+
+ _, e := testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ time.Sleep(1010 * time.Millisecond)
+
+ _, e = testFetchOPC(r, http.StatusOK, rangesim.Body, map[string]string{"status": "rhit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheRevalidated(t *testing.T) {
+
+ const dt = "Sun, 16 Jun 2019 14:19:04 GMT"
+
+ hdr := map[string]string{
+ headers.NameCacheControl: headers.ValueMaxAge + "=2",
+ headers.NameLastModified: dt,
+ }
+ ts, _, r, rsc, err := setupTestHarnessOPC("", "test", http.StatusOK, hdr)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ rsc.PathConfig.ResponseHeaders = hdr
+
+ _, e := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameIfModifiedSince, dt)
+ _, e = testFetchOPC(r, http.StatusNotModified, "", map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestObjectProxyCacheRequestNegativeCache(t *testing.T) {
+
+ ts, _, r, rsc, err := setupTestHarnessOPC("", "test", http.StatusNotFound, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ pc := config.NewPathConfig()
+ cfg := rsc.OriginConfig
+ cfg.Paths = map[string]*config.PathConfig{
+ "/": pc,
+ }
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(cfg, pc, rsc.CacheConfig, rsc.CacheClient, rsc.OriginClient)))
+
+ _, e := testFetchOPC(r, http.StatusNotFound, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+	// request again; it should still be a cache miss, but this time 404s are placed into the Negative Cache for 30s
+ cfg.NegativeCache[404] = time.Second * 30
+
+ _, e = testFetchOPC(r, http.StatusNotFound, "test", map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // request again, this time it should be a cache hit.
+ _, e = testFetchOPC(r, http.StatusNotFound, "test", map[string]string{"status": "nchit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+}
+
+func TestHandleCacheRevalidation(t *testing.T) {
+
+ ts, _, r, _, err := setupTestHarnessOPC("", "test", http.StatusNotFound, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ pr := newProxyRequest(r, nil)
+ pr.cacheStatus = status.LookupStatusRangeMiss
+ pr.cachingPolicy = &CachingPolicy{}
+
+ err = handleCacheRevalidation(pr)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func getExpectedRangeBody(r *http.Request, boundary string) (string, error) {
+
+ client := &http.Client{}
+ resp, err := client.Do(r)
+ if err != nil {
+ return "", err
+ }
+ b, _ := ioutil.ReadAll(resp.Body)
+ expectedBody := string(b)
+
+ if boundary != "" {
+ expectedBody = strings.Replace(expectedBody, "TestRangeServerBoundary", boundary, -1)
+ }
+
+ return expectedBody, nil
+}
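+
+// Note: getExpectedRangeBody replays the request directly against the upstream range
+// simulator using a plain http.Client, bypassing the object proxy cache entirely, so the
+// returned string is the ground-truth body for the current Range header. When a boundary
+// is supplied, the simulator's fixed "TestRangeServerBoundary" token is substituted so the
+// expected multipart body lines up with the boundary used in the proxied response.
+// Illustrative use (the boundary value here is test-specific):
+//
+//	req := r.Clone(context.Background())
+//	expectedBody, err := getExpectedRangeBody(req, "563a7014513fc6f0cbb4e8632dd107fc")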
+
+func TestRangesExhaustive(t *testing.T) {
+
+ ts, _, r, rsc, err := setupTestHarnessOPCRange(nil)
+ if err != nil {
+ t.Error(err)
+ }
+ defer ts.Close()
+
+ rsc.OriginConfig.RevalidationFactor = 2
+ rsc.OriginConfig.DearticulateUpstreamRanges = true
+
+ r.URL.Path = "/opc/test/1"
+ r.Header.Set(headers.NameRange, "bytes=0-6,25-32")
+ req := r.Clone(context.Background())
+ expectedBodyA, err := getExpectedRangeBody(req, "563a7014513fc6f0cbb4e8632dd107fc")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e := testFetchOPC(r, http.StatusPartialContent, expectedBodyA, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-10,20-28")
+ req = r.Clone(context.Background())
+ expectedBody, err := getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-6")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=5-7")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=29-29")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=9-22,28-60")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "1fd80b6b357b4608027dd500ad3f3c21")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Del(headers.NameRange)
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "1fd80b6b357b4608027dd500ad3f3c21")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusOK, expectedBody, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-10,20-28")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-6")
+ req = r.Clone(context.Background())
+ expectedBody, err = getExpectedRangeBody(req, "33f2477458123b02034bfbe20c52d949")
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody, map[string]string{"status": "hit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+	// Test Range Revalidations
+
+ rsc.PathConfig.ResponseHeaders = map[string]string{headers.NameCacheControl: "max-age=1"}
+
+ r.URL.Path = "/opc/test/2"
+ r.Header.Set(headers.NameRange, "bytes=0-6")
+ req = r.Clone(context.Background())
+ expectedBody1, err := getExpectedRangeBody(req, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody1, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/3"
+ r.Header.Set(headers.NameRange, "bytes=0-6, 8-10")
+ req = r.Clone(context.Background())
+ expectedBody2, err := getExpectedRangeBody(req, "1b4e59d25d723e317359c5e542d80f5c")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody2, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/4"
+ r.Header.Set(headers.NameRange, "bytes=0-6, 8-10")
+ req = r.Clone(context.Background())
+ expectedBody3, err := getExpectedRangeBody(req, "1b4e59d25d723e317359c5e542d80f5c")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody3, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/5"
+ r.Header.Set(headers.NameRange, "bytes=6-20")
+ req = r.Clone(context.Background())
+ expectedBody4, err := getExpectedRangeBody(req, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody4, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/6"
+ r.Header.Set(headers.NameRange, "bytes=6-20")
+ req = r.Clone(context.Background())
+ expectedBody5, err := getExpectedRangeBody(req, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody5, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/7"
+ r.Header.Set(headers.NameRange, "bytes=6-20")
+ req = r.Clone(context.Background())
+ expectedBody6, err := getExpectedRangeBody(req, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody6, map[string]string{"status": "kmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ // Now sleep to let them expire but not purge
+ time.Sleep(1050 * time.Millisecond)
+
+ // Now make more requests that require a revalidation first.
+
+ r.URL.Path = "/opc/test/2"
+ r.Header.Set(headers.NameRange, "bytes=0-6")
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody1, map[string]string{"status": "rhit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/3"
+ r.Header.Set(headers.NameRange, "bytes=0-6, 8-10")
+ expectedBody2 = strings.Replace(expectedBody2, "TestRangeServerBoundary", "1b4e59d25d723e317359c5e542d80f5c", -1)
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody2, map[string]string{"status": "rhit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/4"
+ r.Header.Set(headers.NameRange, "bytes=5-9")
+ req = r.Clone(context.Background())
+ expectedBody3, err = getExpectedRangeBody(req, "1b4e59d25d723e317359c5e542d80f5c")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody3, map[string]string{"status": "phit"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/5"
+ r.Header.Set(headers.NameRange, "bytes=0-5")
+ req = r.Clone(context.Background())
+ expectedBody4, err = getExpectedRangeBody(req, "")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody4, map[string]string{"status": "rmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/6"
+ r.Header.Set(headers.NameRange, "bytes=0-5,21-30")
+ req = r.Clone(context.Background())
+ expectedBody5, err = getExpectedRangeBody(req, "d51d39834c9650e17cc486c4a52cf572")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody5, map[string]string{"status": "rmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ r.URL.Path = "/opc/test/7"
+ r.Header.Set(headers.NameRange, "bytes=22-30,32-40")
+ req = r.Clone(context.Background())
+ expectedBody6, err = getExpectedRangeBody(req, "bab29463882afe6d6033e88dc74d2bdd")
+ if err != nil {
+ t.Error(err)
+ }
+ _, e = testFetchOPC(r, http.StatusPartialContent, expectedBody6, map[string]string{"status": "rmiss"})
+ for _, err = range e {
+ t.Error(err)
+ }
+
+ //rsc.OriginConfig.DearticulateUpstreamRanges = b
+
+ /*
+ √ curl -v --output - -H "Range: bytes=0-6,25-32" http://127.0.0.1:9091/rpc1/testing && \
+ √ curl -v --output - -H "Range: " http://127.0.0.1:9091/rpc1/testing
+ √ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing && \
+ √ curl -v --output - -H "Range: bytes=5-7" http://127.0.0.1:9091/rpc1/testing && \
+ √ curl -v --output - -H "Range: bytes=29-29" http://127.0.0.1:9091/rpc1/testing && \
+ √ curl -v --output - -H "Range: bytes=9-22,28-60" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - -H "Range: bytes=0-6,10-20" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - -H "Range: bytes=0-6, 10-19" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - -H "Range: bytes=0-6,10-20" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - http://127.0.0.1:9091/rpc1/testing
+ curl -v --output - -H "Range: bytes=0-6,7-1220" http://127.0.0.1:9091/rpc1/testing2 && \
+ curl -v --output - http://127.0.0.1:9091/rpc1/testing2 && \
+ curl -v --output - http://127.0.0.1:9091/rpc1/testing2
+ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing3 && \
+ curl -v --output - -H "Range: bytes=5-20" http://127.0.0.1:9091/rpc1/testing3
+ curl -v --output - -H "Range: bytes=5-20" http://127.0.0.1:9091/rpc1/testing4 && \
+ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing4
+
+ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing && \
+ sleep 6 && curl -v --output - -H "Range: bytes=7-7" http://127.0.0.1:9091/rpc1/testing && \
+ curl -v --output - -H "Range: bytes=0-6" http://127.0.0.1:9091/rpc1/testing
+
+ */
+
+}
+
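+// testFetchOPC runs a single request through ObjectProxyCacheRequest against a recorder and
+// verifies the status code, the exact response body, and any expected response header
+// fragments. Most callers above match the "status" header value, which reflects the cache
+// lookup status for the request (e.g. "kmiss" key miss, "hit", "phit" partial hit, "rhit"
+// revalidated hit, "rmiss" range miss, "nchit" negative-cache hit, "proxy-only" bypass).
+// A minimal illustrative call, assuming r was prepared by one of the setupTestHarnessOPC*
+// helpers:
+//
+//	if _, errs := testFetchOPC(r, http.StatusOK, "test", map[string]string{"status": "kmiss"}); errs != nil {
+//		for _, err := range errs {
+//			t.Error(err)
+//		}
+//	}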
+func testFetchOPC(r *http.Request, sc int, body string, match map[string]string) (*httptest.ResponseRecorder, []error) {
+
+ e := make([]error, 0)
+
+ w := httptest.NewRecorder()
+
+ ObjectProxyCacheRequest(w, r)
+ resp := w.Result()
+
+ err := testStatusCodeMatch(resp.StatusCode, sc)
+ if err != nil {
+ e = append(e, err)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ e = append(e, err)
+ }
+
+ err = testStringMatch(string(bodyBytes), body)
+ if err != nil {
+ e = append(e, err)
+ }
+
+ err = testResultHeaderPartMatch(resp.Header, match)
+ if err != nil {
+ e = append(e, err)
+ }
+
+ if len(e) == 0 {
+ e = nil
+ }
+
+ return w, e
+
+}
diff --git a/internal/proxy/engines/progressive_collapse_forwarder.go b/internal/proxy/engines/progressive_collapse_forwarder.go
new file mode 100644
index 000000000..c47573a04
--- /dev/null
+++ b/internal/proxy/engines/progressive_collapse_forwarder.go
@@ -0,0 +1,195 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+ "sync/atomic"
+)
+
+// ErrReadIndexTooLarge is an error indicating the read index is too large
+var ErrReadIndexTooLarge = errors.New("Read index too large")
+
+// NEED TO DEAL WITH TIMEOUT
+
+// IndexReader implements a reader to read data at a specific index into slice b
+type IndexReader func(index uint64, b []byte) (int, error)
+
+// ProgressiveCollapseForwarder accepts data written through the io.Writer interface, caches it and
+// makes all the data written available to n readers. The readers can request data at index i,
+// to which the PCF may block or return the data immediately.
+type ProgressiveCollapseForwarder interface {
+ AddClient(io.Writer) error
+ Write([]byte) (int, error)
+ Close()
+ IndexRead(uint64, []byte) (int, error)
+ WaitServerComplete()
+ WaitAllComplete()
+ GetBody() ([]byte, error)
+ GetResp() *http.Response
+}
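+
+// A minimal illustrative flow (the concrete wiring lives elsewhere in this package): a single
+// goroutine copies the origin response body into the PCF and closes it, while any number of
+// collapsed clients attach as writers and are served from the shared buffer.
+//
+//	pcf := NewPCF(resp, resp.ContentLength)
+//	go func() {
+//		io.Copy(pcf, resp.Body) // the only read from the origin
+//		pcf.Close()
+//	}()
+//	pcf.AddClient(w) // blocks until this client has been served; returns io.EOF on normal completion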
+
+type progressiveCollapseForwarder struct {
+ resp *http.Response
+ rIndex uint64
+ dataIndex uint64
+ data [][]byte
+ dataLen uint64
+ dataStore []byte
+ dataStoreLen uint64
+ readCond *sync.Cond
+ serverReadDone int32
+ clientWaitgroup *sync.WaitGroup
+ serverWaitCond *sync.Cond
+}
+
+// NewPCF returns a new instance of a ProgressiveCollapseForwarder
+func NewPCF(resp *http.Response, contentLength int64) ProgressiveCollapseForwarder {
+	// This contiguous block of memory is just an underlying byte store, referenced by the slices defined in refs
+	// Thread safety is provided through an atomic read index, which the writer advances and which readers may not read past
+ // This effectively limits the readers and writer to separate areas in memory.
+ dataStore := make([]byte, contentLength)
+ refs := make([][]byte, ((contentLength/HTTPBlockSize)*2)+1)
+
+ var wg sync.WaitGroup
+ sd := sync.NewCond(&sync.Mutex{})
+ rc := sync.NewCond(&sync.Mutex{})
+
+ pfc := &progressiveCollapseForwarder{
+ resp: resp,
+ rIndex: 0,
+ dataIndex: 0,
+ data: refs,
+ dataLen: uint64(len(refs)),
+ dataStore: dataStore,
+ dataStoreLen: uint64(contentLength),
+ readCond: rc,
+ serverReadDone: 0,
+ clientWaitgroup: &wg,
+ serverWaitCond: sd,
+ }
+
+ return pfc
+}
+
+// AddClient adds an io.Writer client to the ProgressiveCollapseForwarder
+// This client will read all the cached data and read from the live edge if caught up.
+func (pfc *progressiveCollapseForwarder) AddClient(w io.Writer) error {
+ pfc.clientWaitgroup.Add(1)
+ var readIndex uint64
+ var err error
+ remaining := 0
+ n := 0
+ buf := make([]byte, HTTPBlockSize)
+
+ for {
+ n, err = pfc.IndexRead(readIndex, buf)
+ if n > 0 {
+ // Handle the data returned by the read index > HTTPBlockSize
+ if n > HTTPBlockSize {
+ remaining = n
+ for {
+ if remaining > HTTPBlockSize {
+ w.Write(buf[0:HTTPBlockSize])
+ remaining -= HTTPBlockSize
+ } else {
+ w.Write(buf[0:remaining])
+ break
+ }
+ }
+ } else {
+ w.Write(buf[0:n])
+ }
+ readIndex++
+ }
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ break
+ }
+ }
+ pfc.clientWaitgroup.Done()
+ return io.EOF
+}
+
+// WaitServerComplete blocks until the object has been retrieved from the origin server
+// The full payload must be retrieved from the origin before it can be written to the actual cache
+func (pfc *progressiveCollapseForwarder) WaitServerComplete() {
+ pfc.serverWaitCond.Wait()
+}
+
+// WaitAllComplete will wait until all clients have completed or timed out
+// This is needed so that goroutines are not abandoned
+func (pfc *progressiveCollapseForwarder) WaitAllComplete() {
+ pfc.clientWaitgroup.Wait()
+}
+
+// GetBody returns the underlying body of the data written into a PCF
+func (pfc *progressiveCollapseForwarder) GetBody() ([]byte, error) {
+ if atomic.LoadInt32(&pfc.serverReadDone) == 0 {
+ return nil, errors.New("Server request not completed")
+ }
+ return pfc.dataStore[0:pfc.dataIndex], nil
+}
+
+// GetResp returns the response from the original request
+func (pfc *progressiveCollapseForwarder) GetResp() *http.Response {
+ return pfc.resp
+}
+
+// Write writes the data in b to the ProgressiveCollapseForwarder's data store,
+// adds a reference to that data, and increments the read index.
+func (pfc *progressiveCollapseForwarder) Write(b []byte) (int, error) {
+ n := atomic.LoadUint64(&pfc.rIndex)
+ l := uint64(len(b))
+ if pfc.dataIndex+l > pfc.dataStoreLen || n > pfc.dataLen {
+ return 0, io.ErrShortWrite
+ }
+ pfc.data[n] = pfc.dataStore[pfc.dataIndex : pfc.dataIndex+l]
+ copy(pfc.data[n], b)
+ pfc.dataIndex += l
+ atomic.AddUint64(&pfc.rIndex, 1)
+ pfc.readCond.Broadcast()
+ return len(b), nil
+}
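+
+// Note on the Write/IndexRead handshake: each Write claims the next slot in pfc.data, copies b
+// into the backing store, and then atomically advances rIndex, broadcasting to any readers
+// blocked in IndexRead. A reader asking for slot i proceeds only once i < rIndex; after Close
+// has been called and the reader has caught up, IndexRead returns io.EOF. Illustrative consumer
+// loop (buf is assumed to be at least as large as any single Write):
+//
+//	var i uint64
+//	for {
+//		n, err := pfc.IndexRead(i, buf)
+//		if err != nil {
+//			break // io.EOF once the upstream read is complete and all slots are drained
+//		}
+//		dst.Write(buf[:n])
+//		i++
+//	}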
+
+// Close signals all things waiting on the server response body to complete.
+// This should be triggered when the origin read returns io.EOF
+func (pfc *progressiveCollapseForwarder) Close() {
+ atomic.AddInt32(&pfc.serverReadDone, 1)
+ pfc.serverWaitCond.Signal()
+ pfc.readCond.Broadcast()
+}
+
+// IndexRead returns the data at the given index if the requested read is behind the PCF's read index; otherwise it blocks and waits for the data
+func (pfc *progressiveCollapseForwarder) IndexRead(index uint64, b []byte) (int, error) {
+ i := atomic.LoadUint64(&pfc.rIndex)
+ if index >= i {
+ // need to check completion and return io.EOF
+ if index > pfc.dataLen {
+ return 0, ErrReadIndexTooLarge
+ } else if atomic.LoadInt32(&pfc.serverReadDone) != 0 {
+ return 0, io.EOF
+ }
+ pfc.readCond.L.Lock()
+ pfc.readCond.Wait()
+ pfc.readCond.L.Unlock()
+ }
+ copy(b, pfc.data[index])
+ return len(pfc.data[index]), nil
+}
diff --git a/internal/proxy/engines/progressive_collapse_forwarder_test.go b/internal/proxy/engines/progressive_collapse_forwarder_test.go
new file mode 100644
index 000000000..509123cad
--- /dev/null
+++ b/internal/proxy/engines/progressive_collapse_forwarder_test.go
@@ -0,0 +1,263 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var testString = "Hey, I'm an http response body string."
+
+func TestPCFReadWriteSingle(t *testing.T) {
+ w := bytes.NewBuffer(make([]byte, 0, len(testString)))
+ r := strings.NewReader(testString)
+ l := len(testString)
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ var n int64
+ go func() {
+ n, _ = io.Copy(pcf, r)
+ pcf.Close()
+ }()
+ pcf.AddClient(w)
+
+ if n != int64(l) {
+ t.Errorf("PCF could not copy full length of reader")
+ }
+
+ if w.String() != testString {
+ t.Errorf("PCF result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), w.String(), len(w.String()))
+ }
+}
+
+func TestPCFReadWriteMultiple(t *testing.T) {
+ w := bytes.NewBuffer(make([]byte, 0, len(testString)))
+ w1 := bytes.NewBuffer(make([]byte, 0, len(testString)))
+
+ r := strings.NewReader(testString)
+ l := len(testString)
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ var n int64
+ go func() {
+ n, _ = io.Copy(pcf, r)
+ pcf.Close()
+ }()
+ pcf.AddClient(w)
+ pcf.AddClient(w1)
+
+ if n != int64(l) {
+ t.Errorf("PCF could not copy full length of reader")
+ }
+
+ if w.String() != testString {
+ t.Errorf("PCF result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), w.String(), len(w.String()))
+ }
+
+ if w1.String() != testString {
+ t.Errorf("PCF second client result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), w1.String(), len(w1.String()))
+ }
+}
+
+func TestPCFReadWriteGetBody(t *testing.T) {
+ w := bytes.NewBuffer(make([]byte, 0, len(testString)))
+ r := strings.NewReader(testString)
+ l := len(testString)
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ var n int64
+
+ _, err := pcf.GetBody()
+ if err == nil {
+ t.Errorf("PCF expected an error on an unwritten body")
+ }
+
+ go func() {
+ n, _ = io.Copy(pcf, r)
+ pcf.Close()
+ }()
+ pcf.AddClient(w)
+
+ if n != int64(l) {
+ t.Errorf("PCF could not copy full length of reader")
+ }
+
+ if w.String() != testString {
+ t.Errorf("PCF result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), w.String(), len(w.String()))
+ }
+
+ body, err := pcf.GetBody()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(body) != testString {
+ t.Errorf("PCF result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), string(body), len(body))
+ }
+}
+
+func TestPCFReadWriteClose(t *testing.T) {
+ w := bytes.NewBuffer(make([]byte, 0, len(testString)))
+ r := strings.NewReader(testString)
+ l := len(testString)
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ buf := make([]byte, 2)
+ n, _ := r.Read(buf)
+ pcf.Write(buf)
+ pcf.Close()
+ err := pcf.AddClient(w)
+
+ if err != io.EOF {
+ t.Errorf("PCF Close call did not return io.EOF")
+
+ }
+
+ if n != 2 {
+ t.Errorf("PCF Close read length incorrect, expected 2, got %d", n)
+ }
+}
+
+func TestPCFIndexReadTooLarge(t *testing.T) {
+ r := strings.NewReader(testString)
+ l := len(testString)
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ buf := make([]byte, 2)
+ r.Read(buf)
+ pcf.Write(buf)
+ pcf.Close()
+
+ _, err := pcf.IndexRead(12412, buf)
+
+ if err != ErrReadIndexTooLarge {
+		t.Errorf("PCF did not return ErrReadIndexTooLarge, got %v", err)
+ }
+}
+
+func TestPCFReadLarge(t *testing.T) {
+ r := bytes.NewBuffer(make([]byte, 64000))
+ w := bytes.NewBuffer(make([]byte, 64000))
+ l := r.Len()
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ var n int64
+ go func() {
+ n, _ = io.Copy(pcf, r)
+ pcf.Close()
+ }()
+ pcf.AddClient(w)
+
+ if n != int64(l) {
+ t.Errorf("PCF could not copy full length of reader")
+ }
+
+ if bytes.Equal(r.Bytes(), w.Bytes()) {
+ t.Errorf("PCF result was not correct, expected: \"%s\" (Len: %d), got: \"%s\" (Len: %d)", testString, len(testString), w.String(), len(w.String()))
+ }
+}
+
+func TestPCFResp(t *testing.T) {
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, 10)
+
+ if !reflect.DeepEqual(resp, pcf.GetResp()) {
+ t.Errorf("PCF GetResp failed to reproduce the original http response.")
+ }
+}
+
+func BenchmarkPCFWrite(b *testing.B) {
+	// 100MB object, simulating actual use-case sizes.
+ b.N = 3200
+
+ testBytes := make([]byte, 32*1024)
+ l := b.N * 32 * 1024
+ resp := &http.Response{}
+
+ pcf := NewPCF(resp, int64(l))
+ b.SetBytes(32 * 1024)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ pcf.Write(testBytes)
+ }
+}
+
+func BenchmarkPCFRead(b *testing.B) {
+ b.N = 3200
+
+ testBytes := make([]byte, 32*1024)
+ readBuf := make([]byte, 32*1024)
+
+ l := b.N * 32 * 1024
+ resp := &http.Response{}
+
+ var readIndex uint64
+ var err error
+
+ pcf := NewPCF(resp, int64(l))
+ b.SetBytes(32 * 1024)
+ for i := 0; i < b.N; i++ {
+ pcf.Write(testBytes)
+ }
+
+ b.ResetTimer()
+
+ for n := 0; n < b.N; n++ {
+ _, err = pcf.IndexRead(readIndex, readBuf)
+ readIndex++
+ if err != nil {
+ break
+ }
+ }
+}
+
+func BenchmarkPCFWriteRead(b *testing.B) {
+ b.N = 3200
+
+ testBytes := make([]byte, 32*1024)
+ readBuf := make([]byte, 32*1024)
+
+ l := b.N * 32 * 1024
+ resp := &http.Response{}
+
+ var readIndex uint64
+ var err error
+
+ pcf := NewPCF(resp, int64(l))
+ b.SetBytes(32 * 1024)
+
+ b.ResetTimer()
+
+ for n := 0; n < b.N; n++ {
+ pcf.Write(testBytes)
+ _, err = pcf.IndexRead(readIndex, readBuf)
+ readIndex++
+ if err != nil {
+ break
+ }
+ }
+}
diff --git a/internal/proxy/engines/proxy_request.go b/internal/proxy/engines/proxy_request.go
new file mode 100644
index 000000000..8c2ca7434
--- /dev/null
+++ b/internal/proxy/engines/proxy_request.go
@@ -0,0 +1,661 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+ "github.com/Comcast/trickster/internal/config"
+ tctx "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+type proxyRequest struct {
+ *http.Request
+ responseWriter io.Writer
+ responseBody []byte
+
+ upstreamRequest *http.Request
+ upstreamResponse *http.Response
+ upstreamReader io.Reader
+
+ // for parallel requests
+ originRequests []*http.Request
+ originResponses []*http.Response
+ originReaders []io.ReadCloser
+
+ revalidationRequest *http.Request
+ revalidationResponse *http.Response
+ revalidationReader io.ReadCloser
+
+ cacheDocument *HTTPDocument
+ cacheBuffer *bytes.Buffer
+
+ key string
+ started time.Time
+ elapsed time.Duration
+ cacheStatus status.LookupStatus
+ writeToCache bool
+
+ wantsRanges bool
+ wantedRanges byterange.Ranges
+ neededRanges byterange.Ranges
+ rangeParts byterange.MultipartByteRanges
+
+ isPartialResponse bool
+ contentLength int64
+ revalidation RevalidationStatus
+ wasReconstituted bool
+ trueContentType string
+
+ collapsedForwarder ProgressiveCollapseForwarder
+ cachingPolicy *CachingPolicy
+}
+
+// newProxyRequest accepts the original inbound HTTP Request and Response
+// and returns a proxyRequest object
+func newProxyRequest(r *http.Request, w io.Writer) *proxyRequest {
+
+ pr := &proxyRequest{
+ Request: r,
+ upstreamRequest: r.Clone(context.Background()),
+ contentLength: -1,
+ responseWriter: w,
+ started: time.Now(),
+ }
+
+ rsc := request.GetResources(r)
+ pr.upstreamRequest = pr.upstreamRequest.WithContext(tctx.WithResources(pr.upstreamRequest.Context(), rsc))
+
+ return pr
+}
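+
+// A rough, illustrative ordering of the methods below for a cacheable request; the exact
+// sequence and branching are driven by the object proxy cache engine (see
+// ObjectProxyCacheRequest / FetchViaObjectProxyCache):
+//
+//	pr := newProxyRequest(r, w)
+//	pr.parseRequestRanges()         // detect Range headers on the inbound request
+//	pr.checkCacheFreshness()        // is the cached document still fresh?
+//	pr.prepareRevalidationRequest() // only when the cached object can be revalidated
+//	pr.prepareUpstreamRequests()    // split needed ranges into parallel origin requests
+//	pr.makeUpstreamRequests()       // issue them concurrently
+//	pr.reconstituteResponses()      // merge the parallel responses into one
+//	pr.determineCacheability()      // decide whether to write back to the cache
+//	pr.prepareResponse()            // shape the client-facing status, headers and body
+//	pr.setBodyWriter()              // tee the body to the client and/or the cache buffer
+//	pr.writeResponseHeader()
+//	pr.writeResponseBody()
+//	pr.store()                      // persist to the cache when applicable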
+
+func (pr *proxyRequest) Clone() *proxyRequest {
+ return &proxyRequest{
+ Request: pr.Request.Clone(context.Background()),
+ cacheDocument: pr.cacheDocument,
+ key: pr.key,
+ cacheStatus: pr.cacheStatus,
+ writeToCache: pr.writeToCache,
+ wantsRanges: pr.wantsRanges,
+ wantedRanges: pr.wantedRanges,
+ neededRanges: pr.neededRanges,
+ rangeParts: pr.rangeParts,
+ collapsedForwarder: pr.collapsedForwarder,
+ cachingPolicy: pr.cachingPolicy,
+ revalidation: pr.revalidation,
+ isPartialResponse: pr.isPartialResponse,
+ started: time.Now(),
+ }
+}
+
+// Fetch makes an HTTP request to the provided Origin URL, bypassing the Cache, and returns the
+// response and elapsed time to the caller.
+func (pr *proxyRequest) Fetch() ([]byte, *http.Response, time.Duration) {
+
+ rsc := request.GetResources(pr.Request)
+ oc := rsc.OriginConfig
+ pc := rsc.PathConfig
+
+ var handlerName string
+ if pc != nil {
+ handlerName = pc.HandlerName
+ }
+
+ start := time.Now()
+ reader, resp, _ := PrepareFetchReader(pr.Request)
+
+ var body []byte
+ var err error
+ if reader != nil {
+ body, err = ioutil.ReadAll(reader)
+ resp.Body.Close()
+ }
+ if err != nil {
+ log.Error("error reading body from http response", log.Pairs{"url": pr.URL.String(), "detail": err.Error()})
+ return []byte{}, resp, 0
+ }
+
+ elapsed := time.Since(start) // includes any time required to decompress the document for deserialization
+
+ if config.Logging.LogLevel == "debug" || config.Logging.LogLevel == "trace" {
+ go logUpstreamRequest(oc.Name, oc.OriginType, handlerName, pr.Method, pr.URL.String(), pr.UserAgent(), resp.StatusCode, len(body), elapsed.Seconds())
+ }
+
+ return body, resp, elapsed
+}
+
+func (pr *proxyRequest) prepareRevalidationRequest() {
+
+ pr.revalidation = RevalStatusInProgress
+ pr.revalidationRequest = request.SetResources(pr.upstreamRequest.Clone(context.Background()), request.GetResources(pr.Request))
+
+ if pr.cacheStatus == status.LookupStatusPartialHit {
+ var rh string
+ d := pr.cacheDocument
+ cl := d.ContentLength
+
+ rsc := request.GetResources(pr.Request)
+ // revalRanges are the ranges we have in cache that have expired, but the user needs
+ // so we revalidate these ranges in parallel with fetching of the uncached ranges
+
+ var wr byterange.Ranges
+
+ if pr.wantedRanges != nil && len(pr.wantedRanges) > 0 {
+ wr = pr.wantedRanges
+ } else {
+ wr = byterange.Ranges{{Start: 0, End: cl}}
+ }
+
+ revalRanges := wr.CalculateDelta(pr.neededRanges, cl)
+ l := len(revalRanges)
+ if (l > 1 && rsc.OriginConfig.DearticulateUpstreamRanges) && len(pr.cacheDocument.Ranges) == 1 {
+ rh = pr.cacheDocument.Ranges.String()
+ } else if l == 1 {
+ rh = revalRanges.String()
+ }
+
+ if rh != "" {
+ pr.revalidationRequest.Header.Set(headers.NameRange, rh)
+ } else {
+ pr.revalidationRequest.Header.Del(headers.NameRange)
+ }
+
+ }
+
+ if pr.cachingPolicy.ETag != "" {
+ pr.revalidationRequest.Header.Set(headers.NameIfNoneMatch, pr.cachingPolicy.ETag)
+ }
+ if !pr.cachingPolicy.LastModified.IsZero() {
+ pr.revalidationRequest.Header.Set(headers.NameIfModifiedSince, pr.cachingPolicy.LastModified.Format(time.RFC1123))
+ }
+
+}
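+
+// Illustrative example of the partial-hit revalidation math above (values are hypothetical):
+// with wantedRanges of 0-10 and 12-20 and neededRanges (not in cache) of 12-20, the delta 0-10
+// is the stale-but-cached span, so the revalidation request carries a Range header for 0-10
+// plus the If-None-Match / If-Modified-Since validators, while the uncached 12-20 is fetched
+// separately via prepareUpstreamRequests.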
+
+func (pr *proxyRequest) setRangeHeader(h http.Header) {
+ if pr.neededRanges != nil && len(pr.neededRanges) > 0 {
+ pr.cachingPolicy.IsFresh = false
+ h.Set(headers.NameRange, pr.neededRanges.String())
+ }
+}
+
+func (pr *proxyRequest) prepareUpstreamRequests() {
+
+ pr.setRangeHeader(pr.upstreamRequest.Header)
+
+ pr.stripConditionalHeaders()
+ rsc := request.GetResources(pr.Request)
+ if pr.originRequests == nil {
+ var l int
+ if pr.neededRanges == nil {
+ l = 1
+ } else {
+ l = len(pr.neededRanges)
+ }
+ pr.originRequests = make([]*http.Request, 0, l)
+ }
+
+ rsc.OriginConfig.DearticulateUpstreamRanges = true
+ // if we are articulating the origin range requests, break those out here
+ if pr.neededRanges != nil && len(pr.neededRanges) > 0 && rsc.OriginConfig.DearticulateUpstreamRanges {
+ for _, r := range pr.neededRanges {
+ req := request.SetResources(pr.upstreamRequest.Clone(context.Background()), rsc)
+ req.Header.Set(headers.NameRange, "bytes="+r.String())
+ pr.originRequests = append(pr.originRequests, req)
+ }
+ } else { // otherwise it will just be a list of one request.
+ pr.originRequests = []*http.Request{pr.upstreamRequest}
+ }
+}
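+
+// For example (illustrative): with neededRanges of 0-6 and 25-32 and dearticulation enabled,
+// two parallel upstream requests are prepared, one with "Range: bytes=0-6" and one with
+// "Range: bytes=25-32"; otherwise a single upstream request carries the combined range header.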
+
+func (pr *proxyRequest) makeUpstreamRequests() error {
+
+ wg := sync.WaitGroup{}
+
+ if pr.revalidationRequest != nil {
+ wg.Add(1)
+ go func() {
+ pr.revalidationReader, pr.revalidationResponse, _ = PrepareFetchReader(pr.revalidationRequest)
+ wg.Done()
+ }()
+ }
+
+ if pr.originRequests != nil && len(pr.originRequests) > 0 {
+ pr.originResponses = make([]*http.Response, len(pr.originRequests))
+ pr.originReaders = make([]io.ReadCloser, len(pr.originRequests))
+ for i := range pr.originRequests {
+ wg.Add(1)
+ go func(j int) {
+ pr.originReaders[j], pr.originResponses[j], _ = PrepareFetchReader(pr.originRequests[j])
+ wg.Done()
+ }(i)
+ }
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+func (pr *proxyRequest) checkCacheFreshness() bool {
+ cp := pr.cachingPolicy
+ if pr.cachingPolicy == nil {
+ return false
+ }
+ cp.IsFresh = !cp.LocalDate.Add(time.Duration(cp.FreshnessLifetime) * time.Second).Before(time.Now())
+ return cp.IsFresh
+}
+
+func (pr *proxyRequest) parseRequestRanges() bool {
+ // handle byte range requests
+ var out byterange.Ranges
+ if _, ok := pr.Header[headers.NameRange]; ok {
+ out = byterange.ParseRangeHeader(pr.Header.Get(headers.NameRange))
+ }
+ pr.wantsRanges = out != nil && len(out) > 0
+ pr.wantedRanges = out
+
+	// if multipart range support is disabled for this origin, force a full-body upstream request
+ rsc := request.GetResources(pr.Request)
+ if rsc.OriginConfig.MultipartRangesDisabled && len(pr.wantedRanges) > 1 {
+ pr.upstreamRequest.Header.Del(headers.NameRange)
+ pr.wantsRanges = false
+ pr.wantedRanges = nil
+ }
+
+ return pr.wantsRanges
+}
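+
+// For example (illustrative): "Range: bytes=0-10,20-30" parses into two wanted ranges and marks
+// the request as wanting ranges; if the origin's MultipartRangesDisabled option is set and more
+// than one range was requested, the Range header is dropped from the upstream request and the
+// full body is requested instead.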
+
+func (pr *proxyRequest) stripConditionalHeaders() {
+ // don't proxy these up, their scope is only between Trickster and client
+ if pr.cachingPolicy != nil && pr.cachingPolicy.IsClientConditional {
+ stripConditionalHeaders(pr.upstreamRequest.Header)
+ }
+}
+
+func (pr *proxyRequest) writeResponseHeader() {
+ headers.SetResultsHeader(pr.upstreamResponse.Header, "ObjectProxyCache", pr.cacheStatus.String(), "", nil)
+}
+
+func (pr *proxyRequest) setBodyWriter() {
+
+ PrepareResponseWriter(pr.responseWriter, pr.upstreamResponse.StatusCode, pr.upstreamResponse.Header)
+
+ if pr.writeToCache && pr.cacheBuffer == nil {
+ pr.cacheBuffer = &bytes.Buffer{}
+
+ if pr.cachingPolicy.IsClientFresh {
+ // don't write response body to the client on a 304 Not Modified
+ pr.responseWriter = pr.cacheBuffer
+ if pr.upstreamResponse.StatusCode == http.StatusNotModified {
+ pr.upstreamResponse.StatusCode = http.StatusOK
+ }
+ } else {
+ // we need to write to both the client over the wire, and the cache buffer
+ pr.responseWriter = io.MultiWriter(pr.responseWriter, pr.cacheBuffer)
+ }
+ } else if pr.upstreamResponse.StatusCode == http.StatusNotModified {
+ pr.responseWriter = nil
+ }
+}
+
+func (pr *proxyRequest) writeResponseBody() {
+ if pr.upstreamReader == nil || pr.responseWriter == nil {
+ return
+ }
+ io.Copy(pr.responseWriter, pr.upstreamReader)
+
+}
+
+func (pr *proxyRequest) determineCacheability() {
+
+ rsc := request.GetResources(pr.Request)
+ resp := pr.upstreamResponse
+
+ if resp != nil && resp.StatusCode >= 400 {
+ pr.writeToCache = pr.cachingPolicy.IsNegativeCache
+ resp.Header.Del(headers.NameCacheControl)
+ resp.Header.Del(headers.NameExpires)
+ resp.Header.Del(headers.NameLastModified)
+ resp.Header.Del(headers.NameETag)
+ resp.Header.Del(headers.NameContentLength)
+ return
+ }
+
+ if pr.revalidation == RevalStatusLocal {
+
+ tpc := pr.cachingPolicy.Clone()
+ tpc.IfModifiedSinceTime = pr.cacheDocument.CachingPolicy.LastModified
+ tpc.IfNoneMatchValue = pr.cacheDocument.CachingPolicy.ETag
+ tpc.IsClientConditional = true
+ tpc.ResolveClientConditionals(pr.cacheStatus)
+ if !tpc.IsClientFresh {
+			// in this case the range miss becomes a key miss since the old range failed revalidation
+ pr.cacheStatus = status.LookupStatusKeyMiss
+ pr.cacheDocument = nil
+ }
+ }
+
+ if rsc.AlternateCacheTTL > 0 {
+ pr.writeToCache = true
+ pr.cachingPolicy = &CachingPolicy{LocalDate: time.Now(), FreshnessLifetime: int(rsc.AlternateCacheTTL.Seconds())}
+ return
+ }
+
+ if pr.cachingPolicy.NoCache || (!pr.cachingPolicy.CanRevalidate && pr.cachingPolicy.FreshnessLifetime <= 0) {
+ pr.writeToCache = false
+ rsc.CacheClient.Remove(pr.key)
+		// otherwise the response is cacheable or can be revalidated; write it to the cache if it is not already fresh
+ } else if !pr.cachingPolicy.IsFresh {
+ pr.writeToCache = true
+ }
+}
+
+func (pr *proxyRequest) store() error {
+
+ if !pr.writeToCache || pr.cacheDocument == nil {
+ return nil
+ }
+
+ d := pr.cacheDocument
+
+ pr.writeToCache = false // in case store is called again before the object has changed
+
+ d.StoredRangeParts = d.RangeParts.PackableMultipartByteRanges()
+
+ if pr.trueContentType != "" {
+ pr.Header.Del(headers.NameContentType)
+ http.Header(d.Headers).Del(headers.NameContentType)
+ d.ContentType = pr.trueContentType
+ }
+
+ rsc := request.GetResources(pr.Request)
+ oc := rsc.OriginConfig
+
+ rf := oc.RevalidationFactor
+ if rsc.AlternateCacheTTL > 0 {
+ rf = 1
+ }
+
+ d.CachingPolicy = pr.cachingPolicy
+ err := WriteCache(rsc.CacheClient, pr.key, d, pr.cachingPolicy.TTL(rf, oc.MaxTTL), oc.CompressableTypes)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (pr *proxyRequest) updateContentLength() {
+
+ resp := pr.upstreamResponse
+ if resp == nil || pr.responseBody == nil || pr.upstreamResponse.StatusCode > 299 {
+ return
+ }
+
+ resp.Header.Del(headers.NameContentLength)
+ pr.contentLength = int64(len(pr.responseBody))
+ resp.ContentLength = pr.contentLength
+
+ pr.upstreamReader = bytes.NewReader(pr.responseBody)
+}
+
+func (pr *proxyRequest) prepareResponse() {
+
+ pr.cachingPolicy.ResolveClientConditionals(pr.cacheStatus)
+
+ d := pr.cacheDocument
+ resp := pr.upstreamResponse
+
+ // if all of the client conditional headers were satisfied,
+ // return 304
+ if pr.cachingPolicy.IsClientFresh {
+ // 304 on an If-None-Match only applies to GET/HEAD requests
+ // this bit will convert an INM-based 304 to a 412 on non-GET/HEAD
+ if (pr.Method != http.MethodGet && pr.Method != http.MethodHead) &&
+ pr.cachingPolicy.HasIfNoneMatch && !pr.cachingPolicy.IfNoneMatchResult {
+ pr.upstreamResponse.StatusCode = http.StatusPreconditionFailed
+ } else {
+ resp.StatusCode = http.StatusNotModified
+ }
+ pr.responseBody = []byte{}
+ pr.updateContentLength()
+
+ return
+ }
+
+ if pr.wantsRanges && (resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent) {
+
+ // since the user wants ranges, we have to extract them from what we have already
+ if !d.isLoaded &&
+ (pr.cacheStatus == status.LookupStatusPartialHit || pr.cacheStatus == status.LookupStatusKeyMiss || pr.cacheStatus == status.LookupStatusRangeMiss) {
+ var b []byte
+ if pr.upstreamReader != nil {
+ b, _ = ioutil.ReadAll(pr.upstreamReader)
+ }
+ d = DocumentFromHTTPResponse(pr.upstreamResponse, b, pr.cachingPolicy)
+ pr.cacheBuffer = bytes.NewBuffer(b)
+ if pr.writeToCache {
+ d.isLoaded = true
+ pr.cacheDocument = d
+ }
+ }
+
+ // we will need to stitch in a temporary content type header if it is a multipart response,
+ // but need the original content type and length if we are also writing to the cache
+ pr.trueContentType = resp.Header.Get(headers.NameContentType)
+ pr.contentLength = d.ContentLength
+
+ resp.StatusCode = http.StatusPartialContent
+
+ if d.Ranges != nil && len(d.Ranges) > 0 {
+ d.LoadRangeParts()
+ }
+ var h http.Header
+ pr.trueContentType = d.ContentType
+ h, pr.responseBody = d.RangeParts.ExtractResponseRange(pr.wantedRanges, d.ContentLength, d.ContentType, d.Body)
+ headers.Merge(resp.Header, h)
+ pr.upstreamReader = bytes.NewBuffer(pr.responseBody)
+ } else if !pr.wantsRanges {
+ if resp.StatusCode == http.StatusPartialContent {
+ resp.StatusCode = http.StatusOK
+ }
+ resp.Header.Del(headers.NameContentRange)
+ if pr.cacheStatus == status.LookupStatusHit || pr.cacheStatus == status.LookupStatusRevalidated || pr.cacheStatus == status.LookupStatusPartialHit {
+ pr.responseBody = d.Body
+ }
+ }
+
+ pr.updateContentLength()
+
+}
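+
+// Illustrative outcomes of prepareResponse: a conditional request whose validators still match
+// is collapsed to 304 Not Modified with an empty body (an If-None-Match-based 304 on a
+// non-GET/HEAD method becomes 412 Precondition Failed); a ranged request is answered with 206
+// Partial Content and a body extracted from the document's range parts; a non-ranged request is
+// normalized to 200 OK (any upstream 206 is converted and Content-Range dropped), with the body
+// served from the cached document on hit, revalidated, or partial-hit lookups.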
+
+// reconstituteResponses arranges and processes multiple upstream responses so that
+// we end up with just one response for the initial request
+func (pr *proxyRequest) reconstituteResponses() {
+
+ // rsc1 := request.GetResources(pr.Request)
+
+ hasRevalidationRequest := pr.revalidationRequest != nil
+
+ var wasRevalidated bool
+ if hasRevalidationRequest {
+ pr.upstreamRequest = pr.revalidationRequest
+ pr.upstreamResponse = pr.revalidationResponse
+ pr.upstreamReader = pr.upstreamResponse.Body
+ wasRevalidated = hasRevalidationRequest && pr.revalidationResponse.StatusCode == http.StatusNotModified
+ }
+
+ var originCount int
+ if pr.originRequests != nil {
+ originCount = len(pr.originRequests)
+ }
+
+ var requestCount int
+ if hasRevalidationRequest && !wasRevalidated {
+ requestCount = originCount + 1
+ } else {
+ requestCount = originCount
+ }
+
+ if requestCount == 0 && !hasRevalidationRequest {
+ return
+ }
+	// if we have a revalidation request, and its response is a 200 OK or it is the only upstream request,
+	// we set the primary source response to the revalidation response
+ if hasRevalidationRequest &&
+ (originCount == 0 || pr.revalidationResponse.StatusCode == http.StatusOK) {
+ requestCount = 1
+ } else if (!hasRevalidationRequest || wasRevalidated) && originCount == 1 {
+ // if we only have a single request, and it's a normal originRequest, set that to the response
+ // or if we had a revalidation request that was revalidated, and only one other origin request
+ pr.upstreamRequest = pr.originRequests[0]
+ pr.upstreamResponse = pr.originResponses[0]
+ pr.upstreamReader = pr.originResponses[0].Body
+ requestCount = 1
+ }
+
+ // if the revalidation request 304'd, we actually don't have to do anything else with it here.
+ hasRevalidationRequest = hasRevalidationRequest && !wasRevalidated
+
+ // first pass to handle any potential 200 OKs that should trump all other part-based responses
+ if requestCount > 1 {
+ for i := range pr.originRequests {
+ if pr.originResponses[i].StatusCode == http.StatusOK {
+ pr.upstreamRequest = pr.originRequests[i]
+ pr.upstreamResponse = pr.originResponses[i]
+ pr.upstreamReader = pr.originResponses[i].Body
+ //pr.cachingPolicy = &CachingPolicy{}
+ pr.upstreamResponse.Header.Del(headers.NameContentRange)
+ requestCount = 1
+ break
+ }
+ }
+ }
+
+ // if all requests were 206, we have to reconstitute to a single multipart body
+ pr.wasReconstituted = requestCount > 1
+
+ if pr.wasReconstituted {
+
+		// in this case, we should _not_ use the revalidation request as the base upstreamResponse,
+		// since its response could be a 304 Not Modified rather than a 200 or 206, while this
+		// code path assumes a fresh 200/206 body
+
+ pr.upstreamReader = nil
+ pr.upstreamResponse = nil
+
+ appendLock := sync.Mutex{}
+ wg := sync.WaitGroup{}
+ parts := &HTTPDocument{}
+
+ if hasRevalidationRequest {
+			// if one of the parallel requests was a revalidation, it means the part we have in cache has expired.
+			// The StatusCode will be either: 1) 304 Not Modified (the cached content is still fresh), or
+			// 2) 206 Partial Content (the cache is stale, and the returned range is the user-requested range
+			// that was stale in cache, now served fresh from the origin). The 200 case was already handled further up.
+ resp := pr.revalidationResponse
+
+			// if it's a 304 Not Modified, just don't do anything, since the cached document is good as-is, and
+			// the new responses below will be returned to be merged with the existing cache; so we only check for 206 here.
+ if resp.StatusCode == http.StatusPartialContent {
+ wg.Add(1)
+ go func() {
+					// we have some partial content to merge in, but the original cached document
+					// is now invalid, so go ahead and reset it.
+ b, _ := ioutil.ReadAll(resp.Body)
+ appendLock.Lock()
+ parts.ParsePartialContentBody(resp, b)
+ appendLock.Unlock()
+ wg.Done()
+ }()
+ }
+ }
+
+ for i := range pr.originRequests {
+ wg.Add(1)
+ go func(j int) {
+ r := pr.originRequests[j]
+ resp := pr.originResponses[j]
+
+ if pr.upstreamResponse == nil {
+ // only set the upstream response
+ appendLock.Lock()
+ if pr.upstreamResponse == nil {
+ pr.upstreamRequest = r
+ pr.upstreamResponse = resp
+ }
+ appendLock.Unlock()
+ }
+
+ if resp.StatusCode == http.StatusPartialContent {
+ b, _ := ioutil.ReadAll(resp.Body)
+ appendLock.Lock()
+ parts.ParsePartialContentBody(resp, b)
+ appendLock.Unlock()
+ }
+ wg.Done()
+ }(i)
+ }
+
+ // all the response bodies are loading in parallel. Wait until they are done.
+ wg.Wait()
+
+ resp := pr.upstreamResponse
+
+ parts.Ranges = parts.RangeParts.Ranges()
+
+ bodyFromParts := false
+ if len(parts.Ranges) > 0 {
+ resp.Header.Del(headers.NameContentRange)
+ pr.trueContentType = parts.ContentType
+ if bodyFromParts = len(parts.Ranges) > 1; !bodyFromParts {
+ err := parts.FulfillContentBody()
+ if bodyFromParts = err != nil; !bodyFromParts {
+ pr.upstreamReader = bytes.NewBuffer(parts.Body)
+ resp.StatusCode = http.StatusOK
+ pr.cacheBuffer = bytes.NewBuffer(parts.Body)
+ }
+ }
+ } else {
+ pr.upstreamReader = bytes.NewBuffer(parts.Body)
+ }
+
+ if bodyFromParts {
+ h, b := parts.RangeParts.Body(parts.ContentLength, parts.ContentType)
+ headers.Merge(resp.Header, h)
+ pr.upstreamReader = bytes.NewBuffer(b)
+ }
+ }
+
+ pr.isPartialResponse = pr.upstreamResponse.StatusCode == http.StatusPartialContent
+
+ // now we merge the caching policy of the new upstreams
+ if pr.upstreamResponse.StatusCode != http.StatusNotModified {
+ rsc := request.GetResources(pr.Request)
+ pr.cachingPolicy.Merge(GetResponseCachingPolicy(pr.upstreamResponse.StatusCode,
+ rsc.OriginConfig.NegativeCache, pr.upstreamResponse.Header))
+
+ }
+
+}
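+
+// Illustrative example of reconstitution: suppose a partial hit required both a revalidation of
+// the stale cached range and a fetch of one uncached range. If the revalidation returns 304, the
+// cached content remains valid and the parallel 206 simply becomes the upstream response; if any
+// parallel request returns a full 200 OK, that response wins outright and the range responses are
+// ignored; and if several 206s come back, their parts are parsed into a single document and either
+// fulfilled into one contiguous 200 body or served as a reconstituted multipart 206.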
diff --git a/internal/proxy/engines/proxy_request_test.go b/internal/proxy/engines/proxy_request_test.go
new file mode 100644
index 000000000..23acf5a08
--- /dev/null
+++ b/internal/proxy/engines/proxy_request_test.go
@@ -0,0 +1,367 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache/status"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/ranges/byterange"
+ "github.com/Comcast/trickster/internal/proxy/request"
+)
+
+func TestCheckCacheFreshness(t *testing.T) {
+
+ // CachingPolicy should be nil and will return false
+ pr := proxyRequest{}
+ if pr.checkCacheFreshness() {
+ t.Errorf("got %t expected %t", pr.checkCacheFreshness(), false)
+ }
+
+}
+
+func TestParseRequestRanges(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+
+ oc := &config.OriginConfig{MultipartRangesDisabled: true}
+ r = request.SetResources(r, request.NewResources(oc, nil, nil, nil, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ upstreamRequest: r,
+ }
+ pr.parseRequestRanges()
+
+ if pr.wantedRanges == nil || len(pr.wantedRanges) < 1 {
+ t.Errorf("unexpected range parse: %v", pr.wantedRanges)
+ }
+
+ r.Header.Set(headers.NameRange, "bytes=0-10,12-20")
+ pr.parseRequestRanges()
+
+ if pr.wantedRanges != nil {
+		t.Errorf("expected nil, got %s", pr.wantedRanges.String())
+ }
+}
+
+func TestStripConditionalHeaders(t *testing.T) {
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameIfNoneMatch, "test")
+ pr := proxyRequest{
+ upstreamRequest: r,
+ cachingPolicy: &CachingPolicy{IsClientConditional: true},
+ }
+ pr.stripConditionalHeaders()
+ if v := r.Header.Get(headers.NameIfNoneMatch); v == "test" {
+ t.Errorf("expected header to be stripped: %s", headers.NameIfNoneMatch)
+ }
+}
+
+func TestSetBodyWriter(t *testing.T) {
+
+ buff := make([]byte, 0)
+ pr := proxyRequest{
+ writeToCache: true,
+ contentLength: -1,
+ responseWriter: bytes.NewBuffer(buff),
+ upstreamResponse: &http.Response{StatusCode: http.StatusOK},
+ cachingPolicy: &CachingPolicy{},
+ }
+
+ PrepareResponseWriter(pr.responseWriter, pr.upstreamResponse.StatusCode, pr.upstreamResponse.Header)
+
+ pr.setBodyWriter()
+ if pr.cacheBuffer == nil {
+ t.Error(errors.New("expected non-nil cacheBody"))
+ }
+
+ pr.cachingPolicy.IsClientFresh = true
+ pr.cacheBuffer = nil
+ pr.upstreamResponse.StatusCode = http.StatusNotModified
+
+ pr.setBodyWriter()
+ if pr.cacheBuffer == nil {
+ t.Error(errors.New("expected non-nil cacheBody"))
+ }
+
+}
+
+func TestWriteResponseBody(t *testing.T) {
+
+ pr := proxyRequest{}
+ pr.writeResponseBody()
+ if pr.responseWriter != nil {
+ t.Error(errors.New("expected nil writer"))
+ }
+}
+
+func TestDetermineCacheability(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1", nil)
+ r = request.SetResources(r, request.NewResources(nil, nil, cache.Configuration(), cache, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ cachingPolicy: &CachingPolicy{NoCache: true, LastModified: time.Unix(1, 0)},
+ writeToCache: true,
+ cacheDocument: &HTTPDocument{
+ CachingPolicy: &CachingPolicy{},
+ },
+ }
+ pr.determineCacheability()
+ if pr.writeToCache {
+ t.Errorf("expected %t got %t", false, pr.writeToCache)
+ }
+
+ pr.revalidation = RevalStatusLocal
+ pr.cacheDocument.CachingPolicy.LastModified = time.Unix(0, 0)
+ pr.cachingPolicy.NoCache = false
+ pr.cachingPolicy.HasIfModifiedSince = true
+ pr.determineCacheability()
+
+ if pr.cacheStatus != status.LookupStatusKeyMiss {
+ t.Errorf("expected %s got %s", status.LookupStatusKeyMiss, pr.cacheStatus)
+ }
+}
+
+func TestStoreNoWrite(t *testing.T) {
+ pr := proxyRequest{}
+ err := pr.store()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestUpdateContentLengthNilResponse(t *testing.T) {
+ pr := proxyRequest{contentLength: -1}
+ pr.updateContentLength()
+ if pr.contentLength != -1 {
+ t.Errorf("expected %d got %d", -1, pr.contentLength)
+ }
+}
+
+func TestPrepareResponse(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameRange, "bytes=0-10")
+
+ oc := &config.OriginConfig{}
+ r = request.SetResources(r, request.NewResources(oc, nil, nil, nil, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ cachingPolicy: &CachingPolicy{},
+ upstreamResponse: &http.Response{StatusCode: http.StatusOK},
+ cacheDocument: &HTTPDocument{},
+ }
+ pr.parseRequestRanges()
+
+ pr.cacheDocument.Ranges = pr.wantedRanges
+
+ if !pr.wantsRanges || len(pr.wantedRanges) < 1 {
+ t.Errorf("unexpected range parse: %v", pr.wantedRanges)
+ }
+
+ pr.prepareResponse()
+
+ // test again with full body and needed ranges
+ pr.upstreamResponse.StatusCode = http.StatusOK
+ pr.cacheStatus = status.LookupStatusKeyMiss
+ pr.writeToCache = true
+ pr.upstreamReader = bytes.NewBufferString("trickster")
+ headers.Merge(pr.upstreamResponse.Header, http.Header{
+ headers.NameContentRange: {"bytes 0-9"},
+ })
+
+ pr.prepareResponse()
+
+ if pr.upstreamResponse.StatusCode != http.StatusPartialContent {
+ t.Errorf("expected %d got %d", http.StatusPartialContent, pr.upstreamResponse.StatusCode)
+ }
+
+ // test again with no ranges
+ pr.wantsRanges = false
+ pr.wantedRanges = nil
+ pr.prepareResponse()
+
+ //t.Errorf("%s", "foo")
+
+}
+
+func TestPrepareResponsePreconditionFailed(t *testing.T) {
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ pr := proxyRequest{
+ Request: r,
+ cachingPolicy: &CachingPolicy{
+ IsClientConditional: true,
+ IsClientFresh: true,
+ HasIfNoneMatch: true,
+ IfNoneMatchResult: false,
+ ETag: "1234",
+ IfNoneMatchValue: "1234",
+ },
+ upstreamResponse: &http.Response{},
+ cacheDocument: &HTTPDocument{},
+ }
+ pr.Method = http.MethodPost
+ pr.prepareResponse()
+ if pr.upstreamResponse.StatusCode != http.StatusPreconditionFailed {
+ t.Errorf("expected %d got %d", http.StatusPreconditionFailed, pr.upstreamResponse.StatusCode)
+ }
+}
+
+func TestPrepareRevalidationRequest(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameRange, "bytes=0-10,12-20")
+
+ oc := &config.OriginConfig{DearticulateUpstreamRanges: true}
+ r = request.SetResources(r, request.NewResources(oc, nil, nil, nil, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ upstreamRequest: r,
+ cachingPolicy: &CachingPolicy{},
+ upstreamResponse: &http.Response{},
+ cacheDocument: &HTTPDocument{Ranges: byterange.Ranges{byterange.Range{Start: 30, End: 40}}},
+ cacheStatus: status.LookupStatusPartialHit,
+ wantedRanges: byterange.Ranges{{Start: 0, End: 10}, {Start: 12, End: 20}},
+ }
+ pr.prepareRevalidationRequest()
+
+ v := pr.revalidationRequest.Header.Get(headers.NameRange)
+ expected := pr.cacheDocument.Ranges.String()
+
+ if v != expected {
+ t.Errorf("expected %s got %s", expected, v)
+ }
+
+}
+
+func TestPrepareRevalidationRequestNoRange(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameRange, "bytes=0-10,12-20")
+
+ oc := &config.OriginConfig{DearticulateUpstreamRanges: true}
+ r = request.SetResources(r, request.NewResources(oc, nil, nil, nil, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ upstreamRequest: r,
+ cachingPolicy: &CachingPolicy{},
+ upstreamResponse: &http.Response{},
+ cacheDocument: &HTTPDocument{}, //Ranges: byterange.Ranges{byterange.Range{Start: 30, End: 40}}},
+ cacheStatus: status.LookupStatusPartialHit,
+ wantedRanges: byterange.Ranges{{Start: 0, End: 10}, {Start: 12, End: 20}},
+ }
+ pr.prepareRevalidationRequest()
+
+ v := pr.revalidationRequest.Header.Get(headers.NameRange)
+ expected := ""
+
+ if v != expected {
+ t.Errorf("expected %s got %s", expected, v)
+ }
+}
+
+func TestPrepareUpstreamRequests(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ r.Header.Set(headers.NameRange, "bytes=0-10,12-20")
+
+ oc := &config.OriginConfig{DearticulateUpstreamRanges: true}
+ r = request.SetResources(r, request.NewResources(oc, nil, nil, nil, nil))
+
+ pr := proxyRequest{
+ Request: r,
+ upstreamRequest: r,
+ cachingPolicy: &CachingPolicy{},
+ upstreamResponse: &http.Response{},
+ cacheDocument: &HTTPDocument{Ranges: byterange.Ranges{byterange.Range{Start: 30, End: 40}}},
+ cacheStatus: status.LookupStatusPartialHit,
+ wantedRanges: byterange.Ranges{{Start: 0, End: 10}, {Start: 12, End: 20}},
+ neededRanges: byterange.Ranges{{Start: 0, End: 10}, {Start: 12, End: 20}},
+ }
+
+ pr.prepareUpstreamRequests()
+
+ expected := 2
+ v := len(pr.originRequests)
+
+ if v != expected {
+ t.Errorf("expected %d got %d", expected, v)
+ }
+}
+
+func TestStoreTrueContentType(t *testing.T) {
+
+ ts, _, r, _, _ := setupTestHarnessOPC("", "test", http.StatusOK, nil)
+ defer ts.Close()
+
+ expected := "1234"
+
+ pr := proxyRequest{
+ Request: r,
+ cachingPolicy: &CachingPolicy{NoCache: true, LastModified: time.Unix(1, 0)},
+ writeToCache: true,
+ cacheDocument: &HTTPDocument{
+ CachingPolicy: &CachingPolicy{},
+ ContentType: "5678",
+ },
+ trueContentType: expected,
+ }
+
+ err := pr.store()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if pr.cacheDocument.ContentType != expected {
+ t.Errorf("expected %s got %s", expected, pr.cacheDocument.ContentType)
+ }
+
+}
+
+func TestReconstituteResponses(t *testing.T) {
+
+ pr := &proxyRequest{}
+
+ pr.reconstituteResponses()
+ if len(pr.originRequests) != 0 {
+ t.Errorf("expected %d got %d", 0, len(pr.originRequests))
+ }
+
+}
diff --git a/internal/proxy/engines/revalidation_status.go b/internal/proxy/engines/revalidation_status.go
new file mode 100644
index 000000000..fd5ef2cb6
--- /dev/null
+++ b/internal/proxy/engines/revalidation_status.go
@@ -0,0 +1,59 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import "strconv"
+
+// RevalidationStatus enumerates the possible revalidation states of a cached object
+type RevalidationStatus int
+
+const (
+ // RevalStatusNone indicates the object will not undergo revalidation against the origin
+ RevalStatusNone = RevalidationStatus(iota)
+ // RevalStatusInProgress indicates the object is currently being revalidated against the origin
+ RevalStatusInProgress
+ // RevalStatusLocal is used during a cache Range Miss, and indicates that while the user-requested ranges
+ // for an object are uncached and must be fetched from the origin, other ranges for the object are cached
+ // but require revalidation. When a request is in this state, Trickster will use the response headers from
+ // the range miss to locally revalidate the cached content instead of making a separate request to the
+ // origin for revalidating the cached ranges.
+ RevalStatusLocal
+ // RevalStatusOK indicates the object was successfully revalidated against the origin and is still fresh
+ RevalStatusOK
+ // RevalStatusFailed indicates the origin returned a new object for the URL to replace the cached version
+ RevalStatusFailed
+)
+
+var revalidationStatusNames = map[string]RevalidationStatus{
+ "none": RevalStatusNone,
+ "revalidating": RevalStatusInProgress,
+ "revalidated": RevalStatusOK,
+ "failed": RevalStatusFailed,
+ "local": RevalStatusLocal,
+}
+
+var revalidationStatusValues = map[RevalidationStatus]string{
+ RevalStatusNone: "none",
+ RevalStatusInProgress: "revalidating",
+ RevalStatusOK: "revalidated",
+ RevalStatusFailed: "failed",
+ RevalStatusLocal: "local",
+}
+
+func (s RevalidationStatus) String() string {
+ if v, ok := revalidationStatusValues[s]; ok {
+ return v
+ }
+ return strconv.Itoa(int(s))
+}
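
For reference, a minimal usage sketch (illustrative only, not part of this changeset) of the stringer defined above; it assumes only the RevalidationStatus type, constants, and String() method in this file. Values without a mapped name fall back to their integer form:

package engines

import "fmt"

// ExampleRevalidationStatus_String shows the name lookup and the integer
// fallback for a value with no mapped name.
func ExampleRevalidationStatus_String() {
	fmt.Println(RevalStatusLocal.String())
	fmt.Println(RevalidationStatus(42).String())
	// Output:
	// local
	// 42
}
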
diff --git a/internal/proxy/engines/revalidation_status_test.go b/internal/proxy/engines/revalidation_status_test.go
new file mode 100644
index 000000000..91f53c0cf
--- /dev/null
+++ b/internal/proxy/engines/revalidation_status_test.go
@@ -0,0 +1,36 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package engines
+
+import "testing"
+
+func TestRevalidationStatusString(t *testing.T) {
+
+ t1 := RevalStatusNone
+ t2 := RevalStatusInProgress
+ var t3 RevalidationStatus = 10
+
+ if t1.String() != "none" {
+ t.Errorf("expected %s got %s", "none", t1.String())
+ }
+
+ if t2.String() != "revalidating" {
+ t.Errorf("expected %s got %s", "revalidating", t2.String())
+ }
+
+ if t3.String() != "10" {
+ t.Errorf("expected %s got %s", "10", t3.String())
+ }
+
+}
diff --git a/internal/proxy/errors/errors.go b/internal/proxy/errors/errors.go
new file mode 100644
index 000000000..4a1ffd9f1
--- /dev/null
+++ b/internal/proxy/errors/errors.go
@@ -0,0 +1,64 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+// ErrStepParse indicates an error parsing the step interval of a time series request
+var ErrStepParse = errors.New("unable to parse timeseries step from downstream request")
+
+// ErrNotSelectStatement indicates an error that the time series request is not a read-only select query
+var ErrNotSelectStatement = errors.New("not a select statement")
+
+// ErrNotTimeRangeQuery indicates an error that the time series request is not a valid time range query
+var ErrNotTimeRangeQuery = errors.New("not a time range query")
+
+// ErrNoRanges indicates an error that the range request does not contain any usable ranges
+var ErrNoRanges = errors.New("no usable ranges")
+
+// MissingURLParam returns a Formatted Error
+func MissingURLParam(param string) error {
+ return fmt.Errorf("missing URL parameter: [%s]", param)
+}
+
+// TimeArrayEmpty returns a Formatted Error
+func TimeArrayEmpty(param string) error {
+ return fmt.Errorf("time array is nil or empty: [%s]", param)
+}
+
+// InvalidPath returns an error indicating the request path is not valid.
+func InvalidPath(path string) error {
+ return fmt.Errorf("invalid request path: %s", path)
+}
+
+// ParseDuration returns a Duration Parsing Error
+func ParseDuration(input string) (time.Duration, error) {
+ return time.Duration(0), fmt.Errorf("unable to parse duration: %s", input)
+}
+
+// ParseRequestBody returns an error indicating the request body could not
+// be parsed into a valid value.
+func ParseRequestBody(err error) error {
+ return fmt.Errorf("unable to parse request body: %v", err)
+}
+
+// MissingRequestParam returns an error indicating the request is missing a
+// required parameter.
+func MissingRequestParam(param string) error {
+ return fmt.Errorf("missing request parameter: %s", param)
+}
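
A brief hedged sketch (illustrative only, not part of this changeset) contrasting the package-level sentinel errors with the formatted helpers above; it assumes only the names defined in this file and follows the same in-package test convention as errors_test.go below:

package errors

import "testing"

func TestSentinelVersusFormatted(t *testing.T) {
	// Formatted helpers build a fresh error value on each call, so callers
	// should match on the message (or on sentinels such as ErrNoRanges)
	// rather than on error identity.
	a, b := MissingURLParam("start"), MissingURLParam("start")
	if a == b {
		t.Error("expected distinct error values from separate calls")
	}
	if a.Error() != "missing URL parameter: [start]" {
		t.Errorf("unexpected message: %s", a.Error())
	}
}
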
diff --git a/internal/proxy/errors/errors_test.go b/internal/proxy/errors/errors_test.go
new file mode 100644
index 000000000..ffc1bee6c
--- /dev/null
+++ b/internal/proxy/errors/errors_test.go
@@ -0,0 +1,59 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package errors
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestMissingURLParam(t *testing.T) {
+ if MissingURLParam("test").Error() != "missing URL parameter: [test]" {
+ t.Errorf("MissingURLParam failed")
+ }
+}
+
+func TestTimeArrayEmpty(t *testing.T) {
+ if TimeArrayEmpty("test").Error() != "time array is nil or empty: [test]" {
+ t.Errorf("TimeArrayEmpty failed")
+ }
+}
+
+func TestParseDurationError(t *testing.T) {
+ _, err := ParseDuration("test")
+ if err.Error() != "unable to parse duration: test" {
+ t.Errorf("ErrorParseDuration failed")
+ }
+}
+
+func TestInvalidPath(t *testing.T) {
+ err := InvalidPath("test")
+ if err.Error() != "invalid request path: test" {
+ t.Errorf("ErrorInvalidPath failed, got: %v", err.Error())
+ }
+}
+
+func TestParseRequestBody(t *testing.T) {
+ err := ParseRequestBody(fmt.Errorf("test"))
+ if err.Error() != "unable to parse request body: test" {
+ t.Errorf("ParseRequestBody failed, got: %v", err.Error())
+ }
+}
+
+func TestMissingRequestParam(t *testing.T) {
+ err := MissingRequestParam("test")
+ if err.Error() != "missing request parameter: test" {
+ t.Errorf("ErrorMissingRequestParam failed, got: %v", err.Error())
+ }
+}
diff --git a/internal/proxy/handlers/config.go b/internal/proxy/handlers/config.go
new file mode 100644
index 000000000..00e4becb8
--- /dev/null
+++ b/internal/proxy/handlers/config.go
@@ -0,0 +1,35 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/routing"
+)
+
+// RegisterConfigHandler registers the application's config handler
+func RegisterConfigHandler() {
+ routing.Router.HandleFunc(config.Main.ConfigHandlerPath, configHandler).Methods("GET")
+}
+
+// configHandler responds to an HTTP Request with 200 OK and the currently running configuration
+func configHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(headers.NameContentType, headers.ValueTextPlain)
+ w.Header().Set(headers.NameCacheControl, headers.ValueNoCache)
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(config.Config.String()))
+}
diff --git a/internal/proxy/handlers/config_test.go b/internal/proxy/handlers/config_test.go
new file mode 100644
index 000000000..c26c00d0d
--- /dev/null
+++ b/internal/proxy/handlers/config_test.go
@@ -0,0 +1,54 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func TestConfigHandler(t *testing.T) {
+
+ config.Load("trickster-test", "test", []string{"-origin-url", "http://1.2.3.4", "-origin-type", "prometheus"})
+
+ RegisterConfigHandler()
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/trickster/config", nil)
+
+ configHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK and a TOML-formatted config body
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(bodyBytes) < 1 {
+ t.Errorf("missing body in response")
+ }
+
+ if bodyBytes[0] != '[' {
+ t.Errorf("response is not toml format")
+ }
+
+}
diff --git a/internal/proxy/handlers/local.go b/internal/proxy/handlers/local.go
new file mode 100644
index 000000000..777683393
--- /dev/null
+++ b/internal/proxy/handlers/local.go
@@ -0,0 +1,39 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+)
+
+// HandleLocalResponse responds to an HTTP Request based on the local configuration without making any upstream requests
+func HandleLocalResponse(w http.ResponseWriter, r *http.Request) {
+ rsc := request.GetResources(r)
+ p := rsc.PathConfig
+ if p == nil {
+ return
+ }
+ if len(p.ResponseHeaders) > 0 {
+ headers.UpdateHeaders(w.Header(), p.ResponseHeaders)
+ }
+ if p.ResponseCode > 0 {
+ w.WriteHeader(p.ResponseCode)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ w.Write([]byte(p.ResponseBody))
+}
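
A hedged sketch (illustrative only, not part of this changeset) of driving HandleLocalResponse with a PathConfig; the field names and resource-context helpers are the same ones local_test.go below uses, and the 204 status and X-Example header are illustrative values, not defaults:

package handlers

import (
	"net/http/httptest"
	"testing"

	"github.com/Comcast/trickster/internal/config"
	tc "github.com/Comcast/trickster/internal/proxy/context"
	"github.com/Comcast/trickster/internal/proxy/request"
)

func TestHandleLocalResponseSketch(t *testing.T) {
	pc := &config.PathConfig{
		ResponseCode:    204,                                 // a value of 0 falls back to 200 OK
		ResponseBody:    "",                                  // written to the client verbatim
		ResponseHeaders: map[string]string{"X-Example": "1"}, // applied before the status is written
	}
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "http://0/local", nil)
	r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(nil, pc, nil, nil, nil)))
	HandleLocalResponse(w, r)
	if w.Result().StatusCode != 204 {
		t.Errorf("expected 204 got %d", w.Result().StatusCode)
	}
}
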
diff --git a/internal/proxy/handlers/local_test.go b/internal/proxy/handlers/local_test.go
new file mode 100644
index 000000000..8a1e5b48b
--- /dev/null
+++ b/internal/proxy/handlers/local_test.go
@@ -0,0 +1,139 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ tc "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+)
+
+func TestHandleLocalResponse(t *testing.T) {
+
+ config.Load("trickster-test", "test", []string{"-origin-url", "http://1.2.3.4", "-origin-type", "prometheus"})
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/trickster/", nil)
+
+ pc := &config.PathConfig{
+ ResponseCode: 418,
+ ResponseBody: "[test",
+ ResponseBodyBytes: []byte("[test"),
+ ResponseHeaders: map[string]string{headers.NameTricksterResult: "1234"},
+ }
+
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(nil, pc, nil, nil, nil)))
+
+ HandleLocalResponse(w, r)
+ resp := w.Result()
+
+ // it should return the configured 418 status code and body
+ if resp.StatusCode != 418 {
+ t.Errorf("expected 418 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(bodyBytes) < 1 {
+ t.Errorf("missing body in response")
+ }
+
+ if bodyBytes[0] != '[' {
+ t.Errorf("unexpected response body prefix")
+ }
+
+ if resp.Header.Get(headers.NameTricksterResult) == "" {
+ t.Errorf("expected header value for %s", headers.NameTricksterResult)
+ }
+
+}
+
+func TestHandleLocalResponseBadResponseCode(t *testing.T) {
+
+ config.Load("trickster-test", "test", []string{"-origin-url", "http://1.2.3.4", "-origin-type", "prometheus"})
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/trickster/", nil)
+
+ pc := &config.PathConfig{
+ ResponseCode: 0,
+ ResponseBody: "[test",
+ ResponseBodyBytes: []byte("[test"),
+ ResponseHeaders: map[string]string{headers.NameTricksterResult: "1234"},
+ }
+
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(nil, pc, nil, nil, nil)))
+
+ HandleLocalResponse(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK because the configured response code was 0
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(bodyBytes) < 1 {
+ t.Errorf("missing body in response")
+ }
+
+ if bodyBytes[0] != '[' {
+ t.Errorf("unexpected response body prefix")
+ }
+
+ if resp.Header.Get(headers.NameTricksterResult) == "" {
+ t.Errorf("expected header value for %s", headers.NameTricksterResult)
+ }
+
+}
+
+func TestHandleLocalResponseNoPathConfig(t *testing.T) {
+
+ config.Load("trickster-test", "test", []string{"-origin-url", "http://1.2.3.4", "-origin-type", "prometheus"})
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/trickster/", nil)
+
+ r = r.WithContext(tc.WithResources(r.Context(), request.NewResources(nil, nil, nil, nil, nil)))
+
+ HandleLocalResponse(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK with an empty body because there is no path config
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(bodyBytes) > 0 {
+ t.Errorf("body should be empty")
+ }
+
+}
diff --git a/internal/proxy/handlers/ping.go b/internal/proxy/handlers/ping.go
new file mode 100644
index 000000000..d795c46e5
--- /dev/null
+++ b/internal/proxy/handlers/ping.go
@@ -0,0 +1,35 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/routing"
+)
+
+// RegisterPingHandler registers the application's /ping handler
+func RegisterPingHandler() {
+ routing.Router.HandleFunc(config.Main.PingHandlerPath, pingHandler).Methods("GET")
+}
+
+// pingHandler responds to an HTTP Request with 200 OK and "pong"
+func pingHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(headers.NameContentType, headers.ValueTextPlain)
+ w.Header().Set(headers.NameCacheControl, headers.ValueNoCache)
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("pong"))
+}
diff --git a/internal/proxy/handlers/ping_test.go b/internal/proxy/handlers/ping_test.go
new file mode 100644
index 000000000..d68fa3c6d
--- /dev/null
+++ b/internal/proxy/handlers/ping_test.go
@@ -0,0 +1,49 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package handlers
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func TestPingHandler(t *testing.T) {
+
+ config.Load("trickster-test", "test", nil)
+ RegisterPingHandler()
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/trickster/ping", nil)
+
+ pingHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK and "pong"
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "pong" {
+ t.Errorf("expected 'pong' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/headers/headers.go b/internal/proxy/headers/headers.go
new file mode 100644
index 000000000..df30991c4
--- /dev/null
+++ b/internal/proxy/headers/headers.go
@@ -0,0 +1,218 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package headers
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/runtime"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+const (
+ // Common HTTP Header Values
+
+ // ValueApplicationJSON represents the HTTP Header Value of "application/json"
+ ValueApplicationJSON = "application/json"
+ // ValueMaxAge represents the HTTP Header Value of "max-age"
+ ValueMaxAge = "max-age"
+ // ValueMultipartFormData represents the HTTP Header Value of "multipart/form-data"
+ ValueMultipartFormData = "multipart/form-data"
+ // ValueMustRevalidate represents the HTTP Header Value of "must-revalidate"
+ ValueMustRevalidate = "must-revalidate"
+ // ValueNoCache represents the HTTP Header Value of "no-cache"
+ ValueNoCache = "no-cache"
+ // ValueNoStore represents the HTTP Header Value of "no-store"
+ ValueNoStore = "no-store"
+ // ValueNoTransform represents the HTTP Header Value of "no-transform"
+ ValueNoTransform = "no-transform"
+ // ValuePrivate represents the HTTP Header Value of "private"
+ ValuePrivate = "private"
+ // ValueProxyRevalidate represents the HTTP Header Value of "proxy-revalidate"
+ ValueProxyRevalidate = "proxy-revalidate"
+ // ValuePublic represents the HTTP Header Value of "public"
+ ValuePublic = "public"
+ // ValueSharedMaxAge represents the HTTP Header Value of "s-maxage"
+ ValueSharedMaxAge = "s-maxage"
+ // ValueTextPlain represents the HTTP Header Value of "text/plain"
+ ValueTextPlain = "text/plain"
+ // ValueXFormURLEncoded represents the HTTP Header Value of "application/x-www-form-urlencoded"
+ ValueXFormURLEncoded = "application/x-www-form-urlencoded"
+
+ // ValueMultipartByteRanges represents the HTTP Header prefix for a Multipart Byte Range response
+ ValueMultipartByteRanges = "multipart/byteranges; boundary="
+
+ // Common HTTP Header Names
+
+ // NameCacheControl represents the HTTP Header Name of "Cache-Control"
+ NameCacheControl = "Cache-Control"
+ // NameAllowOrigin represents the HTTP Header Name of "Access-Control-Allow-Origin"
+ NameAllowOrigin = "Access-Control-Allow-Origin"
+ // NameContentType represents the HTTP Header Name of "Content-Type"
+ NameContentType = "Content-Type"
+ // NameContentEncoding represents the HTTP Header Name of "Content-Encoding"
+ NameContentEncoding = "Content-Encoding"
+ // NameContentLength represents the HTTP Header Name of "Content-Length"
+ NameContentLength = "Content-Length"
+ // NameAuthorization represents the HTTP Header Name of "Authorization"
+ NameAuthorization = "Authorization"
+ // NameContentRange represents the HTTP Header Name of "Content-Range"
+ NameContentRange = "Content-Range"
+ // NameTricksterResult represents the HTTP Header Name of "X-Trickster-Result"
+ NameTricksterResult = "X-Trickster-Result"
+ // NameVia represents the HTTP Header Name of "Via"
+ NameVia = "Via"
+ // NameXForwardedFor represents the HTTP Header Name of "X-Forwarded-For"
+ NameXForwardedFor = "X-Forwarded-For"
+ // NameAcceptEncoding represents the HTTP Header Name of "Accept-Encoding"
+ NameAcceptEncoding = "Accept-Encoding"
+ // NameSetCookie represents the HTTP Header Name of "Set-Cookie"
+ NameSetCookie = "Set-Cookie"
+ // NameRange represents the HTTP Header Name of "Range"
+ NameRange = "Range"
+ // NameTransferEncoding represents the HTTP Header Name of "Transfer-Encoding"
+ NameTransferEncoding = "Transfer-Encoding"
+ // NameIfModifiedSince represents the HTTP Header Name of "If-Modified-Since"
+ NameIfModifiedSince = "If-Modified-Since"
+ // NameIfUnmodifiedSince represents the HTTP Header Name of "If-Unmodified-Since"
+ NameIfUnmodifiedSince = "If-Unmodified-Since"
+ // NameIfNoneMatch represents the HTTP Header Name of "If-None-Match"
+ NameIfNoneMatch = "If-None-Match"
+ // NameIfMatch represents the HTTP Header Name of "If-Match"
+ NameIfMatch = "If-Match"
+ // NameDate represents the HTTP Header Name of "Date"
+ NameDate = "Date"
+ // NamePragma represents the HTTP Header Name of "Pragma"
+ NamePragma = "Pragma"
+ // NameLastModified represents the HTTP Header Name of "Last-Modified"
+ NameLastModified = "Last-Modified"
+ // NameExpires represents the HTTP Header Name of "Expires"
+ NameExpires = "Expires"
+ // NameETag represents the HTTP Header Name of "Etag"
+ NameETag = "Etag"
+)
+
+// Merge merges the source http.Header map into destination map.
+// If a key exists in both maps, the source value wins.
+// If the destination map is nil, the source map will not be merged
+func Merge(dst, src http.Header) {
+ if src == nil || len(src) == 0 || dst == nil {
+ return
+ }
+ for k, sv := range src {
+ if len(sv) == 0 {
+ continue
+ }
+ dst[k] = []string{sv[0]}
+ }
+}
+
+// UpdateHeaders updates the provided headers collection with the provided updates
+func UpdateHeaders(headers http.Header, updates map[string]string) {
+ if headers == nil || updates == nil || len(updates) == 0 {
+ return
+ }
+ for k, v := range updates {
+ if len(k) == 0 {
+ continue
+ }
+ if k[0:1] == "-" {
+ k = k[1:]
+ headers.Del(k)
+ continue
+ }
+ if k[0:1] == "+" {
+ k = k[1:]
+ headers.Add(k, v)
+ continue
+ }
+ headers.Set(k, v)
+ }
+}
+
+// AddProxyHeaders injects standard Trickster headers into proxied upstream HTTP requests
+func AddProxyHeaders(remoteAddr string, headers http.Header) {
+ if remoteAddr != "" {
+ headers.Set(NameXForwardedFor, remoteAddr)
+ headers.Set(NameVia, runtime.ApplicationName+" "+runtime.ApplicationVersion)
+ }
+}
+
+// AddResponseHeaders injects standard Trickster headers into downstream HTTP responses
+func AddResponseHeaders(headers http.Header) {
+ // We're read only and a harmless API, so allow all CORS
+ headers.Set(NameAllowOrigin, "*")
+ headers.Set(NameVia, runtime.ApplicationName+" "+runtime.ApplicationVersion)
+}
+
+// SetResultsHeader adds a response header summarizing Trickster's handling of the HTTP request
+func SetResultsHeader(headers http.Header, engine, status, ffstatus string, fetched timeseries.ExtentList) {
+
+ if headers == nil || engine == "" {
+ return
+ }
+
+ parts := append(make([]string, 0, 4), fmt.Sprintf("engine=%s", engine))
+
+ if status != "" {
+ parts = append(parts, fmt.Sprintf("status=%s", status))
+ }
+
+ if fetched != nil && len(fetched) > 0 {
+ fp := make([]string, 0, len(fetched))
+ for _, v := range fetched {
+ fp = append(fp, fmt.Sprintf("%d:%d", v.Start.Unix(), v.End.Unix()))
+ }
+ parts = append(parts, fmt.Sprintf("fetched=[%s]", strings.Join(fp, ",")))
+ }
+
+ if ffstatus != "" {
+ parts = append(parts, fmt.Sprintf("ffstatus=%s", ffstatus))
+ }
+
+ headers.Set(NameTricksterResult, strings.Join(parts, "; "))
+
+}
+
+// ExtractHeader returns the value for the provided header name, and a boolean indicating if the header was present
+func ExtractHeader(headers http.Header, header string) (string, bool) {
+ if Value, ok := headers[header]; ok {
+ return strings.Join(Value, "; "), true
+ }
+ return "", false
+}
+
+// RemoveClientHeaders strips certain headers from the HTTP request to facilitate acceleration
+func RemoveClientHeaders(headers http.Header) {
+ headers.Del(NameAcceptEncoding)
+}
+
+// String returns the string representation of the headers as if
+// they were transmitted over the wire (Header1: value1\nHeader2: value2\n\n)
+func String(h http.Header) string {
+ if h == nil || len(h) == 0 {
+ return "\n\n"
+ }
+ sb := strings.Builder{}
+ for k, v := range h {
+ if len(v) > 0 {
+ sb.WriteString(fmt.Sprintf("%s: %s\n", k, v[0]))
+ }
+ }
+ // add the header section end new line
+ sb.WriteString("\n")
+ return sb.String()
+}
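
A compact sketch (illustrative only, not part of this changeset) of the key-prefix convention UpdateHeaders applies above: a bare key replaces the header, a '+' prefix appends a value, and a '-' prefix deletes it. It assumes only the UpdateHeaders function defined in this file and uses the same in-package test convention as headers_test.go below:

package headers

import (
	"net/http"
	"testing"
)

func TestUpdateHeadersPrefixSketch(t *testing.T) {
	h := http.Header{"Cache-Control": {"no-store"}, "X-Debug": {"1"}}
	UpdateHeaders(h, map[string]string{
		"Cache-Control": "max-age=60", // bare key: Set (replace)
		"+Via":          "trickster",  // '+' prefix: Add (append)
		"-X-Debug":      "",           // '-' prefix: Del (remove)
	})
	if h.Get("Cache-Control") != "max-age=60" || h.Get("Via") != "trickster" || h.Get("X-Debug") != "" {
		t.Errorf("unexpected headers after update: %v", h)
	}
}
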
diff --git a/internal/proxy/headers/headers_test.go b/internal/proxy/headers/headers_test.go
new file mode 100644
index 000000000..ffcdb3fba
--- /dev/null
+++ b/internal/proxy/headers/headers_test.go
@@ -0,0 +1,187 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package headers
+
+import (
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/runtime"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestAddProxyHeaders(t *testing.T) {
+
+ headers := http.Header{}
+ runtime.ApplicationName = "trickster-test"
+ runtime.ApplicationVersion = "tests"
+
+ AddProxyHeaders("0.0.0.0", headers)
+
+ if _, ok := headers[NameXForwardedFor]; !ok {
+ t.Errorf("missing header %s", NameXForwardedFor)
+ }
+
+ if _, ok := headers[NameVia]; !ok {
+ t.Errorf("missing header %s", NameVia)
+ }
+
+}
+
+func TestExtractHeader(t *testing.T) {
+
+ headers := http.Header{}
+
+ const appName = "trickster-test"
+ const appVer = "tests"
+ const appString = appName + " " + appVer
+
+ runtime.ApplicationName = appName
+ runtime.ApplicationVersion = appVer
+
+ const testIP = "0.0.0.0"
+
+ AddProxyHeaders(testIP, headers)
+
+ if h, ok := ExtractHeader(headers, NameXForwardedFor); !ok {
+ t.Errorf("missing header %s", NameXForwardedFor)
+ } else {
+ if h != testIP {
+ t.Errorf(`expected "%s". got "%s"`, testIP, h)
+ }
+ }
+
+ if h, ok := ExtractHeader(headers, NameVia); !ok {
+ t.Errorf("missing header %s", NameVia)
+ } else {
+ if h != appString {
+ t.Errorf(`expected "%s". got "%s"`, appString, h)
+ }
+ }
+
+ if _, ok := ExtractHeader(headers, NameAllowOrigin); ok {
+ t.Errorf("unexpected header %s", NameAllowOrigin)
+ }
+
+}
+
+func TestUpdateHeaders(t *testing.T) {
+ headers := http.Header{"Foo1": {"foo"}, "Foo2": {"x"}, "Foo3": {"foo"}}
+ expected := http.Header{"Foo1": {"bar"}, "Foo3": {"foo", "bar"}, "Foo4": {"bar"}, "Foo5": {"bar"}}
+
+ UpdateHeaders(headers, nil)
+ if len(headers) != 3 {
+ t.Errorf("expected %d got %d", 3, len(headers))
+ }
+
+ UpdateHeaders(headers, map[string]string{"": "ineffectual", "foo1": "bar", "-foo2": "", "+foo3": "bar", "foo4": "bar", "+foo5": "bar", "-foo6": ""})
+ if !reflect.DeepEqual(headers, expected) {
+ t.Errorf("mismatch\nexpected: %v\ngot: %v", expected, headers)
+ }
+
+}
+
+func TestRemoveClientHeaders(t *testing.T) {
+
+ headers := http.Header{}
+ headers.Set(NameAcceptEncoding, "test")
+
+ RemoveClientHeaders(headers)
+
+ if _, ok := ExtractHeader(headers, NameAcceptEncoding); ok {
+ t.Errorf("unexpected header %s", NameAcceptEncoding)
+ }
+
+}
+
+func TestMerge(t *testing.T) {
+ h1 := make(http.Header)
+ h1.Set("test", "pass")
+ h2 := make(http.Header)
+ h2.Set("test2", "pass")
+
+ Merge(h2, h1)
+ if h2.Get("test") != "pass" {
+ t.Errorf("expected 'pass' got '%s'", h2.Get("test"))
+ }
+
+ Merge(h2, nil)
+ if h2.Get("test") != "pass" {
+ t.Errorf("expected 'pass' got '%s'", h2.Get("test"))
+ }
+
+ h2["test2"] = make([]string, 0)
+
+ Merge(h1, h2)
+ if h1.Get("test") != "pass" {
+ t.Errorf("expected 'pass' got '%s'", h1.Get("test"))
+ }
+
+}
+
+func TestAddResponseHeaders(t *testing.T) {
+
+ headers := http.Header{}
+ runtime.ApplicationName = "trickster-test"
+ runtime.ApplicationVersion = "tests"
+
+ AddResponseHeaders(headers)
+
+ if _, ok := headers[NameAllowOrigin]; !ok {
+ t.Errorf("missing header %s", NameAllowOrigin)
+ }
+
+ if _, ok := headers[NameVia]; !ok {
+ t.Errorf("missing header %s", NameVia)
+ }
+
+}
+
+func TestSetResultsHeader(t *testing.T) {
+ h := http.Header{}
+ SetResultsHeader(h, "test-engine", "test-status", "test-ffstatus", timeseries.ExtentList{timeseries.Extent{Start: time.Unix(1, 0), End: time.Unix(2, 0)}})
+ const expected = "engine=test-engine; status=test-status; fetched=[1:2]; ffstatus=test-ffstatus"
+ if h.Get(NameTricksterResult) != expected {
+ t.Errorf("expected %s got %s", expected, h.Get(NameTricksterResult))
+ }
+}
+
+func TestSetResultsHeaderEmpty(t *testing.T) {
+ h := http.Header{}
+ SetResultsHeader(h, "", "test-status", "test-ffstatus", timeseries.ExtentList{timeseries.Extent{Start: time.Unix(1, 0), End: time.Unix(2, 0)}})
+ if len(h) > 0 {
+ t.Errorf("Expected header length of %d", 0)
+ }
+}
+
+func TestString(t *testing.T) {
+
+ expected := "test: test\n\n"
+ h := http.Header{"test": {"test"}}
+ x := String(h)
+ if x != expected {
+ t.Errorf("expected %s got %s", expected, x)
+ }
+
+ expected = "\n\n"
+ h = http.Header{}
+ x = String(h)
+ if x != expected {
+ t.Errorf("expected %s got %s", expected, x)
+ }
+
+}
diff --git a/internal/proxy/methods/methods.go b/internal/proxy/methods/methods.go
new file mode 100644
index 000000000..af98dcf19
--- /dev/null
+++ b/internal/proxy/methods/methods.go
@@ -0,0 +1,41 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package methods
+
+import "net/http"
+
+const (
+
+ // Methods not currently in the base golang http package
+
+ // MethodPurge is the PURGE HTTP Method
+ MethodPurge = "PURGE"
+)
+
+// AllHTTPMethods returns a list of all known HTTP methods
+func AllHTTPMethods() []string {
+ return []string{http.MethodGet, http.MethodHead, http.MethodPost, http.MethodPut, http.MethodDelete,
+ http.MethodConnect, http.MethodOptions, http.MethodTrace, http.MethodPatch, MethodPurge}
+}
+
+// CacheableHTTPMethods returns a list of HTTP methods that are generally considered cacheable
+func CacheableHTTPMethods() []string {
+ return []string{http.MethodGet, http.MethodHead}
+}
+
+// UncacheableHTTPMethods returns a list of HTTP methods that are generally considered uncacheable
+func UncacheableHTTPMethods() []string {
+ return []string{http.MethodPost, http.MethodPut, http.MethodDelete, http.MethodConnect,
+ http.MethodOptions, http.MethodTrace, http.MethodPatch, MethodPurge}
+}
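
A hedged sketch (illustrative only, not part of this changeset) of how a caller might consult the method lists above; isCacheable is a hypothetical helper, not an export of this package:

package methods

import (
	"net/http"
	"testing"
)

// isCacheable is a hypothetical helper that reports whether m appears in
// the package's CacheableHTTPMethods() list.
func isCacheable(m string) bool {
	for _, c := range CacheableHTTPMethods() {
		if m == c {
			return true
		}
	}
	return false
}

func TestIsCacheableSketch(t *testing.T) {
	if !isCacheable(http.MethodGet) {
		t.Error("expected GET to be cacheable")
	}
	if isCacheable(MethodPurge) {
		t.Error("expected PURGE to be uncacheable")
	}
}
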
diff --git a/internal/proxy/methods/methods_test.go b/internal/proxy/methods/methods_test.go
new file mode 100644
index 000000000..9b71441fc
--- /dev/null
+++ b/internal/proxy/methods/methods_test.go
@@ -0,0 +1,42 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package methods
+
+import (
+ "testing"
+)
+
+func TestAllHTTPMethods(t *testing.T) {
+ expected := 10
+ l := len(AllHTTPMethods())
+ if l != expected {
+ t.Errorf("expected %d got %d", expected, l)
+ }
+}
+
+func TestCacheableHTTPMethods(t *testing.T) {
+ expected := 2
+ l := len(CacheableHTTPMethods())
+ if l != expected {
+ t.Errorf("expected %d got %d", expected, l)
+ }
+}
+
+func TestUncacheableHTTPMethods(t *testing.T) {
+ expected := 8
+ l := len(UncacheableHTTPMethods())
+ if l != expected {
+ t.Errorf("expected %d got %d", expected, l)
+ }
+}
diff --git a/internal/proxy/origins/clickhouse/clickhouse.go b/internal/proxy/origins/clickhouse/clickhouse.go
new file mode 100644
index 000000000..5e6a186ef
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/clickhouse.go
@@ -0,0 +1,113 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ tt "github.com/Comcast/trickster/internal/proxy/timeconv"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/regexp/matching"
+)
+
+// Client Implements the Proxy Client Interface
+type Client struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+ handlers map[string]http.Handler
+ handlersRegistered bool
+
+ healthURL *url.URL
+ healthMethod string
+ healthHeaders http.Header
+}
+
+// NewClient returns a new Client Instance
+func NewClient(name string, oc *config.OriginConfig, cache cache.Cache) (*Client, error) {
+ c, err := proxy.NewHTTPClient(oc)
+ return &Client{name: name, config: oc, cache: cache, webClient: c}, err
+}
+
+// Configuration returns the upstream Configuration for this Client
+func (c *Client) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// HTTPClient returns the HTTP Client this origin client uses
+func (c *Client) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Cache returns a handle to the Cache instance used by the Client
+func (c *Client) Cache() cache.Cache {
+ return c.cache
+}
+
+// Name returns the name of the upstream Configuration proxied by the Client
+func (c *Client) Name() string {
+ return c.name
+}
+
+// SetCache sets the Cache object the client will use for caching origin content
+func (c *Client) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
+
+// ParseTimeRangeQuery parses the key parts of a TimeRangeQuery from the inbound HTTP Request
+func (c *Client) ParseTimeRangeQuery(r *http.Request) (*timeseries.TimeRangeQuery, error) {
+
+ trq := &timeseries.TimeRangeQuery{Extent: timeseries.Extent{}}
+ trq.TemplateURL = urls.Clone(r.URL)
+ qi := trq.TemplateURL.Query()
+ if p, ok := qi[upQuery]; ok {
+ trq.Statement = p[0]
+ } else {
+ return nil, errors.MissingURLParam(upQuery)
+ }
+
+ mp := []string{"step", "timeField"}
+ found := matching.GetNamedMatches(reTimeFieldAndStep, trq.Statement, mp)
+
+ for _, f := range mp {
+ v, ok := found[f]
+ if !ok || v == "" {
+ return nil, errors.ErrNotTimeRangeQuery
+ }
+ switch f {
+ case "timeField":
+ trq.TimestampFieldName = v
+ case "step":
+ trq.Step, _ = tt.ParseDuration(v + "s")
+ }
+ }
+
+ var err error
+ trq.Statement, trq.Extent, _, err = getQueryParts(trq.Statement, trq.TimestampFieldName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Swap in the tokenized query in the URL params
+ qi.Set(upQuery, trq.Statement)
+ trq.TemplateURL.RawQuery = qi.Encode()
+ return trq, nil
+}
diff --git a/internal/proxy/origins/clickhouse/clickhouse_test.go b/internal/proxy/origins/clickhouse/clickhouse_test.go
new file mode 100644
index 000000000..46b0d4bed
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/clickhouse_test.go
@@ -0,0 +1,193 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestClickhouseClientInterfacing(t *testing.T) {
+
+ // this test ensures the client will properly conform to the
+ // Client and TimeseriesClient interfaces
+
+ c := &Client{name: "test"}
+ var oc origins.Client = c
+ var tc origins.TimeseriesClient = c
+
+ if oc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", oc.Name())
+ }
+
+ if tc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", tc.Name())
+ }
+}
+
+func TestNewClient(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-type", "clickhouse", "-origin-url", "http://1"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ oc := &config.OriginConfig{OriginType: "TEST_CLIENT"}
+ c, err := NewClient("default", oc, cache)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if c.Name() != "default" {
+ t.Errorf("expected %s got %s", "default", c.Name())
+ }
+
+ if c.Cache().Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Cache().Configuration().CacheType)
+ }
+
+ if c.Configuration().OriginType != "TEST_CLIENT" {
+ t.Errorf("expected %s got %s", "TEST_CLIENT", c.Configuration().OriginType)
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+ client := Client{config: oc}
+ c := client.Configuration()
+ if c.OriginType != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c.OriginType)
+ }
+}
+
+func TestCache(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-type", "clickhouse", "-origin-url", "http://1"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ client := Client{cache: cache}
+ c := client.Cache()
+
+ if c.Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Configuration().CacheType)
+ }
+}
+
+func TestName(t *testing.T) {
+
+ client := Client{name: "TEST"}
+ c := client.Name()
+
+ if c != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c)
+ }
+
+}
+
+func TestHTTPClient(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+
+ client, err := NewClient("test", oc, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if client.HTTPClient() == nil {
+ t.Errorf("missing http client")
+ }
+}
+
+func TestSetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ c.SetCache(nil)
+ if c.Cache() != nil {
+ t.Errorf("expected nil cache for client named %s", "test")
+ }
+}
+
+func TestParseTimeRangeQuery(t *testing.T) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: testRawQuery(),
+ }}
+ client := &Client{}
+ res, err := client.ParseTimeRangeQuery(req)
+ if err != nil {
+ t.Error(err)
+ } else {
+
+ if res.Step.Seconds() != 60 {
+ t.Errorf("expected 60 got %f", res.Step.Seconds())
+ }
+
+ if res.Extent.End.Sub(res.Extent.Start).Hours() != 6 {
+ t.Errorf("expected 6 got %f", res.Extent.End.Sub(res.Extent.Start).Hours())
+ }
+ }
+
+ req.URL.RawQuery = ""
+ _, err = client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf("expected error for: %s", "missing URL parameter: [query]")
+ }
+
+ req.URL.RawQuery = url.Values(map[string][]string{"query": {
+ `SELECT (intDiv(toUInt32(abc), 6z0) * 6z0) * 1000 AS t, countMerge(some_count) AS cnt, field1, field2 ` +
+ `FROM testdb.test_table WHERE abc BETWEEN toDateTime(1516665600) AND toDateTime(1516687200) ` +
+ `AND date_column >= toDate(1516665600) AND toDate(1516687200) ` +
+ `AND field1 > 0 AND field2 = 'some_value' GROUP BY t, field1, field2 ORDER BY t, field1 FORMAT JSON`}}).Encode()
+ _, err = client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf("expected error for: %s", "not a time range query")
+ }
+
+ req.URL.RawQuery = url.Values(map[string][]string{"query": {
+ `SELECT (intDiv(toUInt32(0^^^), 60) * 60) * 1000 AS t, countMerge(some_count) AS cnt, field1, field2 ` +
+ `FROM testdb.test_table WHERE 0^^^ BETWEEN toDateTime(1516665600) AND toDateTime(1516687200) ` +
+ `AND date_column >= toDate(1516665600) AND toDate(1516687200) ` +
+ `AND field1 > 0 AND field2 = 'some_value' GROUP BY t, field1, field2 ORDER BY t, field1 FORMAT JSON`}}).Encode()
+ _, err = client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf("expected error for: %s", "not a time range query")
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/handler_health.go b/internal/proxy/origins/clickhouse/handler_health.go
new file mode 100644
index 000000000..47975948c
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_health.go
@@ -0,0 +1,73 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+const (
+ healthQuery = "SELECT 1 FORMAT JSON"
+)
+
+// HealthHandler checks the health of the Configured Upstream Origin
+func (c *Client) HealthHandler(w http.ResponseWriter, r *http.Request) {
+
+ if c.healthURL == nil {
+ c.populateHealthCheckRequestValues()
+ }
+
+ if c.healthMethod == "-" {
+ w.WriteHeader(400)
+ w.Write([]byte("Health Check URL not Configured for origin: " + c.config.Name))
+ return
+ }
+
+ req, _ := http.NewRequest(c.healthMethod, c.healthURL.String(), nil)
+ req = req.WithContext(r.Context())
+
+ req.Header = c.healthHeaders
+ engines.DoProxy(w, req)
+
+}
+
+func (c *Client) populateHealthCheckRequestValues() {
+
+ oc := c.config
+
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/"
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ q := url.Values{"query": {healthQuery}}
+ oc.HealthCheckQuery = q.Encode()
+ }
+
+ c.healthURL = c.BaseURL()
+ c.healthURL.Path += oc.HealthCheckUpstreamPath
+ c.healthURL.RawQuery = oc.HealthCheckQuery
+ c.healthMethod = oc.HealthCheckVerb
+
+ if oc.HealthCheckHeaders != nil {
+ c.healthHeaders = http.Header{}
+ headers.UpdateHeaders(c.healthHeaders, oc.HealthCheckHeaders)
+ }
+}
diff --git a/internal/proxy/origins/clickhouse/handler_health_test.go b/internal/proxy/origins/clickhouse/handler_health_test.go
new file mode 100644
index 000000000..979c7561e
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_health_test.go
@@ -0,0 +1,104 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestHealthHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "clickhouse", "/health", "debug")
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ client.healthMethod = "-"
+
+ w = httptest.NewRecorder()
+ client.HealthHandler(w, r)
+ resp = w.Result()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status: 400 got %d.", resp.StatusCode)
+ }
+
+}
+
+func TestHealthHandlerCustomPath(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("../../../../testdata/test.custom_health.conf", client.DefaultPathConfigs, 200, "{}", nil, "clickhouse", "/health", "debug")
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/handler_proxy.go b/internal/proxy/origins/clickhouse/handler_proxy.go
new file mode 100644
index 000000000..c3270f2c6
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_proxy.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyHandler sends a request through the basic reverse proxy to the origin, and services non-cacheable ClickHouse API calls
+func (c *Client) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/clickhouse/handler_proxy_test.go b/internal/proxy/origins/clickhouse/handler_proxy_test.go
new file mode 100644
index 000000000..b4f6aeba8
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_proxy_test.go
@@ -0,0 +1,55 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "test", nil, "clickhouse", "/health", "debug")
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.ProxyHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "test" {
+ t.Errorf("expected 'test' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/handler_query.go b/internal/proxy/origins/clickhouse/handler_query.go
new file mode 100644
index 000000000..b2cb31ac2
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_query.go
@@ -0,0 +1,36 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// QueryHandler handles timeseries requests for ClickHouse and processes them through the delta proxy cache
+func (c *Client) QueryHandler(w http.ResponseWriter, r *http.Request) {
+
+ rqlc := strings.Replace(strings.ToLower(r.URL.RawQuery), "%20", "+", -1)
+ // if it's not a select statement, just proxy it instead
+ if (!strings.HasPrefix(rqlc, "query=select+")) && (!(strings.Index(rqlc, "&query=select+") > 0)) &&
+ (!strings.HasSuffix(rqlc, "format+json")) {
+ c.ProxyHandler(w, r)
+ return
+ }
+
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
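The select-detection above operates on the lowercased raw query string. A minimal, self-contained sketch of the same check (the query strings below are hypothetical, for illustration only) shows which requests are routed through the delta proxy cache rather than the plain proxy:

package main

import (
	"fmt"
	"strings"
)

// isCacheableSelect mirrors the condition in QueryHandler: a request is eligible for the
// delta proxy cache when its lowercased raw query starts with (or contains) a
// "query=select+" parameter, or ends with "format+json"; otherwise it is proxied as-is.
func isCacheableSelect(rawQuery string) bool {
	rqlc := strings.Replace(strings.ToLower(rawQuery), "%20", "+", -1)
	return strings.HasPrefix(rqlc, "query=select+") ||
		strings.Index(rqlc, "&query=select+") > 0 ||
		strings.HasSuffix(rqlc, "format+json")
}

func main() {
	fmt.Println(isCacheableSelect("query=SELECT%20t,%20cnt%20FROM%20db.tbl%20FORMAT%20JSON")) // true: delta proxy cache
	fmt.Println(isCacheableSelect("database=db&query=SELECT%20t%20FROM%20tbl"))               // true: delta proxy cache
	fmt.Println(isCacheableSelect("query=SHOW%20TABLES"))                                     // false: plain proxy
}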
diff --git a/internal/proxy/origins/clickhouse/handler_query_test.go b/internal/proxy/origins/clickhouse/handler_query_test.go
new file mode 100644
index 000000000..3a40c077f
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/handler_query_test.go
@@ -0,0 +1,102 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func testRawQuery() string {
+ return url.Values(map[string][]string{"query": {
+ `SELECT (intDiv(toUInt32(time_column), 60) * 60) * 1000 AS t, countMerge(some_count) AS cnt, field1, field2 ` +
+ `FROM testdb.test_table WHERE time_column BETWEEN toDateTime(1516665600) AND toDateTime(1516687200) ` +
+ `AND date_column >= toDate(1516665600) AND toDate(1516687200) ` +
+ `AND field1 > 0 AND field2 = 'some_value' GROUP BY t, field1, field2 ORDER BY t, field1 FORMAT JSON`}}).Encode()
+}
+
+func testNonSelectQuery() string {
+ return url.Values(map[string][]string{"query": {
+ `UPDATE (intDiv(toUInt32(time_column), 60) * 60) * 1000 AS t`}}).Encode()
+ // not a real query, just something to trigger a non-select proxy-only request
+}
+
+func TestQueryHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "clickhouse", "/?"+testRawQuery(), "debug")
+ ctx := r.Context()
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, ok := client.config.Paths["/"]
+ if !ok {
+ t.Errorf("could not find path config named %s", "/")
+ }
+
+ client.QueryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ r, _ = http.NewRequest(http.MethodGet, ts.URL+"/?"+testNonSelectQuery(), nil)
+ w = httptest.NewRecorder()
+
+ r = r.WithContext(ctx)
+
+ client.QueryHandler(w, r)
+
+ resp = w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/model.go b/internal/proxy/origins/clickhouse/model.go
new file mode 100644
index 000000000..f3fb69c70
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/model.go
@@ -0,0 +1,337 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+)
+
+const (
+ millisPerSecond = int64(time.Second / time.Millisecond)
+ nanosPerMillisecond = int64(time.Millisecond / time.Nanosecond)
+)
+
+func msToTime(ms string) (time.Time, error) {
+ msInt, err := strconv.ParseInt(ms, 10, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Unix(msInt/millisPerSecond,
+ (msInt%millisPerSecond)*nanosPerMillisecond), nil
+}
+
+// FieldDefinition describes the name and data type of a field in a ClickHouse response
+type FieldDefinition struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+}
+
+// ResponseValue represents a single row of a ClickHouse response as a map of field name to value
+type ResponseValue map[string]interface{}
+
+// DataSet represents a single time series, with its metric labels and ordered data points
+type DataSet struct {
+ Metric map[string]interface{}
+ Points []Point
+}
+
+// Points is a sortable slice of time series data points
+type Points []Point
+
+// Point represents a single data point (timestamp and value) in a time series
+type Point struct {
+ Timestamp time.Time
+ Value float64
+}
+
+// Response is the JSON response document structure for ClickHouse query results
+type Response struct {
+ Meta []FieldDefinition `json:"meta"`
+ RawData []ResponseValue `json:"data"`
+ Rows int `json:"rows"`
+ Order []string `json:"-"`
+ StepDuration time.Duration `json:"step,omitempty"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+}
+
+// ResultsEnvelope is the ClickHouse document structure optimized for time series manipulation
+type ResultsEnvelope struct {
+ Meta []FieldDefinition `json:"meta"`
+ Data map[string]*DataSet `json:"data"`
+ StepDuration time.Duration `json:"step,omitempty"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ Serializers map[string]func(interface{}) `json:"-"`
+ SeriesOrder []string `json:"series_order,omitempty"`
+
+ timestamps map[time.Time]bool // tracks unique timestamps in the matrix data
+ tslist times.Times
+ isSorted bool // tracks if the matrix data is currently sorted
+ isCounted bool // tracks if timestamps slice is up-to-date
+}
+
+// MarshalTimeseries converts a Timeseries into a JSON blob
+func (c *Client) MarshalTimeseries(ts timeseries.Timeseries) ([]byte, error) {
+ // Marshal the Envelope back to a json object for Cache Storage
+ return json.Marshal(ts.(*ResultsEnvelope))
+}
+
+// UnmarshalTimeseries converts a JSON blob into a Timeseries
+func (c *Client) UnmarshalTimeseries(data []byte) (timeseries.Timeseries, error) {
+ re := &ResultsEnvelope{}
+ err := json.Unmarshal(data, re)
+ return re, err
+}
+
+// Parts splits a ResponseValue row into its series key, timestamp, value, and remaining metadata labels
+func (rv ResponseValue) Parts(timeKey, valKey string) (string, time.Time, float64, ResponseValue) {
+
+ if len(rv) < 3 {
+ return noParts()
+ }
+
+ labels := make([]string, 0, len(rv)-2)
+ var t time.Time
+ var val float64
+ var err error
+
+ meta := make(ResponseValue)
+
+ for k, v := range rv {
+ switch k {
+ case timeKey:
+ t, err = msToTime(v.(string))
+ if err != nil {
+ return noParts()
+ }
+ case valKey:
+ if av, ok := v.(float64); ok {
+ val = av
+ continue
+ }
+ val, err = strconv.ParseFloat(v.(string), 64)
+ if err != nil {
+ return noParts()
+ }
+ default:
+ meta[k] = v
+ labels = append(labels, fmt.Sprintf("%s=%v", k, v))
+ }
+ }
+ sort.Strings(labels)
+ return fmt.Sprintf("{%s}", strings.Join(labels, ";")), t, val, meta
+}
+
+func noParts() (string, time.Time, float64, ResponseValue) {
+ return "{}", time.Time{}, 0.0, ResponseValue{}
+}
+
+// MarshalJSON converts a ResultsEnvelope back into the native ClickHouse JSON response format
+func (re ResultsEnvelope) MarshalJSON() ([]byte, error) {
+
+ if len(re.Meta) < 2 {
+ return nil, fmt.Errorf("Must have at least two fields; only have %d", len(re.Meta))
+ }
+
+ var mpl, fl int
+ for _, v := range re.Data {
+ lp := len(v.Points)
+ fl += lp
+ if mpl < lp {
+ mpl = lp
+ }
+ }
+
+ rsp := &Response{
+ Meta: re.Meta,
+ RawData: make([]ResponseValue, 0, fl),
+ Rows: re.ValueCount(),
+ StepDuration: re.StepDuration,
+ ExtentList: re.ExtentList,
+ }
+
+ rsp.Order = make([]string, 0, len(re.Meta))
+ for _, k := range re.Meta {
+ rsp.Order = append(rsp.Order, k.Name)
+ }
+
+ // Assume the first item in the meta array is the time, and the second is the value
+ timestampFieldName := rsp.Order[0]
+ valueFieldName := rsp.Order[1]
+
+ tm := make(map[time.Time][]ResponseValue)
+ tl := make(times.Times, 0, mpl)
+
+ l := len(re.Data)
+
+ prepareMarshalledPoints := func(ds *DataSet) {
+
+ var ok bool
+ var t []ResponseValue
+
+ for _, p := range ds.Points {
+
+ t, ok = tm[p.Timestamp]
+ if !ok {
+ tl = append(tl, p.Timestamp)
+ t = make([]ResponseValue, 0, l)
+ }
+
+ r := ResponseValue{
+ timestampFieldName: strconv.FormatInt(p.Timestamp.UnixNano()/int64(time.Millisecond), 10),
+ valueFieldName: strconv.FormatFloat(p.Value, 'f', -1, 64),
+ }
+ for k2, v2 := range ds.Metric {
+ r[k2] = v2
+ }
+
+ t = append(t, r)
+ tm[p.Timestamp] = t
+ }
+ }
+
+ for _, key := range re.SeriesOrder {
+ if ds, ok := re.Data[key]; ok {
+ prepareMarshalledPoints(ds)
+ }
+ }
+
+ sort.Sort(tl)
+
+ for _, t := range tl {
+ rsp.RawData = append(rsp.RawData, tm[t]...)
+ }
+
+ bytes, err := json.Marshal(rsp)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes, nil
+}
+
+// MarshalJSON serializes a Response, preserving the original field order of the data rows
+func (rsp *Response) MarshalJSON() ([]byte, error) {
+
+ buf := &bytes.Buffer{}
+ buf.WriteString(`{"meta":`)
+ meta, _ := json.Marshal(rsp.Meta)
+ buf.Write(meta)
+ buf.WriteString(`,"data":[`)
+ d := make([]string, 0, len(rsp.RawData))
+ for _, rd := range rsp.RawData {
+ d = append(d, string(rd.ToJSON(rsp.Order)))
+ }
+ buf.WriteString(strings.Join(d, ",") + "]")
+ buf.WriteString(fmt.Sprintf(`,"rows": %d`, rsp.Rows))
+
+ if rsp.ExtentList != nil && len(rsp.ExtentList) > 0 {
+ el, _ := json.Marshal(rsp.ExtentList)
+ buf.WriteString(fmt.Sprintf(`,"extents": %s`, string(el)))
+ }
+
+ buf.WriteString("}")
+
+ b := buf.Bytes()
+
+ return b, nil
+}
+
+// ToJSON serializes a ResponseValue as a JSON object with its fields in the provided order
+func (rv ResponseValue) ToJSON(order []string) []byte {
+ buf := &bytes.Buffer{}
+ buf.WriteString("{")
+ lines := make([]string, 0, len(rv))
+ for _, k := range order {
+ if v, ok := rv[k]; ok {
+
+ // skip any field that cannot be marshaled
+ j, err := json.Marshal(v)
+ if err != nil {
+ continue
+ }
+ lines = append(lines, fmt.Sprintf(`"%s":%s`, k, string(j)))
+ }
+ }
+ buf.WriteString(strings.Join(lines, ",") + "}")
+ return buf.Bytes()
+}
+
+// UnmarshalJSON converts a ClickHouse JSON response into a ResultsEnvelope
+func (re *ResultsEnvelope) UnmarshalJSON(b []byte) error {
+
+ response := Response{}
+ err := json.Unmarshal(b, &response)
+ if err != nil {
+ return err
+ }
+
+ if len(response.Meta) < 2 {
+ return fmt.Errorf("Must have at least two fields; only have %d", len(response.Meta))
+ }
+
+ re.Meta = response.Meta
+ re.ExtentList = response.ExtentList
+ re.StepDuration = response.StepDuration
+ re.SeriesOrder = make([]string, 0)
+
+ // Assume the first item in the meta array is the time field, and the second is the value field
+ timestampFieldName := response.Meta[0].Name
+ valueFieldName := response.Meta[1].Name
+
+ registeredMetrics := make(map[string]bool)
+
+ re.Data = make(map[string]*DataSet)
+ l := len(response.RawData)
+ for _, v := range response.RawData {
+ metric, ts, val, meta := v.Parts(timestampFieldName, valueFieldName)
+ if _, ok := registeredMetrics[metric]; !ok {
+ registeredMetrics[metric] = true
+ re.SeriesOrder = append(re.SeriesOrder, metric)
+ }
+ if !ts.IsZero() {
+ a, ok := re.Data[metric]
+ if !ok {
+ a = &DataSet{Metric: meta, Points: make([]Point, 0, l)}
+ }
+ a.Points = append(a.Points, Point{Timestamp: ts, Value: val})
+ re.Data[metric] = a
+ }
+ }
+
+ return nil
+}
+
+// Len returns the length of a slice of time series data points
+func (p Points) Len() int {
+ return len(p)
+}
+
+// Less returns true if i comes before j
+func (p Points) Less(i, j int) bool {
+ return p[i].Timestamp.Before(p[j].Timestamp)
+}
+
+// Swap modifies a slice of time series data points by swapping the values in indexes i and j
+func (p Points) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
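To illustrate the timestamp handling above: ClickHouse returns the time column as a millisecond-epoch string, which msToTime converts to a time.Time. A standalone sketch of the same conversion (the helper name msEpochToTime is made up for this example):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// msEpochToTime mirrors msToTime above: it parses a millisecond-epoch string into a time.Time.
func msEpochToTime(ms string) (time.Time, error) {
	msInt, err := strconv.ParseInt(ms, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(msInt/1000, (msInt%1000)*int64(time.Millisecond)), nil
}

func main() {
	t, err := msEpochToTime("1557766080000")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2019-05-13 16:48:00 +0000 UTC
}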
diff --git a/internal/proxy/origins/clickhouse/model_test.go b/internal/proxy/origins/clickhouse/model_test.go
new file mode 100644
index 000000000..f94baa10b
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/model_test.go
@@ -0,0 +1,333 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "encoding/json"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestParts(t *testing.T) {
+
+ rv1 := ResponseValue{
+ "t": "1557766080000",
+ "cnt": "27",
+ "meta1": 200,
+ "meta2": "value3",
+ }
+
+ metric, ts, val, _ := rv1.Parts("t", "cnt")
+
+ expectedTs := time.Unix(1557766080, 0)
+ expectedMetric := "{meta1=200;meta2=value3}"
+ var expectedValue float64 = 27
+
+ if ts != expectedTs {
+ t.Errorf("expected %d got %d", expectedTs.Unix(), ts.Unix())
+ }
+
+ if metric != expectedMetric {
+ t.Errorf("expected %s got %s", expectedMetric, metric)
+ }
+
+ if val != expectedValue {
+ t.Errorf("expected %f got %f", expectedValue, val)
+ }
+
+ rv2 := ResponseValue{
+ "t": "1557766080000",
+ "cnt": "27",
+ }
+
+ metric, _, _, _ = rv2.Parts("t", "cnt")
+ if metric != "{}" {
+ t.Errorf("expected '{}' got %s", metric)
+ }
+
+ rv3 := ResponseValue{
+ "t": "A557766080000",
+ "cnt": "27",
+ "meta1": 200,
+ }
+
+ metric, _, _, _ = rv3.Parts("t", "cnt")
+ if metric != "{}" {
+ t.Errorf("expected '{}' got %s", metric)
+ }
+
+ rv4 := ResponseValue{
+ "t": "1557766080000",
+ "cnt": "2a7",
+ "meta1": 200,
+ }
+
+ metric, _, _, _ = rv4.Parts("t", "cnt")
+ if metric != "{}" {
+ t.Errorf("expected '{}' got %s", metric)
+ }
+
+ rv5 := ResponseValue{
+ "t": "1557766080000",
+ "cnt": 27.5,
+ "meta1": 200,
+ }
+
+ metric, _, _, _ = rv5.Parts("t", "cnt")
+ if metric != "{meta1=200}" {
+ t.Errorf("expected '{meta1=200}' got %s", metric)
+ }
+
+}
+
+var testJSON1 = []byte(`{"meta":[{"name":"t","type":"UInt64"},{"name":"cnt","type":"UInt64"},{"name":"meta1","type":"UInt16"},{"name":"meta2","type":"String"}],"data":[{"t":"1557766080000","cnt":"12648509","meta1":200,"meta2":"value2"},{"t":"1557766080000","cnt":"10260032","meta1":200,"meta2":"value3"},{"t":"1557766080000","cnt":"1","meta1":206,"meta2":"value3"}],"rows":3}`)
+var testJSON2 = []byte(`{"meta":[{"name":"t"}],"data":[{"t":"1557766080000","cnt":"12648509","meta1":200,"meta2":"value2"},{"t":"1557766080000","cnt":"10260032","meta1":200,"meta2":"value3"},{"t":"1557766080000","cnt":"1","meta1":206,"meta2":"value3"}],"rows":3}`) // should generate error
+
+var testRE1 = &ResultsEnvelope{
+ Meta: []FieldDefinition{
+ {
+ Name: "t",
+ Type: "UInt64",
+ },
+ {
+ Name: "cnt",
+ Type: "UInt64",
+ },
+ {
+ Name: "meta1",
+ Type: "UInt16",
+ },
+ {
+ Name: "meta2",
+ Type: "String",
+ },
+ },
+
+ SeriesOrder: []string{"1", "2", "3"},
+
+ Data: map[string]*DataSet{
+ "1": {
+ Metric: map[string]interface{}{
+ "meta1": 200,
+ "meta2": "value2",
+ },
+ Points: []Point{
+ {
+ Timestamp: time.Unix(1557766080, 0),
+ Value: 12648509,
+ },
+ },
+ },
+ "2": {
+ Metric: map[string]interface{}{
+ "meta1": 200,
+ "meta2": "value3",
+ },
+ Points: []Point{
+ {
+ Timestamp: time.Unix(1557766080, 0),
+ Value: 10260032,
+ },
+ },
+ },
+ "3": {
+ Metric: map[string]interface{}{
+ "meta1": 206,
+ "meta2": "value3",
+ },
+ Points: []Point{
+ {
+ Timestamp: time.Unix(1557766080, 0),
+ Value: 1,
+ },
+ },
+ },
+ },
+}
+
+func TestREMarshalJSON(t *testing.T) {
+
+ expectedLen := len(testJSON1)
+
+ re := ResultsEnvelope{}
+ err := re.UnmarshalJSON(testJSON1)
+ if err != nil {
+ t.Error(err)
+ }
+
+ bytes, err := re.MarshalJSON()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(bytes) != expectedLen {
+ t.Errorf("expected %d got %d", expectedLen, len(bytes))
+ }
+
+ re.Meta = re.Meta[:0]
+ _, err = re.MarshalJSON()
+ if err == nil {
+ t.Errorf("expected error: %s", `Must have at least two fields; only have 0`)
+ }
+
+}
+
+func TestRSPMarshalJSON(t *testing.T) {
+
+ rsp := &Response{ExtentList: timeseries.ExtentList{{Start: time.Unix(0, 0), End: time.Unix(5, 0)}}}
+
+ bytes, err := rsp.MarshalJSON()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ rsp1 := &Response{}
+ json.Unmarshal(bytes, rsp1)
+
+ if rsp.ExtentList[0].Start.Unix() != rsp1.ExtentList[0].Start.Unix() {
+ t.Errorf("expected %d got %d", rsp.ExtentList[0].Start.Unix(), rsp1.ExtentList[0].Start.Unix())
+ }
+
+ if rsp.ExtentList[0].End.Unix() != rsp1.ExtentList[0].End.Unix() {
+ t.Errorf("expected %d got %d", rsp.ExtentList[0].End.Unix(), rsp1.ExtentList[0].End.Unix())
+ }
+
+}
+
+func TestUnmarshalTimeseries(t *testing.T) {
+
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries(testJSON1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ re := ts.(*ResultsEnvelope)
+
+ if len(re.Meta) != 4 {
+ t.Errorf(`expected 4. got %d`, len(re.Meta))
+ return
+ }
+
+ if len(re.Data) != 3 {
+ t.Errorf(`expected 3. got %d`, len(re.Data))
+ return
+ }
+
+ _, err = client.UnmarshalTimeseries(nil)
+ if err == nil {
+ t.Errorf("expected error: %s", `unexpected end of JSON input`)
+ return
+ }
+
+ _, err = client.UnmarshalTimeseries(testJSON2)
+ if err == nil {
+ t.Errorf("expected error: %s", `Must have at least two fields; only have 1`)
+ return
+ }
+
+}
+
+func TestMarshalTimeseries(t *testing.T) {
+ expectedLen := len(testJSON1)
+ client := &Client{}
+ bytes, err := client.MarshalTimeseries(testRE1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if !reflect.DeepEqual(testJSON1, bytes) {
+ t.Errorf("expected %d got %d", expectedLen, len(bytes))
+ }
+}
+
+func TestUnmarshalJSON(t *testing.T) {
+
+ re := ResultsEnvelope{}
+ err := re.UnmarshalJSON(testJSON1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if len(re.Meta) != 4 {
+ t.Errorf(`expected 4. got %d`, len(re.Meta))
+ return
+ }
+
+ m := re.Meta[2]
+ if m.Name != "meta1" {
+ t.Errorf(`expected meta1 found %s`, m.Name)
+ return
+ }
+
+ if len(re.Data) != 3 {
+ t.Errorf(`expected 3. got %d`, len(re.Data))
+ return
+ }
+
+ key := "{meta1=206;meta2=value3}"
+ v, ok := re.Data[key]
+ if !ok {
+ t.Errorf(`expected to find key %s`, key)
+ return
+ }
+
+ if len(v.Points) != 1 {
+ t.Errorf(`expected 1 got %d`, len(v.Points))
+ return
+ }
+
+ if v.Points[0].Value != 1 {
+ t.Errorf(`expected 1 got %f`, v.Points[0].Value)
+ return
+ }
+
+ err = re.UnmarshalJSON(nil)
+ if err == nil {
+ t.Errorf("expected error: %s", `unexpected end of JSON input`)
+ return
+ }
+
+ err = re.UnmarshalJSON(testJSON2)
+ if err == nil {
+ t.Errorf("expected error: %s", `Must have at least two fields; only have 1`)
+ return
+ }
+
+}
+
+func TestMSToTime(t *testing.T) {
+ _, err := msToTime("bad")
+ if err == nil {
+ t.Errorf("expected error for invalid syntax")
+ }
+}
+
+func TestSortPoints(t *testing.T) {
+
+ p := Points{{Timestamp: time.Unix(1, 0), Value: 12}, {Timestamp: time.Unix(0, 0), Value: 13}, {Timestamp: time.Unix(2, 0), Value: 22}}
+ sort.Sort(p)
+
+ if p[0].Timestamp.Unix() != 0 {
+ t.Errorf("expected %d got %d", 0, p[0].Timestamp.Unix())
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/routes.go b/internal/proxy/origins/clickhouse/routes.go
new file mode 100644
index 000000000..a3d99545f
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/routes.go
@@ -0,0 +1,54 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func (c *Client) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+ // This is the registry of handlers that Trickster supports for ClickHouse,
+ // which can be referenced by name (map key) in Config Files
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers["query"] = http.HandlerFunc(c.QueryHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *Client) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *Client) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+ paths := map[string]*config.PathConfig{
+ "/": {
+ Path: "/",
+ HandlerName: "query",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ CacheKeyParams: []string{"query", "database"},
+ OriginConfig: oc,
+ },
+ }
+ return paths
+}
diff --git a/internal/proxy/origins/clickhouse/routes_test.go b/internal/proxy/origins/clickhouse/routes_test.go
new file mode 100644
index 000000000..85b265b54
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/routes_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRegisterHandlers(t *testing.T) {
+ c := &Client{}
+ c.registerHandlers()
+ if _, ok := c.handlers["query"]; !ok {
+ t.Errorf("expected to find handler named: %s", "query")
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ c := &Client{}
+ m := c.Handlers()
+ if _, ok := m["query"]; !ok {
+ t.Errorf("expected to find handler named: %s", "query")
+ }
+}
+
+func TestDefaultPathConfigs(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 204, "", nil, "clickhouse", "/", "debug")
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if _, ok := client.config.Paths["/"]; !ok {
+ t.Errorf("expected to find path named: %s", "/")
+ }
+
+ const expectedLen = 1
+ if len(client.config.Paths) != expectedLen {
+ t.Errorf("expected %d got %d", expectedLen, len(client.config.Paths))
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/series.go b/internal/proxy/origins/clickhouse/series.go
new file mode 100644
index 000000000..856d73ab5
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/series.go
@@ -0,0 +1,509 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+)
+
+// Step returns the step for the Timeseries
+func (re *ResultsEnvelope) Step() time.Duration {
+ return re.StepDuration
+}
+
+// SetStep sets the step for the Timeseries
+func (re *ResultsEnvelope) SetStep(step time.Duration) {
+ re.StepDuration = step
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the order provided) and optionally sorts the merged Timeseries
+func (re *ResultsEnvelope) Merge(sort bool, collection ...timeseries.Timeseries) {
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ for _, ts := range collection {
+ if ts != nil {
+ re2 := ts.(*ResultsEnvelope)
+ for k, s := range re2.Data {
+ wg.Add(1)
+ go func(l string, d *DataSet) {
+ mtx.Lock()
+ if _, ok := re.Data[l]; !ok {
+ re.Data[l] = d
+ mtx.Unlock()
+ wg.Done()
+ return
+ }
+ re.Data[l].Points = append(re.Data[l].Points, d.Points...)
+ mtx.Unlock()
+ wg.Done()
+ }(k, s)
+ }
+ wg.Wait()
+ re.mergeSeriesOrder(re2.SeriesOrder)
+ re.ExtentList = append(re.ExtentList, re2.ExtentList...)
+ }
+ }
+
+ re.ExtentList = re.ExtentList.Compress(re.StepDuration)
+ re.isSorted = false
+ re.isCounted = false
+ if sort {
+ re.Sort()
+ }
+}
+
+func (re *ResultsEnvelope) mergeSeriesOrder(so2 []string) {
+
+ if len(so2) == 0 {
+ return
+ }
+
+ if len(re.SeriesOrder) == 0 {
+ re.SeriesOrder = so2
+ return
+ }
+
+ so1 := make([]string, len(re.SeriesOrder), len(re.SeriesOrder)+len(so2))
+ copy(so1, re.SeriesOrder)
+ adds := make([]string, 0, len(so2))
+ added := make(map[string]bool)
+
+ for _, n := range so2 {
+ if _, ok := re.Data[n]; !ok {
+ if _, ok2 := added[n]; !ok2 {
+ adds = append(adds, n)
+ added[n] = true
+ }
+ continue
+ }
+
+ if len(adds) > 0 {
+ for i, v := range so1 {
+ if v == n {
+ adds = append(adds, so1[i:]...)
+ so1 = append(so1[0:i], adds...)
+ }
+ }
+ adds = adds[:0]
+ }
+ }
+
+ if len(adds) > 0 {
+ so1 = append(so1, adds...)
+ }
+
+ re.SeriesOrder = so1
+
+}
+
+// Clone returns a perfect copy of the base Timeseries
+func (re *ResultsEnvelope) Clone() timeseries.Timeseries {
+ re2 := &ResultsEnvelope{
+ isCounted: re.isCounted,
+ isSorted: re.isSorted,
+ StepDuration: re.StepDuration,
+ }
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ if re.SeriesOrder != nil {
+ re2.SeriesOrder = make([]string, len(re.SeriesOrder))
+ copy(re2.SeriesOrder, re.SeriesOrder)
+ }
+
+ if re.ExtentList != nil {
+ re2.ExtentList = make(timeseries.ExtentList, len(re.ExtentList))
+ copy(re2.ExtentList, re.ExtentList)
+ }
+
+ if re.tslist != nil {
+ re2.tslist = make(times.Times, len(re.tslist))
+ copy(re2.tslist, re.tslist)
+ }
+
+ if re.Meta != nil {
+ re2.Meta = make([]FieldDefinition, len(re.Meta))
+ copy(re2.Meta, re.Meta)
+ }
+
+ if re.Serializers != nil {
+ re2.Serializers = make(map[string]func(interface{}))
+ wg.Add(1)
+ go func() {
+ for k, s := range re.Serializers {
+ re2.Serializers[k] = s
+ }
+ wg.Done()
+ }()
+ }
+
+ if re.timestamps != nil {
+ re2.timestamps = make(map[time.Time]bool)
+ for k, v := range re.timestamps {
+ wg.Add(1)
+ go func(t time.Time, b bool) {
+ mtx.Lock()
+ re2.timestamps[t] = b
+ mtx.Unlock()
+ wg.Done()
+ }(k, v)
+ }
+ }
+
+ if re.Data != nil {
+ re2.Data = make(map[string]*DataSet)
+ wg.Add(1)
+ go func() {
+ for k, ds := range re.Data {
+ ds2 := &DataSet{Metric: make(map[string]interface{})}
+ for l, v := range ds.Metric {
+ ds2.Metric[l] = v
+ }
+ ds2.Points = ds.Points[:]
+ re2.Data[k] = ds2
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+
+ return re2
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided count, by evicting elements
+// using a least-recently-used methodology. Any timestamps newer than the provided time are removed before
+// sizing, in order to support backfill tolerance. The provided extent will be marked as used during crop.
+func (re *ResultsEnvelope) CropToSize(sz int, t time.Time, lur timeseries.Extent) {
+ re.isCounted = false
+ re.isSorted = false
+ x := len(re.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ re.Data = make(map[string]*DataSet)
+ re.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed
+ if re.ExtentList[x-1].End.After(t) {
+ re.CropToRange(timeseries.Extent{Start: re.ExtentList[0].Start, End: t})
+ }
+
+ tc := re.TimestampCount()
+ el := timeseries.ExtentListLRU(re.ExtentList).UpdateLastUsed(lur, re.StepDuration)
+ sort.Sort(el)
+ if len(re.Data) == 0 || tc <= sz {
+ return
+ }
+
+ rc := tc - sz // the number of timestamps we must remove to meet the retention policy
+ removals := make(map[time.Time]bool)
+ done := false
+ var ok bool
+
+ for _, x := range el {
+ for ts := x.Start; !x.End.Before(ts) && !done; ts = ts.Add(re.StepDuration) {
+ if _, ok = re.timestamps[ts]; ok {
+ removals[ts] = true
+ done = len(removals) >= rc
+ }
+ }
+ if done {
+ break
+ }
+ }
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ for _, s := range re.Data {
+ tmp := s.Points[:0]
+ for _, r := range s.Points {
+ wg.Add(1)
+ go func(p Point) {
+ mtx.Lock()
+ if _, ok := removals[p.Timestamp]; !ok {
+ tmp = append(tmp, p)
+ }
+ mtx.Unlock()
+ wg.Done()
+ }(r)
+ }
+ wg.Wait()
+ s.Points = tmp
+ }
+
+ tl := times.FromMap(removals)
+ sort.Sort(tl)
+
+ for _, t := range tl {
+ for i, e := range el {
+ if e.StartsAt(t) {
+ el[i].Start = e.Start.Add(re.StepDuration)
+ }
+ }
+ }
+ wg.Wait()
+
+ re.ExtentList = timeseries.ExtentList(el).Compress(re.StepDuration)
+ re.Sort()
+}
+
+// CropToRange reduces the Timeseries down to timestamps contained within the provided Extents (inclusive).
+// CropToRange assumes the base Timeseries is already sorted, and will corrupt an unsorted Timeseries
+func (re *ResultsEnvelope) CropToRange(e timeseries.Extent) {
+ re.isCounted = false
+ x := len(re.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ re.Data = make(map[string]*DataSet)
+ re.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the extent of the series is entirely outside the extent of the crop range, return empty set and bail
+ if re.ExtentList.OutsideOf(e) {
+ re.Data = make(map[string]*DataSet)
+ re.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the series extent is entirely inside the extent of the crop range, simply adjust down its ExtentList
+ if re.ExtentList.InsideOf(e) {
+ if re.ValueCount() == 0 {
+ re.Data = make(map[string]*DataSet)
+ }
+ re.ExtentList = re.ExtentList.Crop(e)
+ return
+ }
+
+ if len(re.Data) == 0 {
+ re.ExtentList = re.ExtentList.Crop(e)
+ return
+ }
+
+ deletes := make(map[string]bool)
+
+ for i, s := range re.Data {
+ start := -1
+ end := -1
+ for j, val := range s.Points {
+ t := val.Timestamp
+ if t.Equal(e.End) {
+ // for cases where the first element is the only qualifying element,
+ // start must be incremented or an empty response is returned
+ if j == 0 || t.Equal(e.Start) || start == -1 {
+ start = j
+ }
+ end = j + 1
+ break
+ }
+ if t.After(e.End) {
+ end = j
+ break
+ }
+ if t.Before(e.Start) {
+ continue
+ }
+ if start == -1 && (t.Equal(e.Start) || (e.End.After(t) && t.After(e.Start))) {
+ start = j
+ }
+ }
+ if start != -1 && len(s.Points) > 0 {
+ if end == -1 {
+ end = len(s.Points)
+ }
+ re.Data[i].Points = s.Points[start:end]
+ } else {
+ deletes[i] = true
+ }
+ }
+
+ for i := range deletes {
+ delete(re.Data, i)
+ }
+
+ re.ExtentList = re.ExtentList.Crop(e)
+}
+
+// Sort sorts all Values in each Series chronologically by their timestamp
+func (re *ResultsEnvelope) Sort() {
+
+ if re.isSorted || len(re.Data) == 0 {
+ return
+ }
+
+ tsm := map[time.Time]bool{}
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ for i, s := range re.Data {
+ m := make(map[time.Time]Point)
+ keys := make(times.Times, 0, len(s.Points))
+ for _, v := range s.Points {
+ wg.Add(1)
+ go func(sp Point) {
+ mtx.Lock()
+ if _, ok := m[sp.Timestamp]; !ok {
+ keys = append(keys, sp.Timestamp)
+ m[sp.Timestamp] = sp
+ }
+ tsm[sp.Timestamp] = true
+ mtx.Unlock()
+ wg.Done()
+ }(v)
+ }
+ wg.Wait()
+ sort.Sort(keys)
+ sm := make(Points, 0, len(keys))
+ for _, key := range keys {
+ sm = append(sm, m[key])
+ }
+ re.Data[i].Points = sm
+ }
+
+ sort.Sort(re.ExtentList)
+
+ re.timestamps = tsm
+ re.tslist = times.FromMap(tsm)
+ re.isCounted = true
+ re.isSorted = true
+}
+
+func (re *ResultsEnvelope) updateTimestamps() {
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ if re.isCounted {
+ return
+ }
+ m := make(map[time.Time]bool)
+ for _, s := range re.Data {
+ for _, v := range s.Points {
+ wg.Add(1)
+ go func(t time.Time) {
+ mtx.Lock()
+ m[t] = true
+ mtx.Unlock()
+ wg.Done()
+ }(v.Timestamp)
+ }
+ }
+ wg.Wait()
+ re.timestamps = m
+ re.tslist = times.FromMap(m)
+ re.isCounted = true
+}
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent list
+func (re *ResultsEnvelope) SetExtents(extents timeseries.ExtentList) {
+ re.isCounted = false
+ re.ExtentList = extents
+}
+
+// Extents returns the Timeseries's ExtentList
+func (re *ResultsEnvelope) Extents() timeseries.ExtentList {
+ return re.ExtentList
+}
+
+// TimestampCount returns the number of unique timestamps across the timeseries
+func (re *ResultsEnvelope) TimestampCount() int {
+ re.updateTimestamps()
+ return len(re.timestamps)
+}
+
+// SeriesCount returns the number of individual Series in the Timeseries object
+func (re *ResultsEnvelope) SeriesCount() int {
+ return len(re.Data)
+}
+
+// ValueCount returns the count of all values across all Series in the Timeseries object
+func (re *ResultsEnvelope) ValueCount() int {
+ c := 0
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range re.Data {
+ wg.Add(1)
+ go func(j int) {
+ mtx.Lock()
+ c += j
+ mtx.Unlock()
+ wg.Done()
+ }(len(re.Data[i].Points))
+ }
+ wg.Wait()
+ return c
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
+func (re *ResultsEnvelope) Size() int {
+
+ var size int
+ wg := sync.WaitGroup{}
+
+ var a int
+ ma := sync.Mutex{}
+ for i := range re.Meta {
+ wg.Add(1)
+ go func(j int) {
+ ma.Lock()
+ a += len(re.Meta[j].Name) + len(re.Meta[j].Type)
+ ma.Unlock()
+ wg.Done()
+ }(i)
+ }
+
+ var b int
+ mb := sync.Mutex{}
+ for k, v := range re.Data {
+ b += len(k)
+ wg.Add(1)
+ go func(d *DataSet) {
+ mb.Lock()
+ b += len(d.Points) * 16
+ mb.Unlock()
+ wg.Done()
+ }(v)
+ }
+
+ var c int
+ mc := sync.Mutex{}
+ for _, s := range re.SeriesOrder {
+ wg.Add(1)
+ go func(t string) {
+ mc.Lock()
+ c += len(t)
+ mc.Unlock()
+ wg.Done()
+ }(s)
+ }
+
+ // ExtentList + StepDuration + Timestamps + Times + isCounted + isSorted
+ d := (len(re.ExtentList) * 24) + 8 + (len(re.timestamps) * 9) + (len(re.tslist) * 8) + 2
+
+ wg.Wait()
+ size = a + b + c + d
+ return size
+
+}
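The accessor methods above distinguish three counts: SeriesCount (number of series), ValueCount (total points across all series), and TimestampCount (unique timestamps across the whole envelope). A small sketch of a test in this package (the test name is hypothetical) makes the distinction concrete:

package clickhouse

import (
	"testing"
	"time"
)

func TestEnvelopeCountsSketch(t *testing.T) {
	// two series sharing one timestamp: 2 series, 3 values, 2 unique timestamps
	re := &ResultsEnvelope{
		Data: map[string]*DataSet{
			"a": {Points: []Point{
				{Timestamp: time.Unix(100, 0), Value: 1},
				{Timestamp: time.Unix(200, 0), Value: 1},
			}},
			"b": {Points: []Point{
				{Timestamp: time.Unix(200, 0), Value: 1},
			}},
		},
	}
	if re.SeriesCount() != 2 || re.ValueCount() != 3 || re.TimestampCount() != 2 {
		t.Errorf("got series=%d values=%d timestamps=%d",
			re.SeriesCount(), re.ValueCount(), re.TimestampCount())
	}
}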
diff --git a/internal/proxy/origins/clickhouse/series_test.go b/internal/proxy/origins/clickhouse/series_test.go
new file mode 100644
index 000000000..3fe9c9fbe
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/series_test.go
@@ -0,0 +1,1505 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+)
+
+func TestSetStep(t *testing.T) {
+ re := ResultsEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ re.SetStep(step)
+ if re.StepDuration != step {
+ t.Errorf(`expected "%s". got "%s"`, step, re.StepDuration)
+ }
+}
+
+func TestStep(t *testing.T) {
+ re := ResultsEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ re.SetStep(step)
+ if re.Step() != step {
+ t.Errorf(`expected "%s". got "%s"`, step, re.Step())
+ }
+}
+
+func TestMerge(t *testing.T) {
+ tests := []struct {
+ a, b, merged *ResultsEnvelope
+ }{
+ // Run 0: Series that adhere to rule
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(10, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(5, 0), Value: 1.5},
+ {Timestamp: time.Unix(15, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(5, 0)},
+ timeseries.Extent{Start: time.Unix(15, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(5, 0), time.Unix(10, 0), time.Unix(15, 0)},
+ timestamps: map[time.Time]bool{time.Unix(5, 0): true, time.Unix(10, 0): true, time.Unix(15, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(5, 0), Value: 1.5},
+ {Timestamp: time.Unix(10, 0), Value: 1.5},
+ {Timestamp: time.Unix(15, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ },
+ // Run 1: Empty second series
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true},
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 2: second series has new metric
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true},
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 3: merge one metric, one metric unchanged
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true},
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 4: merge multiple extents
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(30000, 0), Value: 1.5},
+ {Timestamp: time.Unix(35000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(30000, 0), Value: 1.5},
+ {Timestamp: time.Unix(35000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(30000, 0), End: time.Unix(35000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0), time.Unix(30000, 0), time.Unix(35000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true, time.Unix(30000, 0): true, time.Unix(35000, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(30000, 0), Value: 1.5},
+ {Timestamp: time.Unix(35000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(30000, 0), Value: 1.5},
+ {Timestamp: time.Unix(35000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ timeseries.Extent{Start: time.Unix(30000, 0), End: time.Unix(35000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ //
+ //
+ // Run 5: merge with some overlapping extents
+ {
+ a: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(20000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(20000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(20000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0), time.Unix(20000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true, time.Unix(20000, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(20000, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(10000, 0), Value: 1.5},
+ {Timestamp: time.Unix(15000, 0), Value: 1.5},
+ {Timestamp: time.Unix(20000, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(20000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ }
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.a.Merge(true, test.b)
+ if !reflect.DeepEqual(test.merged, test.a) {
+ t.Errorf("mismatch\n actual=%v\nexpected=%v", test.a, test.merged)
+ }
+ })
+ }
+}
+
+func TestCropToRange(t *testing.T) {
+ tests := []struct {
+ before, after *ResultsEnvelope
+ extent timeseries.Extent
+ }{
+ // Run 0: Case where the very first element in the matrix has a timestamp matching the extent's end
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1644004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644004600, 0), End: time.Unix(1644004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1644004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644004600, 0), End: time.Unix(1644004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1644004600, 0),
+ },
+ },
+ // Run 1: Case where we trim nothing
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1644004600, 0),
+ },
+ },
+ // Run 2: Case where we trim everything (all data is too late)
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(10, 0),
+ },
+ },
+ // Run 3: Case where we trim everything (all data is too early)
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(100, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(10000, 0),
+ End: time.Unix(20000, 0),
+ },
+ },
+ // Run 4: Case where we trim some off the beginning
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(300, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(400, 0),
+ },
+ },
+ // Run 5: Case where we trim some off the ends
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "e": {
+ Metric: map[string]interface{}{"__name__": "e"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "e": {
+ Metric: map[string]interface{}{"__name__": "e"},
+ Points: []Point{
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(200, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(200, 0),
+ },
+ },
+ // Run 6: Case where the last datapoint is on the Crop extent
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "f": {
+ Metric: map[string]interface{}{"__name__": "f"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "f": {
+ Metric: map[string]interface{}{"__name__": "f"},
+ Points: []Point{
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+ // Run 7: Case where we aren't given any datapoints
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "g": {
+ Metric: map[string]interface{}{"__name__": "g"},
+ Points: []Point{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ // Run 9: Case where after cropping, an inner series is empty/removed
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 10: Case where after cropping, the front series is empty/removed
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 11: Case where after cropping, the back series is empty/removed
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(100, 0), Value: 1.5},
+ {Timestamp: time.Unix(200, 0), Value: 1.5},
+ {Timestamp: time.Unix(300, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(400, 0), Value: 1.5},
+ {Timestamp: time.Unix(500, 0), Value: 1.5},
+ {Timestamp: time.Unix(600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 12: Case where we short circuit since the dataset is already entirely inside the crop range
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{},
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{},
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 13: Case where we short circuit since the dataset is empty
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(300, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToRange(test.extent)
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\ngot=%v", test.after, test.before)
+ }
+ })
+ }
+}
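The test table above encodes the expected CropToRange semantics: points outside the requested extent are dropped, and any series left with no points is removed from the envelope. As a minimal standalone sketch of the per-series trimming rule (hypothetical helper, not the production implementation):

package main

import (
	"fmt"
	"time"
)

type point struct {
	ts  time.Time
	val float64
}

// cropPoints keeps only the points whose timestamps fall within [start, end].
func cropPoints(pts []point, start, end time.Time) []point {
	out := make([]point, 0, len(pts))
	for _, p := range pts {
		if !p.ts.Before(start) && !p.ts.After(end) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pts := []point{
		{time.Unix(100, 0), 1.5},
		{time.Unix(200, 0), 1.5},
		{time.Unix(300, 0), 1.5},
	}
	// Mirrors Run 4 above: cropping to [300, 400] keeps only the point at 300.
	fmt.Println(cropPoints(pts, time.Unix(300, 0), time.Unix(400, 0)))
}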
+
+const testStep = time.Duration(10) * time.Second
+
+func TestCropToSize(t *testing.T) {
+
+ now := time.Now().Truncate(testStep)
+
+ tests := []struct {
+ before, after *ResultsEnvelope
+ size int
+ bft time.Time
+ extent timeseries.Extent
+ }{
+ // case 0: where we already have the number of timestamps we are cropping to
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004600, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004600, 0): true},
+ tslist: times.Times{time.Unix(1444004600, 0)},
+ isCounted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004600, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 1
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1444004610, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004610, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004610, 0): true},
+ tslist: times.Times{time.Unix(1444004610, 0)},
+ isCounted: true,
+ isSorted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004610, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 2 - empty extent list
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{},
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{},
+ size: 1,
+ bft: now,
+ },
+
+ // case 3 - backfill tolerance
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004610, 0), Value: 1.5},
+ {Timestamp: now, Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: now},
+ },
+ StepDuration: testStep,
+ },
+ after: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1444004610, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: now.Add(-5 * time.Minute)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004610, 0): true},
+ tslist: times.Times{time.Unix(1444004610, 0)},
+ isCounted: true,
+ isSorted: false,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: now,
+ },
+ size: 2,
+ bft: now.Add(-5 * time.Minute),
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToSize(test.size, test.bft, test.extent)
+
+ for i := range test.before.ExtentList {
+ test.before.ExtentList[i].LastUsed = time.Time{}
+ }
+
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\n got=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestUpdateTimestamps(t *testing.T) {
+
+ // test edge condition here (core functionality is tested across this file)
+ re := ResultsEnvelope{isCounted: true}
+ re.updateTimestamps()
+ if re.timestamps != nil {
+ t.Errorf("expected nil map, got size %d", len(re.timestamps))
+ }
+
+}
+
+func TestClone(t *testing.T) {
+
+ tests := []struct {
+ before *ResultsEnvelope
+ }{
+ // Run 0
+ {
+ before: &ResultsEnvelope{
+ Meta: []FieldDefinition{{Name: "1", Type: "string"}},
+ Serializers: map[string]func(interface{}){"test": nil},
+ tslist: times.Times{time.Unix(1644001200, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1644001200, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1644001200, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644001200, 0), End: time.Unix(1644001200, 0)},
+ },
+ StepDuration: time.Duration(3600) * time.Second,
+ SeriesOrder: []string{"a"},
+ },
+ },
+
+ // Run 1
+ {
+ before: &ResultsEnvelope{
+ tslist: times.Times{time.Unix(1644001200, 0), time.Unix(1644004800, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1644001200, 0): true, time.Unix(1644004800, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1644001200, 0), Value: 1.5},
+ },
+ },
+
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(1644001200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1644004800, 0), Value: 1.5},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644001200, 0), End: time.Unix(1644004800, 0)},
+ },
+ StepDuration: time.Duration(3600) * time.Second,
+ SeriesOrder: []string{"a", "b"},
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ after := test.before.Clone()
+ if !reflect.DeepEqual(test.before, after) {
+ t.Errorf("mismatch\nexpected %v\nactual %v", test.before, after)
+ }
+ })
+ }
+
+}
+
+func TestSort(t *testing.T) {
+ tests := []struct {
+ before, after *ResultsEnvelope
+ extent timeseries.Extent
+ }{
+ // Case where we trim nothing
+ {
+ before: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5}, // sort should also remove duplicate timestamps
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ },
+ },
+ },
+ },
+ after: &ResultsEnvelope{
+ isSorted: true,
+ isCounted: true,
+ tslist: []time.Time{time.Unix(1544004000, 0), time.Unix(1544004200, 0), time.Unix(1544004600, 0), time.Unix(1544004800, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1544004000, 0): true, time.Unix(1544004200, 0): true,
+ time.Unix(1544004600, 0): true, time.Unix(1544004800, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ },
+ },
+ "b": {
+ Metric: map[string]interface{}{"__name__": "b"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ },
+ },
+ "c": {
+ Metric: map[string]interface{}{"__name__": "c"},
+ Points: []Point{
+ {Timestamp: time.Unix(1544004000, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004200, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004600, 0), Value: 1.5},
+ {Timestamp: time.Unix(1544004800, 0), Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.isSorted = false
+ test.before.Sort()
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\n actual=%v", test.after, test.before)
+ }
+ // test isSorted short circuit
+ test.before.Sort()
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\n actual=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestSetExtents(t *testing.T) {
+ re := &ResultsEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ re.SetExtents(ex)
+ if len(re.ExtentList) != 1 {
+ t.Errorf(`expected 1. got %d`, len(re.ExtentList))
+ }
+}
+
+func TestExtents(t *testing.T) {
+ re := &ResultsEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ re.SetExtents(ex)
+ e := re.Extents()
+ if len(e) != 1 {
+ t.Errorf(`expected 1. got %d`, len(re.ExtentList))
+ }
+}
+
+func TestSeriesCount(t *testing.T) {
+ re := &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(199, 0), Value: 1.5},
+ {Timestamp: time.Unix(299, 0), Value: 1.5},
+ },
+ },
+ },
+ }
+ if re.SeriesCount() != 1 {
+ t.Errorf("expected 1 got %d.", re.SeriesCount())
+ }
+}
+
+func TestValueCount(t *testing.T) {
+ re := &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(199, 0), Value: 1.5},
+ {Timestamp: time.Unix(299, 0), Value: 1.5},
+ },
+ },
+ },
+ }
+ if re.ValueCount() != 3 {
+ t.Errorf("expected 3 got %d.", re.ValueCount())
+ }
+}
+
+func TestTimestampCount(t *testing.T) {
+
+ tests := []struct {
+ ts *ResultsEnvelope
+ expected int
+ }{
+ {
+ ts: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(199, 0), Value: 1.5},
+ {Timestamp: time.Unix(299, 0), Value: 1.5},
+ },
+ },
+ },
+ },
+ expected: 3,
+ },
+
+ {
+ ts: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "d": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(199, 0), Value: 1.5},
+ },
+ },
+ },
+ },
+ expected: 2,
+ },
+
+ {
+ ts: &ResultsEnvelope{
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "d"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(199, 0), Value: 1.5},
+ },
+ },
+ "e": {
+ Metric: map[string]interface{}{"__name__": "e"},
+ Points: []Point{
+ {Timestamp: time.Unix(99, 0), Value: 1.5},
+ {Timestamp: time.Unix(299, 0), Value: 1.5},
+ },
+ },
+ },
+ },
+ expected: 3,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ tc := test.ts.TimestampCount()
+ if tc != test.expected {
+ t.Errorf("expected %d got %d.", test.expected, tc)
+ }
+ })
+ }
+}
+
+func TestMergeSeriesOrder(t *testing.T) {
+
+ re := ResultsEnvelope{}
+ so1 := []string{"a", "e"}
+ re.mergeSeriesOrder(so1)
+ if !reflect.DeepEqual(re.SeriesOrder, so1) {
+ t.Errorf("expected [%s] got [%s]", strings.Join(so1, ","), strings.Join(re.SeriesOrder, ","))
+ }
+ re.Data = map[string]*DataSet{"a": nil, "e": nil}
+
+ so2 := []string{"d", "e"}
+ ex2 := []string{"a", "d", "e"}
+ re.mergeSeriesOrder(so2)
+ if !reflect.DeepEqual(re.SeriesOrder, ex2) {
+ t.Errorf("expected [%s] got [%s]", strings.Join(ex2, ","), strings.Join(re.SeriesOrder, ","))
+ }
+ re.Data = map[string]*DataSet{"a": nil, "d": nil, "e": nil}
+
+ so3 := []string{"b", "c", "e"}
+ ex3 := []string{"a", "d", "b", "c", "e"}
+ re.mergeSeriesOrder(so3)
+ if !reflect.DeepEqual(re.SeriesOrder, ex3) {
+ t.Errorf("expected [%s] got [%s]", strings.Join(ex3, ","), strings.Join(re.SeriesOrder, ","))
+ }
+ re.Data = map[string]*DataSet{"a": nil, "d": nil, "b": nil, "c": nil, "e": nil}
+
+ so4 := []string{"f"}
+ ex4 := []string{"a", "d", "b", "c", "e", "f"}
+ re.mergeSeriesOrder(so4)
+ if !reflect.DeepEqual(re.SeriesOrder, ex4) {
+ t.Errorf("expected [%s] got [%s]", strings.Join(ex4, ","), strings.Join(re.SeriesOrder, ","))
+ }
+
+}
+
+func TestSize(t *testing.T) {
+ r := &ResultsEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(5, 0), time.Unix(10, 0), time.Unix(15, 0)},
+ timestamps: map[time.Time]bool{time.Unix(5, 0): true, time.Unix(10, 0): true, time.Unix(15, 0): true},
+ Data: map[string]*DataSet{
+ "a": {
+ Metric: map[string]interface{}{"__name__": "a"},
+ Points: []Point{
+ {Timestamp: time.Unix(5, 0), Value: 1.5},
+ {Timestamp: time.Unix(10, 0), Value: 1.5},
+ {Timestamp: time.Unix(15, 0), Value: 1.5},
+ },
+ },
+ },
+ Meta: []FieldDefinition{{Name: "test", Type: "Test"}},
+ SeriesOrder: []string{"test"},
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ }
+ i := r.Size()
+ const expected = 146
+ if i != expected {
+ t.Errorf("expected %d got %d", expected, i)
+ }
+}
diff --git a/internal/proxy/origins/clickhouse/stubs.go b/internal/proxy/origins/clickhouse/stubs.go
new file mode 100644
index 000000000..bc849dedc
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/stubs.go
@@ -0,0 +1,41 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// This file holds funcs required by the Proxy Client or Timeseries interfaces
+// that are (currently) unused by the ClickHouse implementation.
+
+// Series (timeseries.Timeseries Interface) stub funcs
+
+// FastForwardURL is not used for ClickHouse and is here to conform to the Proxy Client interface
+func (c *Client) FastForwardURL(r *http.Request) (*url.URL, error) {
+ return nil, nil
+}
+
+// ClickHouse Client (proxy.Client Interface) stub funcs
+
+// UnmarshalInstantaneous is not used for ClickHouse and is here to conform to the Proxy Client interface
+func (c *Client) UnmarshalInstantaneous(data []byte) (timeseries.Timeseries, error) {
+ return nil, nil
+}
+
+// QueryRangeHandler is not used for ClickHouse and is here to conform to the Proxy Client interface
+func (c *Client) QueryRangeHandler(w http.ResponseWriter, r *http.Request) {}
diff --git a/internal/proxy/origins/clickhouse/stubs_test.go b/internal/proxy/origins/clickhouse/stubs_test.go
new file mode 100644
index 000000000..1f924e131
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/stubs_test.go
@@ -0,0 +1,51 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "testing"
+)
+
+func TestFastForwardURL(t *testing.T) {
+
+ client := &Client{}
+ u, err := client.FastForwardURL(nil)
+ if u != nil {
+ t.Errorf("Expected nil url, got %s", u)
+ }
+
+ if err != nil {
+ t.Errorf("Expected nil err, got %s", err)
+ }
+}
+
+func TestUnmarshalInstantaneous(t *testing.T) {
+
+ client := &Client{}
+ tr, err := client.UnmarshalInstantaneous(nil)
+
+ if tr != nil {
+ t.Errorf("Expected nil timeseries, got %s", tr)
+ }
+
+ if err != nil {
+ t.Errorf("Expected nil err, got %s", err)
+ }
+
+}
+
+func TestQueryRangeHandler(t *testing.T) {
+ client := &Client{}
+ client.QueryRangeHandler(nil, nil)
+}
diff --git a/internal/proxy/origins/clickhouse/tokenization.go b/internal/proxy/origins/clickhouse/tokenization.go
new file mode 100644
index 000000000..84285e776
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/tokenization.go
@@ -0,0 +1,138 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/regexp/matching"
+)
+
+// This file handles tokenization of time parameters within ClickHouse queries
+// for cache key hashing and delta proxy caching.
+
+// Tokens for String Interpolation
+const (
+ tkTimestamp1 = "<$TIMESTAMP1$>"
+ tkTimestamp2 = "<$TIMESTAMP2$>"
+)
+
+var reTimeFieldAndStep, reTimeClauseAlt *regexp.Regexp
+
+func init() {
+ reTimeFieldAndStep = regexp.MustCompile(`(?i)select\s+\(\s*intdiv\s*\(\s*touint32\s*\(\s*(?P<timeField>[a-zA-Z0-9\._-]+)\s*\)\s*,\s*(?P<step>[0-9]+)\s*\)\s*\*\s*[0-9]+\s*\)`)
+ reTimeClauseAlt = regexp.MustCompile(`(?i)\s+(?P<expression>(?P<operator>>=|>|=|between)\s+(?P<modifier>toDate(Time)?)\((?P<ts1>[0-9]+)\)(?P<timeExpr2>\s+and\s+toDate(Time)?\((?P<ts2>[0-9]+)\))?)`)
+}
+
+func interpolateTimeQuery(template, timeField string, extent *timeseries.Extent) string {
+ return strings.Replace(strings.Replace(template, tkTimestamp1, strconv.Itoa(int(extent.Start.Unix())), -1), tkTimestamp2, strconv.Itoa(int(extent.End.Unix())), -1)
+}
+
+var compiledRe = make(map[string]*regexp.Regexp)
+
+const timeClauseRe = `(?i)(?P<preamble>where|and)\s+#TIME_FIELD#\s+(?P<timeExpr1>(?P<operator>>=|>|=|between)\s+(?P<modifier>toDate(Time)?)\((?P<ts1>[0-9]+)\))(?P<timeExpr2>\s+and\s+toDate(Time)?\((?P<ts2>[0-9]+)\))?`
+
+func getQueryParts(query string, timeField string) (string, timeseries.Extent, bool, error) {
+
+ tcKey := timeField + "-tc"
+ trex, ok := compiledRe[tcKey]
+ if !ok {
+ trex = regexp.MustCompile(strings.Replace(timeClauseRe, "#TIME_FIELD#", timeField, -1))
+ compiledRe[tcKey] = trex
+ }
+
+ m := matching.GetNamedMatches(trex, query, nil)
+ if len(m) == 0 {
+ return "", timeseries.Extent{}, false, fmt.Errorf("unable to parse time from query: %s", query)
+ }
+
+ ext, isRelativeTime, err := parseQueryExtents(query, m)
+ if err != nil {
+ return "", timeseries.Extent{}, false, err
+ }
+
+ tq := tokenizeQuery(query, m)
+ return tq, ext, isRelativeTime, err
+}
+
+// tokenizeQuery will take a ClickHouse query and replace its time range conditional with a tokenized BETWEEN clause using <$TIMESTAMP1$> and <$TIMESTAMP2$>
+func tokenizeQuery(query string, timeParts map[string]string) string {
+ // First check the existence of timeExpr1, and if exists, tokenize
+ if expr, ok := timeParts["timeExpr1"]; ok {
+ if modifier, ok := timeParts["modifier"]; ok {
+ query = strings.Replace(query, expr, fmt.Sprintf("BETWEEN %s(%s) AND %s(%s)", modifier, tkTimestamp1, modifier, tkTimestamp2), -1)
+ // Then check the existence of timeExpr2, and if exists, remove from tokenized version
+ if expr, ok := timeParts["timeExpr2"]; ok {
+ query = strings.Replace(query, expr, "", -1)
+ }
+ }
+ }
+
+ if ts1, ok := timeParts["ts1"]; ok {
+ if strings.Contains(query, "("+ts1+")") {
+ m := matching.GetNamedMatches(reTimeClauseAlt, query, nil)
+ if len(m) > 0 {
+ if modifier, ok := m["modifier"]; ok {
+ if expression, ok := m["expression"]; ok {
+ query = strings.Replace(query, expression, fmt.Sprintf("BETWEEN %s(%s) AND %s(%s)", modifier, tkTimestamp1, modifier, tkTimestamp2), -1)
+ }
+ }
+ }
+ }
+ }
+
+ return query
+}
+
+func parseQueryExtents(query string, timeParts map[string]string) (timeseries.Extent, bool, error) {
+
+ var e timeseries.Extent
+
+ isRelativeTime := true
+
+ op, ok := timeParts["operator"]
+ if !ok {
+ return e, false, fmt.Errorf("failed to parse query: %s", "could not find operator")
+ }
+
+ if t, ok := timeParts["ts1"]; ok {
+ i, err := strconv.ParseInt(t, 10, 64)
+ if err != nil {
+ return e, false, fmt.Errorf("failed to parse query: %s", "could not find start time")
+ }
+ e.Start = time.Unix(i, 0)
+ }
+
+ if strings.ToLower(op) == "between" {
+ isRelativeTime = false
+ if t, ok := timeParts["ts2"]; ok {
+ i, err := strconv.ParseInt(t, 10, 64)
+ if err != nil {
+ return e, false, fmt.Errorf("failed to parse query: %s", "could not determine end time")
+ }
+ e.End = time.Unix(i, 0)
+ } else {
+ return e, false, fmt.Errorf("failed to parse query: %s", "could not find end time")
+ }
+ } else {
+ e.End = time.Now()
+ }
+
+ return e, isRelativeTime, nil
+}
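interpolateTimeQuery above is a plain string substitution: the two timestamp tokens in a previously tokenized query are swapped for the extent's start and end Unix times. A self-contained sketch of that step (hypothetical query text; token names taken from the constants above):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

const (
	tkTimestamp1 = "<$TIMESTAMP1$>"
	tkTimestamp2 = "<$TIMESTAMP2$>"
)

func main() {
	tokenized := "SELECT count() FROM requests WHERE t BETWEEN toDateTime(" +
		tkTimestamp1 + ") AND toDateTime(" + tkTimestamp2 + ")"
	start, end := time.Unix(1577836800, 0), time.Unix(1577840400, 0)

	// The same substitution interpolateTimeQuery performs for a given extent.
	q := strings.Replace(tokenized, tkTimestamp1, strconv.Itoa(int(start.Unix())), -1)
	q = strings.Replace(q, tkTimestamp2, strconv.Itoa(int(end.Unix())), -1)

	fmt.Println(q)
	// SELECT count() FROM requests WHERE t BETWEEN toDateTime(1577836800) AND toDateTime(1577840400)
}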
diff --git a/internal/proxy/origins/clickhouse/tokenization_test.go b/internal/proxy/origins/clickhouse/tokenization_test.go
new file mode 100644
index 000000000..d2664ffab
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/tokenization_test.go
@@ -0,0 +1,56 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "testing"
+)
+
+func TestGetQueryPartsFailure(t *testing.T) {
+ query := "this should fail to parse"
+ _, _, _, err := getQueryParts(query, "")
+ if err == nil {
+ t.Errorf("should have produced error")
+ }
+
+}
+
+func TestParseQueryExtents(t *testing.T) {
+
+ _, _, err := parseQueryExtents("", map[string]string{})
+ if err == nil {
+ t.Errorf("expected error: %s", `failed to parse query: could not find operator`)
+ }
+
+ _, _, err = parseQueryExtents("", map[string]string{"operator": "", "ts1": "a"})
+ if err == nil {
+ t.Errorf("expected error: %s", `failed to parse query: could not find start time`)
+ }
+
+ _, _, err = parseQueryExtents("", map[string]string{"operator": "between", "ts1": "1", "ts2": "a"})
+ if err == nil {
+ t.Errorf("expected error: %s", `failed to parse query: could not determine end time`)
+ }
+
+ _, _, err = parseQueryExtents("", map[string]string{"operator": "between", "ts1": "1"})
+ if err == nil {
+ t.Errorf("expected error: %s", `failed to parse query: could not find end time`)
+ }
+
+ _, _, err = parseQueryExtents("", map[string]string{"operator": "x", "ts1": "1"})
+ if err != nil {
+ t.Error(err)
+ }
+
+}
diff --git a/internal/proxy/origins/clickhouse/url.go b/internal/proxy/origins/clickhouse/url.go
new file mode 100644
index 000000000..277cd6768
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/url.go
@@ -0,0 +1,70 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// Common URL Parameter Names
+const (
+ upQuery = "query"
+)
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy configuration
+func (c *Client) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to construct the full upstream URL
+func (c *Client) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/", "/", 1)
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
+
+// SetExtent will change the upstream request query to use the provided Extent
+func (c *Client) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery, extent *timeseries.Extent) {
+
+ if extent == nil || r == nil || trq == nil {
+ return
+ }
+
+ p := r.URL.Query()
+ t := trq.TemplateURL.Query()
+ q := t.Get(upQuery)
+
+ if q != "" {
+ p.Set(upQuery, interpolateTimeQuery(q, trq.TimestampFieldName, extent))
+ }
+
+ r.URL.RawQuery = p.Encode()
+}
diff --git a/internal/proxy/origins/clickhouse/url_test.go b/internal/proxy/origins/clickhouse/url_test.go
new file mode 100644
index 000000000..eccd6948d
--- /dev/null
+++ b/internal/proxy/origins/clickhouse/url_test.go
@@ -0,0 +1,68 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package clickhouse
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestSetExtent(t *testing.T) {
+
+ start := time.Now().Add(time.Duration(-6) * time.Hour)
+ end := time.Now()
+ expected := "query=select+%28intdiv%28touint32%28myTimeField%29%2C+60%29+%2A+60%29+%2A+where+myTimeField+BETWEEN+toDateTime%28" +
+ fmt.Sprintf("%d", start.Unix()) + "%29+AND+toDateTime%28" + fmt.Sprintf("%d", end.Unix()) + "%29+end"
+
+ client := &Client{}
+ tu := &url.URL{RawQuery: "query=select (intdiv(touint32(myTimeField), 60) * 60) * where myTimeField BETWEEN toDateTime(<$TIMESTAMP1$>) AND toDateTime(<$TIMESTAMP2$>) end"}
+ e := &timeseries.Extent{Start: start, End: end}
+
+ r, _ := http.NewRequest(http.MethodGet, tu.String(), nil)
+ trq := &timeseries.TimeRangeQuery{TimestampFieldName: "myTimeField", TemplateURL: tu}
+
+ client.SetExtent(r, trq, e)
+ if expected != r.URL.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, r.URL.RawQuery)
+ }
+
+ client.SetExtent(r, trq, nil)
+ if expected != r.URL.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, r.URL.RawQuery)
+ }
+
+}
+
+func TestBuildUpstreamURL(t *testing.T) {
+
+ cfg := config.NewConfig()
+ oc := cfg.Origins["default"]
+ oc.Scheme = "http"
+ oc.Host = "0"
+ oc.PathPrefix = ""
+
+ client := &Client{name: "default", config: oc}
+ r, err := http.NewRequest(http.MethodGet, "http://0/default/?query=SELECT+1+FORMAT+JSON", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ client.BuildUpstreamURL(r)
+
+}
diff --git a/internal/proxy/origins/client.go b/internal/proxy/origins/client.go
new file mode 100644
index 000000000..99052a7d4
--- /dev/null
+++ b/internal/proxy/origins/client.go
@@ -0,0 +1,37 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package origins
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+)
+
+// Client is the primary interface for interoperating with Trickster and upstream TSDBs
+type Client interface {
+ // Handlers returns a map of the HTTP Handlers the client has registered
+ Handlers() map[string]http.Handler
+ // DefaultPathConfigs returns the default PathConfigs for the given OriginType
+ DefaultPathConfigs(*config.OriginConfig) map[string]*config.PathConfig
+ // Configuration returns the configuration for the Proxy Client
+ Configuration() *config.OriginConfig
+ // Name returns the name of the origin the Proxy Client is handling
+ Name() string
+ // HTTPClient will return the HTTP Client for this Origin
+ HTTPClient() *http.Client
+ // SetCache sets the Cache object the client will use when caching origin content
+ SetCache(cache.Cache)
+}
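Each origin type (ClickHouse, InfluxDB, etc.) supplies a concrete type satisfying this interface. As an in-package illustration only, a no-op skeleton with a compile-time conformance check (the method bodies are placeholders, not real origin logic):

package origins

import (
	"net/http"

	"github.com/Comcast/trickster/internal/cache"
	"github.com/Comcast/trickster/internal/config"
)

// noopClient is a hypothetical placeholder used only to illustrate the
// interface surface; it performs no proxying.
type noopClient struct {
	name   string
	config *config.OriginConfig
	cache  cache.Cache
}

var _ Client = &noopClient{} // compile-time conformance check

func (c *noopClient) Handlers() map[string]http.Handler { return nil }
func (c *noopClient) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
	return nil
}
func (c *noopClient) Configuration() *config.OriginConfig { return c.config }
func (c *noopClient) Name() string                        { return c.name }
func (c *noopClient) HTTPClient() *http.Client            { return nil }
func (c *noopClient) SetCache(cc cache.Cache)             { c.cache = cc }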
diff --git a/internal/proxy/origins/influxdb/handler_health.go b/internal/proxy/origins/influxdb/handler_health.go
new file mode 100644
index 000000000..73d752930
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_health.go
@@ -0,0 +1,68 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+// HealthHandler checks the health of the Configured Upstream Origin
+func (c *Client) HealthHandler(w http.ResponseWriter, r *http.Request) {
+
+ if c.healthURL == nil {
+ c.populateHealthCheckRequestValues()
+ }
+
+ if c.healthMethod == "-" {
+ w.WriteHeader(400)
+ w.Write([]byte("Health Check URL not Configured for origin: " + c.config.Name))
+ return
+ }
+
+ req, _ := http.NewRequest(c.healthMethod, c.healthURL.String(), nil)
+ req = req.WithContext(r.Context())
+
+ req.Header = c.healthHeaders
+ engines.DoProxy(w, req)
+}
+
+func (c *Client) populateHealthCheckRequestValues() {
+
+ oc := c.config
+
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/"
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ q := url.Values{"query": {oc.HealthCheckQuery}}
+ oc.HealthCheckQuery = q.Encode()
+ }
+
+ c.healthURL = c.BaseURL()
+ c.healthURL.Path += oc.HealthCheckUpstreamPath
+ c.healthURL.RawQuery = oc.HealthCheckQuery
+ c.healthMethod = oc.HealthCheckVerb
+
+ if oc.HealthCheckHeaders != nil {
+ c.healthHeaders = http.Header{}
+ headers.UpdateHeaders(c.healthHeaders, oc.HealthCheckHeaders)
+ }
+}
diff --git a/internal/proxy/origins/influxdb/handler_health_test.go b/internal/proxy/origins/influxdb/handler_health_test.go
new file mode 100644
index 000000000..e33664ea1
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_health_test.go
@@ -0,0 +1,103 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestHealthHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "influxdb", "/health", "debug")
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ client.healthMethod = "-"
+
+ w = httptest.NewRecorder()
+ client.HealthHandler(w, r)
+ resp = w.Result()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status: 400 got %d.", resp.StatusCode)
+ }
+
+}
+
+func TestHealthHandlerCustomPath(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "", nil, "influxdb", "/health", "debug")
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+
+ client.config.HealthCheckUpstreamPath = "-"
+ client.config.HealthCheckVerb = "-"
+ client.config.HealthCheckQuery = "-"
+
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "" {
+ t.Errorf("expected '' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/handler_proxy.go b/internal/proxy/origins/influxdb/handler_proxy.go
new file mode 100644
index 000000000..d06e61aee
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_proxy.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyHandler sends a request through the basic reverse proxy to the origin, and services non-cacheable InfluxDB API calls
+func (c *Client) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/influxdb/handler_proxy_test.go b/internal/proxy/origins/influxdb/handler_proxy_test.go
new file mode 100644
index 000000000..ef94aebbb
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_proxy_test.go
@@ -0,0 +1,54 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "test", nil, "influxdb", "/", "debug")
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.ProxyHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "test" {
+ t.Errorf("expected 'test' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/handler_query.go b/internal/proxy/origins/influxdb/handler_query.go
new file mode 100644
index 000000000..9972abebf
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_query.go
@@ -0,0 +1,74 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/timeconv"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/regexp/matching"
+)
+
+// QueryHandler handles timeseries requests for InfluxDB and processes them through the delta proxy cache
+func (c *Client) QueryHandler(w http.ResponseWriter, r *http.Request) {
+
+ rqlc := strings.Replace(strings.ToLower(r.URL.RawQuery), "%20", "+", -1)
+ // if it's not a select statement, just proxy it instead
+ if (!strings.HasPrefix(rqlc, "q=select+")) && (!(strings.Index(rqlc, "&q=select+") > 0)) {
+ c.ProxyHandler(w, r)
+ return
+ }
+
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// ParseTimeRangeQuery parses the key parts of a TimeRangeQuery from the inbound HTTP Request
+func (c *Client) ParseTimeRangeQuery(r *http.Request) (*timeseries.TimeRangeQuery, error) {
+
+ trq := &timeseries.TimeRangeQuery{Extent: timeseries.Extent{}}
+ trq.TemplateURL = urls.Clone(r.URL)
+
+ qi := trq.TemplateURL.Query()
+ if p, ok := qi[upQuery]; ok {
+ trq.Statement = p[0]
+ } else {
+ return nil, errors.MissingURLParam(upQuery)
+ }
+
+ // if the Step can't be parsed from the query (e.g., no "group by time(1m)" clause), return ErrStepParse so the request is proxied instead of cached
+ step, found := matching.GetNamedMatch("step", reStep, trq.Statement)
+ if !found {
+ return nil, errors.ErrStepParse
+ }
+
+ stepDuration, err := timeconv.ParseDuration(step)
+ if err != nil {
+ return nil, errors.ErrStepParse
+ }
+ trq.Step = stepDuration
+
+ trq.Statement, trq.Extent = getQueryParts(trq.Statement)
+
+ // Swap in the tokenized query in the URL params
+ qi.Set(upQuery, trq.Statement)
+ trq.TemplateURL.RawQuery = qi.Encode()
+ return trq, nil
+
+}
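QueryHandler routes a request to the delta proxy cache only when the lower-cased query string carries a SELECT in its "q" parameter; everything else falls through to ProxyHandler. A standalone sketch mirroring that check (hypothetical helper name, same string logic as above):

package main

import (
	"fmt"
	"strings"
)

// isCacheableQuery mirrors the routing check in QueryHandler.
func isCacheableQuery(rawQuery string) bool {
	rqlc := strings.Replace(strings.ToLower(rawQuery), "%20", "+", -1)
	return strings.HasPrefix(rqlc, "q=select+") || strings.Index(rqlc, "&q=select+") > 0
}

func main() {
	fmt.Println(isCacheableQuery("q=SELECT%20mean(%22value%22)%20FROM%20cpu")) // true
	fmt.Println(isCacheableQuery("db=telegraf&q=select+*+from+cpu"))           // true
	fmt.Println(isCacheableQuery("q=SHOW%20DATABASES"))                        // false
}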
diff --git a/internal/proxy/origins/influxdb/handler_query_test.go b/internal/proxy/origins/influxdb/handler_query_test.go
new file mode 100644
index 000000000..374e9f0a9
--- /dev/null
+++ b/internal/proxy/origins/influxdb/handler_query_test.go
@@ -0,0 +1,208 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+
+ "github.com/influxdata/influxdb/pkg/testing/assert"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestParseTimeRangeQuery(t *testing.T) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{"q": {`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time >= now() - 6h GROUP BY time(15s), "cluster" fill(null)`}, "epoch": {"ms"}}).Encode(),
+ }}
+ client := &Client{}
+ res, err := client.ParseTimeRangeQuery(req)
+ if err != nil {
+ t.Error(err)
+ } else {
+ assert.Equal(t, int(res.Step.Seconds()), 15)
+ assert.Equal(t, int(res.Extent.End.Sub(res.Extent.Start).Hours()), 6)
+ }
+}
+
+func TestQueryHandlerWithSelect(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "influxdb", "/query?q=select%20test", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.QueryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestQueryHandlerNotSelect(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "influxdb", "/query", "debug")
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.QueryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestParseTimeRangeQueryMissingQuery(t *testing.T) {
+ expected := errors.MissingURLParam(upQuery).Error()
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "q_": {`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time >= now() - 6h GROUP BY time(15s), "cluster" fill(null)`},
+ "epoch": {"ms"},
+ }).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`Expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`Expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryBadDuration(t *testing.T) {
+
+ expected := errors.ErrStepParse
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "q": {`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time >= now() - 6h GROUP BY times(15s), "cluster" fill(null)`},
+ "epoch": {"ms"},
+ }).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`Expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err != expected {
+ t.Errorf(`Expected "%s", got "%s"`, expected.Error(), err.Error())
+ }
+}
+
+// func TestParseTimeRangeQueryWithBothTimes(t *testing.T) {
+// req := &http.Request{URL: &url.URL{
+// Scheme: "https",
+// Host: "blah.com",
+// Path: "/",
+// RawQuery: url.Values(map[string][]string{"q": []string{`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time >= now() - 6h AND time < now() - 3h GROUP BY time(15s), "cluster" fill(null)`}, "epoch": []string{"ms"}}).Encode(),
+// }}
+// client := &Client{}
+// res, err := client.ParseTimeRangeQuery(&model.Request{ClientRequest: req, URL: req.URL, TemplateURL: req.URL})
+// if err != nil {
+// } else {
+// assert.Equal(t, int(res.Step), 15)
+// assert.Equal(t, int(res.Extent.End.Sub(res.Extent.Start).Hours()), 3)
+// }
+// }
+
+// func TestParseTimeRangeQueryWithoutNow(t *testing.T) {
+// req := &http.Request{URL: &url.URL{
+// Scheme: "https",
+// Host: "blah.com",
+// Path: "/",
+// RawQuery: url.Values(map[string][]string{"q": []string{`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time > 2052926911485ms AND time < 52926911486ms GROUP BY time(15s), "cluster" fill(null)`}, "epoch": []string{"ms"}}).Encode(),
+// }}
+// client := &Client{}
+// res, err := client.ParseTimeRangeQuery(&model.Request{ClientRequest: req, URL: req.URL, TemplateURL: req.URL})
+// if err != nil {
+// } else {
+// assert.Equal(t, int(res.Step), 15)
+// assert.Equal(t, res.Extent.End.UTC().Second()-res.Extent.Start.UTC().Second(), 1)
+// }
+// }
+
+// func TestParseTimeRangeQueryWithAbsoluteTime(t *testing.T) {
+// req := &http.Request{URL: &url.URL{
+// Scheme: "https",
+// Host: "blah.com",
+// Path: "/",
+// RawQuery: url.Values(map[string][]string{"q": []string{`SELECT mean("value") FROM "monthly"."rollup.1min" WHERE ("application" = 'web') AND time < 2052926911486ms GROUP BY time(15s), "cluster" fill(null)`}, "epoch": []string{"ms"}}).Encode(),
+// }}
+// client := &Client{}
+// res, err := client.ParseTimeRangeQuery(&model.Request{ClientRequest: req, URL: req.URL, TemplateURL: req.URL})
+// if err != nil {
+// } else {
+// assert.Equal(t, int(res.Step), 15)
+// assert.Equal(t, res.Extent.Start.UTC().IsZero(), true)
+// }
+// }
diff --git a/internal/proxy/origins/influxdb/influxdb.go b/internal/proxy/origins/influxdb/influxdb.go
new file mode 100644
index 000000000..7736f589a
--- /dev/null
+++ b/internal/proxy/origins/influxdb/influxdb.go
@@ -0,0 +1,68 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+)
+
+// Client implements the Proxy Client interface
+type Client struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+ handlers map[string]http.Handler
+ handlersRegistered bool
+
+ healthURL *url.URL
+ healthHeaders http.Header
+ healthMethod string
+}
+
+// NewClient returns a new Client Instance
+func NewClient(name string, oc *config.OriginConfig, cache cache.Cache) (*Client, error) {
+ c, err := proxy.NewHTTPClient(oc)
+ return &Client{name: name, config: oc, cache: cache, webClient: c}, err
+}
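+
+// Illustrative usage of NewClient (a sketch; the origin name "example" and the
+// nil cache are hypothetical, mirroring how the accompanying tests construct a Client):
+//
+//   oc := config.NewOriginConfig()
+//   c, err := NewClient("example", oc, nil)
+//   if err != nil {
+//       // handle the HTTP client construction error
+//   }
+//   _ = c.Configuration() // returns oc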
+
+// Configuration returns the upstream Configuration for this Client
+func (c *Client) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// HTTPClient returns the HTTP Transport the client is using
+func (c *Client) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Cache returns a handle to the Cache instance used by the Client
+func (c *Client) Cache() cache.Cache {
+ return c.cache
+}
+
+// Name returns the name of the upstream Configuration proxied by the Client
+func (c *Client) Name() string {
+ return c.name
+}
+
+// SetCache sets the Cache object the client will use for caching origin content
+func (c *Client) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
diff --git a/internal/proxy/origins/influxdb/influxdb_test.go b/internal/proxy/origins/influxdb/influxdb_test.go
new file mode 100644
index 000000000..e66616d25
--- /dev/null
+++ b/internal/proxy/origins/influxdb/influxdb_test.go
@@ -0,0 +1,136 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "testing"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+)
+
+func TestInfluxDBClientInterfacing(t *testing.T) {
+
+ // this test ensures the client will properly conform to the
+ // Client and TimeseriesClient interfaces
+
+ c := &Client{name: "test"}
+ var oc origins.Client = c
+ var tc origins.TimeseriesClient = c
+
+ if oc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", oc.Name())
+ }
+
+ if tc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", tc.Name())
+ }
+}
+
+func TestNewClient(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-type", "influxdb", "-origin-url", "http://1"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ oc := &config.OriginConfig{OriginType: "TEST_CLIENT"}
+ c, err := NewClient("default", oc, cache)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if c.Name() != "default" {
+ t.Errorf("expected %s got %s", "default", c.Name())
+ }
+
+ if c.Cache().Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Cache().Configuration().CacheType)
+ }
+
+ if c.Configuration().OriginType != "TEST_CLIENT" {
+ t.Errorf("expected %s got %s", "TEST_CLIENT", c.Configuration().OriginType)
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+ client := Client{config: oc}
+ c := client.Configuration()
+ if c.OriginType != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c.OriginType)
+ }
+}
+
+func TestCache(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-type", "influxdb", "-origin-url", "http://1"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ client := Client{cache: cache}
+ c := client.Cache()
+
+ if c.Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Configuration().CacheType)
+ }
+}
+
+func TestName(t *testing.T) {
+
+ client := Client{name: "TEST"}
+ c := client.Name()
+
+ if c != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c)
+ }
+
+}
+
+func TestHTTPClient(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+
+ c, err := NewClient("test", oc, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if c.HTTPClient() == nil {
+ t.Errorf("missing http client")
+ }
+}
+
+func TestSetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ c.SetCache(nil)
+ if c.Cache() != nil {
+ t.Errorf("expected nil cache for client named %s", "test")
+ }
+}
diff --git a/internal/proxy/origins/influxdb/model.go b/internal/proxy/origins/influxdb/model.go
new file mode 100644
index 000000000..bd0550b74
--- /dev/null
+++ b/internal/proxy/origins/influxdb/model.go
@@ -0,0 +1,56 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+ "github.com/influxdata/influxdb/models"
+)
+
+// SeriesEnvelope represents a response object from the InfluxDB HTTP API
+type SeriesEnvelope struct {
+ Results []Result `json:"results"`
+ Err string `json:"error,omitempty"`
+ StepDuration time.Duration `json:"step,omitempty"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+
+ timestamps map[time.Time]bool // tracks unique timestamps in the matrix data
+ tslist times.Times
+ isSorted bool // tracks if the matrix data is currently sorted
+ isCounted bool // tracks if timestamps slice is up-to-date
+}
+
+// Result represents a Result returned from the InfluxDB HTTP API
+type Result struct {
+ StatementID int `json:"statement_id"`
+ Series []models.Row `json:"series,omitempty"`
+ Err string `json:"error,omitempty"`
+}
+
+// MarshalTimeseries converts a Timeseries into a JSON blob
+func (c Client) MarshalTimeseries(ts timeseries.Timeseries) ([]byte, error) {
+ // Marshal the Envelope back to a json object for Cache Storage
+ return json.Marshal(ts)
+}
+
+// UnmarshalTimeseries converts a JSON blob into a Timeseries
+func (c Client) UnmarshalTimeseries(data []byte) (timeseries.Timeseries, error) {
+ se := &SeriesEnvelope{}
+ err := json.Unmarshal(data, se)
+ return se, err
+}
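+
+// A minimal round-trip sketch of the two methods above (values are illustrative):
+//
+//   se := &SeriesEnvelope{Results: []Result{{StatementID: 0}}}
+//   b, _ := Client{}.MarshalTimeseries(se)   // JSON suitable for cache storage
+//   ts, _ := Client{}.UnmarshalTimeseries(b) // back to a timeseries.Timeseries
+//   _ = ts.(*SeriesEnvelope)                 // the concrete type is *SeriesEnvelope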
diff --git a/internal/proxy/origins/influxdb/model_test.go b/internal/proxy/origins/influxdb/model_test.go
new file mode 100644
index 000000000..fb2c02f0e
--- /dev/null
+++ b/internal/proxy/origins/influxdb/model_test.go
@@ -0,0 +1,99 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "testing"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+func TestMarshalTimeseries(t *testing.T) {
+
+ se := &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1000), 1.5},
+ {float64(5000), 1.5},
+ {float64(10000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName2": "tagValue2"},
+ Values: [][]interface{}{
+ {float64(1000), 2.5},
+ {float64(5000), 2.1},
+ {float64(10000), 2.4},
+ },
+ },
+ },
+ },
+ },
+ }
+
+ expected := `{"results":[{"statement_id":0,"series":[{"name":"a","tags":{"tagName1":"tagValue1"},"columns":["time","units"],"values":[[1000,1.5],[5000,1.5],[10000,1.5]]},{"name":"b","tags":{"tagName2":"tagValue2"},"columns":["time","units"],"values":[[1000,2.5],[5000,2.1],[10000,2.4]]}]}]}`
+ client := &Client{}
+ bytes, err := client.MarshalTimeseries(se)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if string(bytes) != expected {
+ t.Errorf("expected [%s] got [%s]", expected, string(bytes))
+ }
+
+}
+
+func TestUnmarshalTimeseries(t *testing.T) {
+
+ bytes := []byte(`{"results":[{"statement_id":0,"series":[{"name":"a","tags":{"tagName1":"tagValue1"},"columns":["time","units"],"values":[[1000,1.5],[5000,1.5],[10000,1.5]]},{"name":"b","tags":{"tagName2":"tagValue2"},"columns":["time","units"],"values":[[1000,2.5],[5000,2.1],[10000,2.4]]}]}]}`)
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+
+ if len(se.Results) != 1 {
+ t.Errorf(`expected 1. got %d`, len(se.Results))
+ return
+ }
+
+ if len(se.Results[0].Series) != 2 {
+ t.Errorf(`expected 2. got %d`, len(se.Results[0].Series))
+ return
+ }
+
+ if len(se.Results[0].Series[0].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(se.Results[0].Series[0].Values))
+ return
+ }
+
+ if len(se.Results[0].Series[1].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(se.Results[0].Series[1].Values))
+ return
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/routes.go b/internal/proxy/origins/influxdb/routes.go
new file mode 100644
index 000000000..b15aa61fb
--- /dev/null
+++ b/internal/proxy/origins/influxdb/routes.go
@@ -0,0 +1,76 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func (c *Client) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+ // This is the registry of handlers that Trickster supports for InfluxDB,
+ // which can be referenced by name (map key) in Config Files
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers["query"] = http.HandlerFunc(c.QueryHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *Client) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
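+// populateHeathCheckRequestValues fills in InfluxDB-appropriate defaults
+// ("/ping", GET, empty query) for any health check fields still set to the
+// "-" placeholder value.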
+func populateHeathCheckRequestValues(oc *config.OriginConfig) {
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/ping"
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ oc.HealthCheckQuery = ""
+ }
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *Client) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+
+ populateHeathCheckRequestValues(oc)
+
+ paths := map[string]*config.PathConfig{
+ "/" + mnQuery: {
+ Path: "/" + mnQuery,
+ HandlerName: mnQuery,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upDB, upQuery, "u", "p"},
+ CacheKeyHeaders: []string{},
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+ "/": {
+ Path: "/",
+ HandlerName: "proxy",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+ }
+ return paths
+}
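+
+// A rough sketch of how the two maps cooperate (handler names are the keys
+// registered in registerHandlers; router wiring happens outside this package):
+//
+//   paths := c.DefaultPathConfigs(oc)
+//   qh := c.Handlers()[paths["/"+mnQuery].HandlerName] // the QueryHandler
+//   ph := c.Handlers()[paths["/"].HandlerName]         // the catch-all ProxyHandler
+//   _, _ = qh, ph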
diff --git a/internal/proxy/origins/influxdb/routes_test.go b/internal/proxy/origins/influxdb/routes_test.go
new file mode 100644
index 000000000..480ac5c11
--- /dev/null
+++ b/internal/proxy/origins/influxdb/routes_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRegisterHandlers(t *testing.T) {
+ c := &Client{}
+ c.registerHandlers()
+ if _, ok := c.handlers[mnQuery]; !ok {
+ t.Errorf("expected to find handler named: %s", mnQuery)
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ c := &Client{}
+ m := c.Handlers()
+ if _, ok := m[mnQuery]; !ok {
+ t.Errorf("expected to find handler named: %s", mnQuery)
+ }
+}
+
+func TestDefaultPathConfigs(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 204, "", nil, "influxdb", "/", "debug")
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if _, ok := client.config.Paths["/"]; !ok {
+ t.Errorf("expected to find path named: %s", "/")
+ }
+
+ const expectedLen = 2
+ if len(client.config.Paths) != expectedLen {
+ t.Errorf("expected ordered length to be: %d", expectedLen)
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/series.go b/internal/proxy/origins/influxdb/series.go
new file mode 100644
index 000000000..c882027fe
--- /dev/null
+++ b/internal/proxy/origins/influxdb/series.go
@@ -0,0 +1,471 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ str "github.com/Comcast/trickster/internal/util/strings"
+ "github.com/Comcast/trickster/pkg/sort/times"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent list
+func (se *SeriesEnvelope) SetExtents(extents timeseries.ExtentList) {
+ se.ExtentList = make(timeseries.ExtentList, len(extents))
+ copy(se.ExtentList, extents)
+ se.isCounted = false
+}
+
+// Extents returns the Timeseries's ExtentList
+func (se *SeriesEnvelope) Extents() timeseries.ExtentList {
+ return se.ExtentList
+}
+
+// ValueCount returns the count of all values across all series in the Timeseries
+func (se *SeriesEnvelope) ValueCount() int {
+ c := 0
+ for i := range se.Results {
+ for j := range se.Results[i].Series {
+ c += len(se.Results[i].Series[j].Values)
+ }
+ }
+ return c
+}
+
+// TimestampCount returns the count of unique timestamps across all series in the Timeseries
+func (se *SeriesEnvelope) TimestampCount() int {
+ if se.timestamps == nil {
+ se.timestamps = make(map[time.Time]bool)
+ }
+ se.updateTimestamps()
+ return len(se.timestamps)
+}
+
+func (se *SeriesEnvelope) updateTimestamps() {
+ if se.isCounted {
+ return
+ }
+ m := make(map[time.Time]bool)
+
+ for i := range se.Results {
+ for j, s := range se.Results[i].Series {
+ ti := str.IndexOfString(s.Columns, "time")
+ if ti < 0 {
+ continue
+ }
+ for k := range se.Results[i].Series[j].Values {
+ m[time.Unix(int64(se.Results[i].Series[j].Values[k][ti].(float64)/1000), 0)] = true
+ }
+ }
+ }
+
+ se.timestamps = m
+ se.tslist = times.FromMap(m)
+ se.isCounted = true
+}
+
+// SeriesCount returns the count of all Results in the Timeseries.
+// It is named SeriesCount for interface conformity, given the disparity in nomenclature among the various TSDBs.
+func (se *SeriesEnvelope) SeriesCount() int {
+ return len(se.Results)
+}
+
+// Step returns the step for the Timeseries
+func (se *SeriesEnvelope) Step() time.Duration {
+ return se.StepDuration
+}
+
+// SetStep sets the step for the Timeseries
+func (se *SeriesEnvelope) SetStep(step time.Duration) {
+ se.StepDuration = step
+}
+
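+// seriesKey uniquely identifies a series (by result index, statement ID, name,
+// tags and columns) so Merge can fold matching series from multiple envelopes
+// into a single row.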
+type seriesKey struct {
+ ResultID int
+ StatementID int
+ Name string
+ Tags string
+ Columns string
+}
+
+type tags map[string]string
+
+func (t tags) String() string {
+ var pairs string
+ for k, v := range t {
+ pairs += fmt.Sprintf("%s=%s;", k, v)
+ }
+ return pairs
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the order provided) and optionally sorts the merged Timeseries
+func (se *SeriesEnvelope) Merge(sort bool, collection ...timeseries.Timeseries) {
+
+ mtx := sync.Mutex{}
+ wg := sync.WaitGroup{}
+
+ series := make(map[seriesKey]*models.Row)
+ for i, r := range se.Results {
+ for j := range se.Results[i].Series {
+ wg.Add(1)
+ go func(s *models.Row) {
+ mtx.Lock()
+ series[seriesKey{ResultID: i, StatementID: r.StatementID, Name: s.Name, Tags: tags(s.Tags).String(), Columns: strings.Join(s.Columns, ",")}] = s
+ mtx.Unlock()
+ wg.Done()
+ }(&se.Results[i].Series[j])
+ }
+ }
+ wg.Wait()
+
+ for _, ts := range collection {
+ if ts != nil {
+ se2 := ts.(*SeriesEnvelope)
+ for g, r := range se2.Results {
+
+ if g >= len(se.Results) {
+ mtx.Lock()
+ se.Results = append(se.Results, se2.Results[g:]...)
+ mtx.Unlock()
+ break
+ }
+
+ for i := range r.Series {
+ wg.Add(1)
+ go func(s *models.Row, resultID int) {
+ mtx.Lock()
+ sk := seriesKey{ResultID: g, StatementID: r.StatementID, Name: s.Name, Tags: tags(s.Tags).String(), Columns: strings.Join(s.Columns, ",")}
+ if _, ok := series[sk]; !ok {
+ series[sk] = s
+ se.Results[resultID].Series = append(se.Results[resultID].Series, *s)
+ mtx.Unlock()
+ wg.Done()
+ return
+ }
+ series[sk].Values = append(series[sk].Values, s.Values...)
+ mtx.Unlock()
+ wg.Done()
+ }(&r.Series[i], g)
+ }
+ }
+ wg.Wait()
+ mtx.Lock()
+ se.ExtentList = append(se.ExtentList, se2.ExtentList...)
+ mtx.Unlock()
+ }
+ }
+
+ se.ExtentList = se.ExtentList.Compress(se.StepDuration)
+ se.isSorted = false
+ se.isCounted = false
+ if sort {
+ se.Sort()
+ }
+}
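+
+// Minimal merge sketch (a and b are hypothetical *SeriesEnvelope values holding
+// adjacent extents of the same series, as in the accompanying tests):
+//
+//   a.Merge(true, b) // b's values are folded into a's matching series
+//   _ = a.Extents()  // extent lists are concatenated and compressed to the union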
+
+// Clone returns a perfect copy of the base Timeseries
+func (se *SeriesEnvelope) Clone() timeseries.Timeseries {
+ resultSe := &SeriesEnvelope{
+ Err: se.Err,
+ Results: make([]Result, len(se.Results)),
+ StepDuration: se.StepDuration,
+ ExtentList: make(timeseries.ExtentList, len(se.ExtentList)),
+ }
+ copy(resultSe.ExtentList, se.ExtentList)
+ for index := range se.Results {
+ resResult := se.Results[index]
+ resResult.Err = se.Results[index].Err
+ resResult.StatementID = se.Results[index].StatementID
+ for seriesIndex := range se.Results[index].Series {
+ serResult := se.Results[index].Series[seriesIndex]
+ serResult.Name = se.Results[index].Series[seriesIndex].Name
+ serResult.Partial = se.Results[index].Series[seriesIndex].Partial
+
+ serResult.Columns = make([]string, len(se.Results[index].Series[seriesIndex].Columns))
+ copy(serResult.Columns, se.Results[index].Series[seriesIndex].Columns)
+
+ serResult.Tags = make(map[string]string)
+
+ // Copy from the original map to the target map
+ for key, value := range se.Results[index].Series[seriesIndex].Tags {
+ serResult.Tags[key] = value
+ }
+
+ serResult.Values = make([][]interface{}, len(se.Results[index].Series[seriesIndex].Values))
+ for i := range se.Results[index].Series[seriesIndex].Values {
+ serResult.Values[i] = make([]interface{}, len(se.Results[index].Series[seriesIndex].Values[i]))
+ copy(serResult.Values[i], se.Results[index].Series[seriesIndex].Values[i])
+ }
+
+ resResult.Series[seriesIndex] = serResult
+ }
+ resultSe.Results[index] = resResult
+ }
+ return resultSe
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided count, by evicting elements
+// using a least-recently-used methodology. The time parameter limits the upper extent to the provided time,
+// in order to support backfill tolerance
+func (se *SeriesEnvelope) CropToSize(sz int, t time.Time, lur timeseries.Extent) {
+
+ se.isCounted = false
+ se.isSorted = false
+ x := len(se.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ for i := range se.Results {
+ se.Results[i].Series = []models.Row{}
+ }
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed
+ if se.ExtentList[x-1].End.After(t) {
+ se.CropToRange(timeseries.Extent{Start: se.ExtentList[0].Start, End: t})
+ }
+
+ tc := se.TimestampCount()
+ if len(se.Results) == 0 || tc <= sz {
+ return
+ }
+
+ el := timeseries.ExtentListLRU(se.ExtentList).UpdateLastUsed(lur, se.StepDuration)
+ sort.Sort(el)
+
+ rc := tc - sz // # of required timestamps we must delete to meet the retention policy
+ removals := make(map[time.Time]bool)
+ done := false
+ var ok bool
+
+ for _, x := range el {
+ for ts := x.Start; !x.End.Before(ts) && !done; ts = ts.Add(se.StepDuration) {
+ if _, ok = se.timestamps[ts]; ok {
+ removals[ts] = true
+ done = len(removals) >= rc
+ }
+ }
+ if done {
+ break
+ }
+ }
+
+ ti := str.IndexOfString(se.Results[0].Series[0].Columns, "time")
+
+ for i, r := range se.Results {
+ for j, s := range r.Series {
+ tmp := se.Results[i].Series[j].Values[:0]
+ for _, v := range se.Results[i].Series[j].Values {
+ t = time.Unix(int64(v[ti].(float64)/1000), 0)
+ if _, ok := removals[t]; !ok {
+ tmp = append(tmp, v)
+ }
+ }
+ s.Values = tmp
+ }
+ }
+
+ tl := times.FromMap(removals)
+ sort.Sort(tl)
+ for _, t := range tl {
+ for i, e := range el {
+ if e.StartsAt(t) {
+ el[i].Start = e.Start.Add(se.StepDuration)
+ }
+ }
+ }
+
+ se.ExtentList = timeseries.ExtentList(el).Compress(se.StepDuration)
+ se.Sort()
+}
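+
+// Usage sketch (t0/t1 are hypothetical bounds of the most recently requested range):
+//
+//   se.CropToSize(1000, time.Now(), timeseries.Extent{Start: t0, End: t1})
+//
+// Unique timestamps are evicted from the least-recently-used extents first until
+// no more than 1000 remain; see TestCropToSize for concrete before/after cases.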
+
+// CropToRange reduces the Timeseries down to timestamps contained within the provided Extent (inclusive).
+// CropToRange assumes the base Timeseries is already sorted, and will corrupt an unsorted Timeseries
+func (se *SeriesEnvelope) CropToRange(e timeseries.Extent) {
+ se.isCounted = false
+ x := len(se.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ for i := range se.Results {
+ se.Results[i].Series = []models.Row{}
+ }
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the extent of the series is entirely outside the extent of the crop range, return empty set and bail
+ if se.ExtentList.OutsideOf(e) {
+ for i := range se.Results {
+ se.Results[i].Series = []models.Row{}
+ }
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the series extent is entirely inside the extent of the crop range, simply adjust down its ExtentList
+ if se.ExtentList.InsideOf(e) {
+ if se.ValueCount() == 0 {
+ for i := range se.Results {
+ se.Results[i].Series = []models.Row{}
+ }
+ }
+ se.ExtentList = se.ExtentList.Crop(e)
+ return
+ }
+
+ startSecs := e.Start.Unix()
+ endSecs := e.End.Unix()
+
+ for i, r := range se.Results {
+
+ if len(r.Series) == 0 {
+ se.ExtentList = se.ExtentList.Crop(e)
+ continue
+ }
+
+ deletes := make(map[int]bool)
+
+ for j, s := range r.Series {
+ // re-check the index of the time column in case it differs in this series
+ ti := str.IndexOfString(s.Columns, "time")
+ if ti != -1 {
+ start := -1
+ end := -1
+ for vi, v := range se.Results[i].Series[j].Values {
+ t := int64(v[ti].(float64) / 1000)
+ if t == endSecs {
+ if vi == 0 || t == startSecs || start == -1 {
+ start = vi
+ }
+ end = vi + 1
+ break
+ }
+ if t > endSecs {
+ end = vi
+ break
+ }
+ if t < startSecs {
+ continue
+ }
+ if start == -1 && (t == startSecs || (endSecs > t && t > startSecs)) {
+ start = vi
+ }
+ }
+ if start != -1 {
+ if end == -1 {
+ end = len(s.Values)
+ }
+ se.Results[i].Series[j].Values = s.Values[start:end]
+ } else {
+ deletes[j] = true
+ }
+ }
+ }
+ if len(deletes) > 0 {
+ tmp := se.Results[i].Series[:0]
+ for j, s := range se.Results[i].Series {
+ if _, ok := deletes[j]; !ok {
+ tmp = append(tmp, s)
+ }
+ }
+ se.Results[i].Series = tmp
+ }
+ }
+ se.ExtentList = se.ExtentList.Crop(e)
+}
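+
+// Usage sketch: cropping to a narrower extent keeps only values whose timestamps
+// fall within it (inclusive), as exercised in TestCropToRange:
+//
+//   se.CropToRange(timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)})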
+
+// Sort sorts all Values in each Series chronologically by their timestamp
+func (se *SeriesEnvelope) Sort() {
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ if se.isSorted || len(se.Results) == 0 || len(se.Results[0].Series) == 0 {
+ return
+ }
+
+ tsm := map[time.Time]bool{}
+ m := make(map[int64][]interface{})
+ if ti := str.IndexOfString(se.Results[0].Series[0].Columns, "time"); ti != -1 {
+ for ri := range se.Results {
+ for si := range se.Results[ri].Series {
+ keys := make([]int64, 0, len(m))
+ for _, v := range se.Results[ri].Series[si].Values {
+ wg.Add(1)
+ go func(s []interface{}) {
+ t := int64(s[ti].(float64))
+ mtx.Lock()
+ if _, ok := m[t]; !ok {
+ keys = append(keys, t)
+ m[t] = s
+ }
+ tsm[time.Unix(t/1000, 0)] = true
+ mtx.Unlock()
+ wg.Done()
+ }(v)
+ }
+ wg.Wait()
+ sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+ sm := make([][]interface{}, 0, len(keys))
+ for _, key := range keys {
+ sm = append(sm, m[key])
+ }
+ se.Results[ri].Series[si].Values = sm
+ }
+ }
+ }
+
+ sort.Sort(se.ExtentList)
+
+ se.timestamps = tsm
+ se.tslist = times.FromMap(tsm)
+ se.isCounted = true
+ se.isSorted = true
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
+func (se *SeriesEnvelope) Size() int {
+ c := 8 + len(se.Err)
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range se.Results {
+ for j := range se.Results[i].Series {
+
+ wg.Add(1)
+ go func(r models.Row) {
+ mtx.Lock()
+ c += len(r.Name)
+ for k, v := range r.Tags {
+ c += len(k) + len(v)
+ }
+ for _, v := range r.Columns {
+ c += len(v)
+ }
+ c += 16 // approximate size of timestamp + value
+ mtx.Unlock()
+ wg.Done()
+ }(se.Results[i].Series[j])
+ }
+ }
+ wg.Wait()
+ return c
+}
diff --git a/internal/proxy/origins/influxdb/series_test.go b/internal/proxy/origins/influxdb/series_test.go
new file mode 100644
index 000000000..9ae6269ff
--- /dev/null
+++ b/internal/proxy/origins/influxdb/series_test.go
@@ -0,0 +1,1580 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/pkg/sort/times"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+const testStep = time.Duration(10) * time.Second
+
+func TestSetExtents(t *testing.T) {
+ se := &SeriesEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ se.SetExtents(ex)
+ if len(se.ExtentList) != 1 {
+ t.Errorf(`expected 1. got %d`, len(se.ExtentList))
+ }
+}
+
+func TestExtents(t *testing.T) {
+ se := &SeriesEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ se.SetExtents(ex)
+ e := se.Extents()
+ if len(e) != 1 {
+ t.Errorf(`expected 1. got %d`, len(se.ExtentList))
+ }
+}
+
+func TestUpdateTimestamps(t *testing.T) {
+
+ // test edge condition here (core functionality is tested across this file)
+ se := SeriesEnvelope{isCounted: true}
+ se.updateTimestamps()
+ if se.timestamps != nil {
+ t.Errorf("expected nil map, got size %d", len(se.timestamps))
+ }
+
+}
+
+func TestClone(t *testing.T) {
+ se := &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1000), 1.5},
+ {float64(5000), 1.5},
+ {float64(10000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName2": "tagValue2"},
+ Values: [][]interface{}{
+ {float64(1000), 2.5},
+ {float64(5000), 2.1},
+ {float64(10000), 2.4},
+ },
+ },
+ },
+ },
+ },
+ }
+
+ sec := se.Clone().(*SeriesEnvelope)
+
+ if len(sec.Results) != 1 {
+ t.Errorf(`expected 1. got %d`, len(sec.Results))
+ return
+ }
+
+ if len(sec.Results[0].Series) != 2 {
+ t.Errorf(`expected 2. got %d`, len(sec.Results[0].Series))
+ return
+ }
+
+ if len(sec.Results[0].Series[0].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(sec.Results[0].Series[0].Values))
+ return
+ }
+
+ if len(sec.Results[0].Series[1].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(sec.Results[0].Series[1].Values))
+ return
+ }
+
+}
+
+func TestSetStep(t *testing.T) {
+ se := SeriesEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ se.SetStep(step)
+ if se.StepDuration != step {
+ t.Errorf(`expected "%s". got "%s"`, step, se.StepDuration)
+ }
+}
+
+func TestStep(t *testing.T) {
+ se := SeriesEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ se.SetStep(step)
+ if se.Step() != step {
+ t.Errorf(`expected "%s". got "%s"`, step, se.Step())
+ }
+}
+
+func TestSeriesCount(t *testing.T) {
+ se := &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(10000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ }
+ if se.SeriesCount() != 1 {
+ t.Errorf("expected 1 got %d.", se.SeriesCount())
+ }
+}
+
+func TestValueCount(t *testing.T) {
+ se := &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1000), 1.5},
+ {float64(5000), 1.5},
+ {float64(10000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ }
+ if se.ValueCount() != 3 {
+ t.Errorf("expected 3 got %d.", se.ValueCount())
+ }
+}
+
+func TestMerge(t *testing.T) {
+ tests := []struct {
+ a, b, merged *SeriesEnvelope
+ }{
+ // case 0
+ {
+ a: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(15000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ b: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(5000), 1.5},
+ {float64(10000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ merged: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(5000), 1.5},
+ {float64(10000), 1.5},
+ {float64(15000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ timestamps: map[time.Time]bool{time.Unix(5, 0): true, time.Unix(10, 0): true, time.Unix(15, 0): true},
+ tslist: times.Times{time.Unix(5, 0), time.Unix(10, 0), time.Unix(15, 0)},
+ isSorted: true,
+ isCounted: true,
+ },
+ },
+
+ // case 1 empty second series
+ {
+ a: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(10000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ b: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ merged: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(10000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ timestamps: map[time.Time]bool{time.Unix(10, 0): true},
+ tslist: times.Times{time.Unix(10, 0)},
+ isSorted: true,
+ isCounted: true,
+ },
+ },
+
+ // case 2, different series in different envelopes, merge into 1
+ {
+ a: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ b: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ merged: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ timestamps: map[time.Time]bool{},
+ tslist: times.Times{},
+ isSorted: true,
+ isCounted: true,
+ },
+ },
+ // case 3, more results[] elements in the incoming envelope - lazy merge them
+ {
+ a: &SeriesEnvelope{
+ Results: []Result{
+ {
+ StatementID: 0,
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ b: &SeriesEnvelope{
+ Results: []Result{
+ {
+ StatementID: 0,
+ Series: []models.Row{
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ {
+ StatementID: 1,
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ merged: &SeriesEnvelope{
+ Results: []Result{
+ {
+ StatementID: 0,
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ {
+ StatementID: 1,
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ timestamps: map[time.Time]bool{},
+ tslist: times.Times{},
+ isSorted: true,
+ isCounted: true,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.a.Merge(true, test.b)
+ if !reflect.DeepEqual(test.merged, test.a) {
+ t.Fatalf("Mismatch\nactual=%v\nexpect=%v", test.a, test.merged)
+ }
+ })
+ }
+}
+
+func TestCropToSize(t *testing.T) {
+
+ now := time.Now().Truncate(testStep)
+ nowEpochMs := float64(now.Unix() * 1000)
+
+ tests := []struct {
+ before, after *SeriesEnvelope
+ size int
+ bft time.Time
+ extent timeseries.Extent
+ }{
+ // case 0: where we already have the number of timestamps we are cropping to
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ timestamps: map[time.Time]bool{time.Unix(1444004600, 0): true},
+ tslist: times.Times{time.Unix(1444004600, 0)},
+ isCounted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004600, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 1
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ {float64(1444004610000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004610000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004610, 0): true},
+ tslist: times.Times{time.Unix(1444004610, 0)},
+ isCounted: true,
+ isSorted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004610, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 2 - empty extent list
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{},
+ size: 1,
+ bft: now,
+ },
+
+ // case 3 - backfill tolerance
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ {nowEpochMs, 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: now},
+ },
+ StepDuration: testStep,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: now.Add(-5 * time.Minute)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004600, 0): true},
+ tslist: times.Times{time.Unix(1444004600, 0)},
+ isCounted: true,
+ isSorted: false,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: now,
+ },
+ size: 1,
+ bft: now.Add(-5 * time.Minute),
+ },
+
+ // Case 4 - missing "time" column (the column is deliberately named "timestamp" here to simulate that case)
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"timestamp", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ {float64(1444004610000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"timestamp", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1444004600000), 1.5},
+ {float64(1444004610000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{},
+ tslist: times.Times{},
+ isCounted: true,
+ isSorted: false,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004610, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToSize(test.size, test.bft, test.extent)
+
+ for i := range test.before.ExtentList {
+ test.before.ExtentList[i].LastUsed = time.Time{}
+ }
+
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\n got=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestCropToRange(t *testing.T) {
+ tests := []struct {
+ before, after *SeriesEnvelope
+ extent timeseries.Extent
+ }{
+ { // Run 0 Case where the very first element in the matrix has a timestamp matching the extent's end
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1544004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1544004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1544004600, 0),
+ },
+ },
+
+ { // Run 1 Case where we trim nothing
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1544004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1544004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1644004600, 0),
+ },
+ },
+
+ { // Run 2 Case where we trim everything (all data is too old)
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(1544004600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(10, 0),
+ },
+ },
+
+ { // Run 3 Case where we trim everything (all data is too early)
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(100, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(10000, 0),
+ End: time.Unix(20000, 0),
+ },
+ },
+
+ { // Run 4 Case where we trim some off the beginning
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(300, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ { // Run 5 Case where we trim some off the ends
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(200000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(200, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(200, 0),
+ },
+ },
+
+ { // Run 6 Case where the last datapoint is on the Crop extent
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ { // Run 7 Case where we aren't given any datapoints
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ { // Run 8 Case where we have more series than points
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ },
+ },
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(400, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ // Run 9: Case where after cropping, an inner series is empty/removed
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+
+ // Run 10: Case where after cropping, the front series is empty/removed
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+
+ // Run 11: Case where after cropping, the back series is empty/removed
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(100000), 1.5},
+ {float64(200000), 1.5},
+ {float64(300000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{
+ {float64(400000), 1.5},
+ {float64(500000), 1.5},
+ {float64(600000), 1.5},
+ },
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+
+ // Run 12: Case where we short circuit since the dataset is already entirely inside the crop range
+ {
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "c",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{{Series: []models.Row{}}},
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+
+ { // Run 13 Case where we short circuit since the dataset is empty
+ before: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{},
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToRange(test.extent)
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\ngot =%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestSort(t *testing.T) {
+
+ se := SeriesEnvelope{isSorted: true, isCounted: false}
+ se.Sort()
+ if se.isCounted {
+ t.Errorf("got %t expected %t", se.isCounted, false)
+ }
+}
+
+func TestSize(t *testing.T) {
+ s := &SeriesEnvelope{
+ Results: []Result{
+ {
+ Series: []models.Row{
+ {
+ Name: "a",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ {
+ Name: "b",
+ Columns: []string{"time", "units"},
+ Tags: map[string]string{"tagName1": "tagValue1"},
+ Values: [][]interface{}{},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ }
+
+ i := s.Size()
+ expected := 94
+
+ if i != expected {
+ t.Errorf("expected %d got %d", expected, i)
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/stubs.go b/internal/proxy/origins/influxdb/stubs.go
new file mode 100644
index 000000000..5f7ad1ea3
--- /dev/null
+++ b/internal/proxy/origins/influxdb/stubs.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// This file holds funcs that are required by the Proxy Client or Timeseries
+// interfaces but are (currently) unused by the InfluxDB implementation.
+
+// Series (timeseries.Timeseries Interface) stub funcs
+
+// FastForwardURL is not used for InfluxDB and is here to conform to the Proxy Client interface
+func (c Client) FastForwardURL(r *http.Request) (*url.URL, error) {
+ return nil, nil
+}
+
+// InfluxDb Client (proxy.Client Interface) stub funcs
+
+// UnmarshalInstantaneous is not used for InfluxDB and is here to conform to the Proxy Client interface
+func (c Client) UnmarshalInstantaneous(data []byte) (timeseries.Timeseries, error) {
+ return nil, nil
+}
diff --git a/internal/proxy/origins/influxdb/stubs_test.go b/internal/proxy/origins/influxdb/stubs_test.go
new file mode 100644
index 000000000..109bb28e7
--- /dev/null
+++ b/internal/proxy/origins/influxdb/stubs_test.go
@@ -0,0 +1,46 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "testing"
+)
+
+func TestFastForwardURL(t *testing.T) {
+
+ client := &Client{}
+ u, err := client.FastForwardURL(nil)
+ if u != nil {
+ t.Errorf("Expected nil url, got %s", u)
+ }
+
+ if err != nil {
+ t.Errorf("Expected nil err, got %s", err)
+ }
+}
+
+func TestUnmarshalInstantaneous(t *testing.T) {
+
+ client := &Client{}
+ tr, err := client.UnmarshalInstantaneous(nil)
+
+ if tr != nil {
+ t.Errorf("Expected nil timeseries, got %s", tr)
+ }
+
+ if err != nil {
+ t.Errorf("Expected nil err, got %s", err)
+ }
+
+}
diff --git a/internal/proxy/origins/influxdb/tokenization.go b/internal/proxy/origins/influxdb/tokenization.go
new file mode 100644
index 000000000..6e2ac20f0
--- /dev/null
+++ b/internal/proxy/origins/influxdb/tokenization.go
@@ -0,0 +1,138 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/timeconv"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/regexp/matching"
+)
+
+// This file handles tokenization of time parameters within InfluxDB queries
+// for cache key hashing and delta proxy caching.
+
+// Tokens for String Interpolation
+const (
+ tkTime = "<$TIME_TOKEN$>"
+)
+
+var reTime1, reTime2, reStep *regexp.Regexp
+
+func init() {
+
+ // Regexp for extracting the step from an InfluxDB Timeseries Query; searches for something like: group by time(1d)
+ reStep = regexp.MustCompile(`(?i)\s+group\s+by\s+.*time\((?P<step>[0-9]+(ns|µ|u|ms|s|m|h|d|w|y))\).*;??`)
+
+ // Regexp for extracting the time elements from an InfluxDB Timeseries Query with equality operators: >=, >, =
+ // If it's a relative time range (e.g., where time >= now() - 24h ), this expression is all that is required
+ reTime1 = regexp.MustCompile(`(?i)(?P<preOp1>where|and)\s+(?P<timeExpr1>time\s+(?P<relationalOp1>>=|>|=)\s+(?P<value1>((?P<ts1>[0-9]+)(?P<tsUnit1>ns|µ|u|ms|s|m|h|d|w|y)|(?P<now1>now\(\))\s+(?P<operand1>[+-])\s+(?P<offset1>[0-9]+[mhsdwy]))))(\s+(?P<postOp1>and|or|group|order|limit)|$)`)
+
+ // Regexp for extracting the time elements from an InfluxDB Timeseries Query with equality operators: <=, <
+ // If it's an absolute time range (e.g., where time >= 150000ms and time <= 150001ms ), this expression catches the second clause
+ reTime2 = regexp.MustCompile(`(?i)(?P<preOp2>where|and)\s+(?P<timeExpr2>time\s+(?P<relationalOp2><=|<)\s+(?P<value2>((?P<ts2>[0-9]+)(?P<tsUnit2>ns|µ|u|ms|s|m|h|d|w|y)|(?P<now2>now\(\))\s+(?P<operand2>[+-])\s+(?P<offset2>[0-9]+[mhsdwy]))))(\s+(?P<postOp2>and|or|group|order|limit)|$)`)
+}
+
+func interpolateTimeQuery(template string, extent *timeseries.Extent) string {
+ return strings.Replace(template, tkTime, fmt.Sprintf("time >= %dms AND time <= %dms", extent.Start.Unix()*1000, extent.End.Unix()*1000), -1)
+}
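+
+// For example, with an extent of Start=time.Unix(100, 0) and End=time.Unix(300, 0),
+// interpolateTimeQuery turns the template
+//   select * where <$TIME_TOKEN$> group by time(1m)
+// into
+//   select * where time >= 100000ms AND time <= 300000ms group by time(1m)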
+
+func getQueryParts(query string) (string, timeseries.Extent) {
+ m := matching.GetNamedMatches(reTime1, query, nil)
+ if _, ok := m["now1"]; !ok {
+ m2 := matching.GetNamedMatches(reTime2, query, nil)
+ for k, v := range m2 {
+ m[k] = v
+ }
+ }
+ return tokenizeQuery(query, m), parseQueryExtents(query, m)
+}
+
+// tokenizeQuery takes an InfluxDB query and replaces all time conditionals with a single time token
+func tokenizeQuery(query string, timeParts map[string]string) string {
+
+ replacement := tkTime
+ // First check for timeExpr1 and, if it exists, do the replacement;
+ // this catches anything with "time >" or "time >="
+ if expr, ok := timeParts["timeExpr1"]; ok {
+ query = strings.Replace(query, expr, replacement, -1)
+ // We already inserted a time token; replace any further occurrences with ""
+ replacement = ""
+ }
+
+ // Then check for timeExpr2 and, if it exists, do the replacement,
+ // including any preceding "and", or the following "and" if preceded by "where";
+ // this catches anything with "time <" or "time <="
+ if expr, ok := timeParts["timeExpr2"]; ok {
+ if preOp, ok := timeParts["preOp2"]; ok {
+ if strings.ToLower(preOp) == "where" {
+ if postOp, ok := timeParts["postOp2"]; ok {
+ if strings.ToLower(postOp) == "and" {
+ expr += " " + postOp
+ }
+ }
+ } else {
+ expr = " " + preOp + " " + expr
+ }
+ }
+ query = strings.Replace(query, expr, replacement, -1)
+ }
+ return query
+}
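+
+// For example, the query
+//   select * where time >= 150000ms and time <= 300000ms group by time(1m)
+// tokenizes to
+//   select * where <$TIME_TOKEN$> group by time(1m)
+// with the second time clause and its leading "and" collapsed to an empty string.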
+
+func parseQueryExtents(query string, timeParts map[string]string) timeseries.Extent {
+
+ var e timeseries.Extent
+
+ t1 := timeFromParts("1", timeParts)
+ e.Start = t1
+ if _, ok := timeParts["now1"]; ok {
+ e.End = time.Now()
+ return e
+ }
+
+ t2 := timeFromParts("2", timeParts)
+ e.End = t2
+ return e
+}
+
+func timeFromParts(clauseNum string, timeParts map[string]string) time.Time {
+
+ ts := int64(0)
+
+ if _, ok := timeParts["now"+clauseNum]; ok {
+ if offset, ok := timeParts["offset"+clauseNum]; ok {
+ s, err := timeconv.ParseDuration(offset)
+ if err == nil {
+ if operand, ok := timeParts["operand"+clauseNum]; ok {
+ if operand == "+" {
+ ts = time.Now().Unix() + int64(s.Seconds())
+ } else {
+ ts = time.Now().Unix() - int64(s.Seconds())
+ }
+ }
+ }
+ }
+ } else if v, ok := timeParts["value"+clauseNum]; ok {
+ s, err := time.ParseDuration(v)
+ if err == nil {
+ ts = int64(s.Seconds())
+ }
+ }
+ return time.Unix(ts, 0)
+}
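+
+// For example, a clause value of "now() - 24h" resolves to the current time minus
+// 86400 seconds, while an absolute value such as "150000ms" resolves to time.Unix(150, 0).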
diff --git a/internal/proxy/origins/influxdb/url.go b/internal/proxy/origins/influxdb/url.go
new file mode 100644
index 000000000..c8ac1561e
--- /dev/null
+++ b/internal/proxy/origins/influxdb/url.go
@@ -0,0 +1,72 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// Upstream Endpoints
+const (
+ mnQuery = "query"
+)
+
+// Common URL Parameter Names
+const (
+ upQuery = "q"
+ upDB = "db"
+)
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy configuration
+func (c Client) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to construct the full upstream URL
+func (c Client) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/", "/", 1)
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
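+
+// For example, with an origin named "default", a downstream request for /default/query
+// is proxied to the BaseURL path plus "/query", with the original query string,
+// fragment and userinfo preserved.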
+
+// SetExtent will change the upstream request query to use the provided Extent
+func (c Client) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery, extent *timeseries.Extent) {
+
+ p := r.URL.Query()
+ t := trq.TemplateURL.Query()
+
+ q := t.Get(upQuery)
+ if q != "" {
+ p.Set(upQuery, interpolateTimeQuery(q, extent))
+ }
+
+ r.URL.RawQuery = p.Encode()
+}
diff --git a/internal/proxy/origins/influxdb/url_test.go b/internal/proxy/origins/influxdb/url_test.go
new file mode 100644
index 000000000..6bf7cc087
--- /dev/null
+++ b/internal/proxy/origins/influxdb/url_test.go
@@ -0,0 +1,68 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package influxdb
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestSetExtent(t *testing.T) {
+
+ start := time.Now().Add(time.Duration(-6) * time.Hour)
+ end := time.Now()
+ expected := "q=select+%2A+where+time+%3E%3D+" + fmt.Sprintf("%d", start.Unix()*1000) + "ms+AND+time+%3C%3D+" + fmt.Sprintf("%d", end.Unix()*1000) + "ms+group+by+time%281m%29"
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "none:9090", "-origin-type", "influxdb", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := Client{config: oc}
+
+ tu := &url.URL{RawQuery: "q=select * where <$TIME_TOKEN$> group by time(1m)"}
+
+ r, _ := http.NewRequest(http.MethodGet, tu.String(), nil)
+ trq := &timeseries.TimeRangeQuery{TemplateURL: tu}
+ e := &timeseries.Extent{Start: start, End: end}
+ client.SetExtent(r, trq, e)
+
+ if expected != r.URL.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, r.URL.RawQuery)
+ }
+}
+
+func TestBuildUpstreamURL(t *testing.T) {
+
+ cfg := config.NewConfig()
+ oc := cfg.Origins["default"]
+ oc.Scheme = "http"
+ oc.Host = "0"
+ oc.PathPrefix = ""
+
+ client := &Client{name: "default", config: oc}
+ r, err := http.NewRequest(http.MethodGet, "http://0/default/query", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ client.BuildUpstreamURL(r)
+
+}
diff --git a/internal/proxy/origins/irondb/handler_caql.go b/internal/proxy/origins/irondb/handler_caql.go
new file mode 100644
index 000000000..2452b4515
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_caql.go
@@ -0,0 +1,141 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// CAQLHandler handles CAQL requests for timeseries data and processes them
+// through the delta proxy cache.
+func (c *Client) CAQLHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// caqlHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) caqlHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+
+ if r == nil || extent == nil || (extent.Start.IsZero() && extent.End.IsZero()) {
+ return
+ }
+
+ var err error
+ if trq == nil {
+ if trq, err = c.ParseTimeRangeQuery(r); err != nil {
+ return
+ }
+ }
+
+ st := extent.Start.UnixNano() - (extent.Start.UnixNano() % int64(trq.Step))
+ et := extent.End.UnixNano() - (extent.End.UnixNano() % int64(trq.Step))
+ if st == et {
+ et += int64(trq.Step)
+ }
+
+ q := r.URL.Query()
+ q.Set(upCAQLStart, formatTimestamp(time.Unix(0, st), false))
+ q.Set(upCAQLEnd, formatTimestamp(time.Unix(0, et), false))
+ r.URL.RawQuery = q.Encode()
+}
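+
+// For example, with a 300-second step and an extent of Start=End=time.Unix(910, 0),
+// both timestamps align down to 900; because start equals end after alignment, one
+// step is added, so the upstream CAQL query covers Unix seconds 900 through 1200.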
+
+// caqlHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) caqlHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+ trq.Statement = r.URL.Path
+
+ qp := r.URL.Query()
+ var err error
+ p := ""
+
+ if p = qp.Get(upQuery); p == "" {
+ if p = qp.Get(upCAQLQuery); p == "" {
+ return nil, errors.MissingURLParam(upQuery + " or " + upCAQLQuery)
+ }
+ }
+
+ trq.Statement = p
+
+ if p = qp.Get(upCAQLStart); p == "" {
+ return nil, errors.MissingURLParam(upCAQLStart)
+ }
+
+ if trq.Extent.Start, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ if p = qp.Get(upCAQLEnd); p == "" {
+ return nil, errors.MissingURLParam(upCAQLEnd)
+ }
+
+ if trq.Extent.End, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ if p = qp.Get(upCAQLPeriod); p == "" {
+ return nil, errors.MissingURLParam(upCAQLPeriod)
+ }
+
+ if !strings.HasSuffix(p, "s") {
+ p += "s"
+ }
+
+ if trq.Step, err = parseDuration(p); err != nil {
+ return nil, err
+ }
+
+ return trq, nil
+}
+
+// caqlHandlerFastForwardURL returns the url to fetch the Fast Forward value
+// based on a timerange URL.
+func (c *Client) caqlHandlerFastForwardURL(
+ r *http.Request) (*url.URL, error) {
+
+ rsc := request.GetResources(r)
+ trq := rsc.TimeRangeQuery
+
+ var err error
+ u := urls.Clone(r.URL)
+ q := u.Query()
+
+ if trq == nil {
+ trq, err = c.ParseTimeRangeQuery(r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ now := time.Now().Unix()
+ start := now - (now % int64(trq.Step.Seconds()))
+ end := start + int64(trq.Step.Seconds())
+ q.Set(upCAQLStart, formatTimestamp(time.Unix(start, 0), false))
+ q.Set(upCAQLEnd, formatTimestamp(time.Unix(end, 0), false))
+ u.RawQuery = q.Encode()
+ return u, nil
+}
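+
+// For example, with a 300-second step and a current time of Unix second 1000, the fast
+// forward URL requests the current step-aligned window, Unix seconds 900 through 1200.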
diff --git a/internal/proxy/origins/irondb/handler_caql_test.go b/internal/proxy/origins/irondb/handler_caql_test.go
new file mode 100644
index 000000000..83dbd971d
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_caql_test.go
@@ -0,0 +1,188 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestCAQLHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/extension/lua/caql_v1"+
+ "?query=metric:average(%2200112233-4455-6677-8899-aabbccddeeff%22,"+
+ "%22metric%22)&start=0&end=900&period=300", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.CAQLHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestCaqlHandlerSetExtent(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ client := &Client{name: "test"}
+ cfg := config.NewOriginConfig()
+ cfg.HTTPClient = tu.NewTestWebClient()
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/extension/lua/caql_v1", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r = request.SetResources(r, request.NewResources(cfg, nil, nil, nil, client))
+
+ now := time.Now()
+ then := now.Add(-5 * time.Hour)
+
+ // should short circuit from internal checks
+ // although this func does not return a value to test, these calls exercise all coverage areas
+ client.caqlHandlerSetExtent(nil, nil, nil)
+ client.caqlHandlerSetExtent(r, nil, &timeseries.Extent{})
+ client.caqlHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&end=3456&period=7890"
+ client.caqlHandlerSetExtent(r, nil, &timeseries.Extent{Start: now, End: now})
+
+}
+
+func TestCaqlHandlerParseTimeRangeQuery(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ client := &Client{name: "test"}
+ cfg := config.NewOriginConfig()
+ cfg.HTTPClient = tu.NewTestWebClient()
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/extension/lua/caql_v1", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // case where everything is good
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&end=3456&period=7890"
+ trq, err := client.caqlHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+ if trq == nil {
+ t.Errorf("expected value got nil for %s", r.URL.RawQuery)
+ }
+
+ // missing q param but query is present
+ r.URL.RawQuery = "help=1234&query=5678&start=9012&end=3456&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // missing query param but q is present
+ r.URL.RawQuery = "q=1234&start=9012&end=3456&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // missing query and q params
+ r.URL.RawQuery = "start=9012&end=3456&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // missing start param
+ r.URL.RawQuery = "q=1234&query=5678&end=3456&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // can't parse start param
+ r.URL.RawQuery = "q=1234&query=5678&start=abcd&end=3456&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // missing end param
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // can't parse end param
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&end=efgh&period=7890"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // missing period param
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&end=3456"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // unparsable period param
+ r.URL.RawQuery = "q=1234&query=5678&start=9012&end=3456&period=pqrs"
+ _, err = client.caqlHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+}
+
+func TestCaqlHandlerFastForwardURLError(t *testing.T) {
+
+ client := &Client{name: "test"}
+ _, _, r, _, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/extension/lua/caql_v1", "debug")
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = client.caqlHandlerFastForwardURL(r)
+ if err == nil {
+ t.Errorf("expected error: %s", "invalid parameters")
+ }
+}
diff --git a/internal/proxy/origins/irondb/handler_fetch.go b/internal/proxy/origins/irondb/handler_fetch.go
new file mode 100644
index 000000000..3998b833d
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_fetch.go
@@ -0,0 +1,145 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/md5"
+)
+
+// FetchHandler handles requests for numeric timeseries data with specified
+// spans and processes them through the delta proxy cache.
+func (c *Client) FetchHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// fetchHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) fetchHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+
+ if r == nil || extent == nil || (extent.Start.IsZero() && extent.End.IsZero()) {
+ return
+ }
+
+ var err error
+ if trq == nil {
+ if trq, err = c.ParseTimeRangeQuery(r); err != nil {
+ return
+ }
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return
+ }
+
+ fetchReq := map[string]interface{}{}
+ if err = json.NewDecoder(bytes.NewBuffer(b)).Decode(&fetchReq); err != nil {
+ return
+ }
+
+ st := extent.Start.UnixNano() - (extent.Start.UnixNano() % int64(trq.Step))
+ et := extent.End.UnixNano() - (extent.End.UnixNano() % int64(trq.Step))
+ if st == et {
+ et += int64(trq.Step)
+ }
+
+ ct := (et - st) / int64(trq.Step)
+ fetchReq[rbStart] = time.Unix(0, st).Unix()
+ fetchReq[rbCount] = ct
+ newBody := &bytes.Buffer{}
+ err = json.NewEncoder(newBody).Encode(&fetchReq)
+ if err != nil {
+ return
+ }
+
+ r.Body = ioutil.NopCloser(newBody)
+}
+
+// fetchHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) fetchHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, errors.ParseRequestBody(err)
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+ fetchReq := map[string]interface{}{}
+ if err = json.NewDecoder(bytes.NewBuffer(b)).Decode(&fetchReq); err != nil {
+ return nil, errors.ParseRequestBody(err)
+ }
+
+ var i float64
+ var ok bool
+ if i, ok = fetchReq[rbStart].(float64); !ok {
+ return nil, errors.MissingRequestParam(rbStart)
+ }
+
+ trq.Extent.Start = time.Unix(int64(i), 0)
+ if i, ok = fetchReq[rbPeriod].(float64); !ok {
+ return nil, errors.MissingRequestParam(rbPeriod)
+ }
+
+ trq.Step = time.Second * time.Duration(i)
+ if i, ok = fetchReq[rbCount].(float64); !ok {
+ return nil, errors.MissingRequestParam(rbCount)
+ }
+
+ trq.Extent.End = trq.Extent.Start.Add(trq.Step * time.Duration(i))
+ return trq, nil
+}
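+
+// For example, a request body of {"start": 300, "period": 300, "count": 5} yields a
+// TimeRangeQuery with Extent.Start=time.Unix(300, 0), a 300-second Step, and
+// Extent.End=time.Unix(1800, 0) (start + period*count).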
+
+// fetchHandlerDeriveCacheKey calculates a query-specific keyname based on the
+// user request.
+func (c Client) fetchHandlerDeriveCacheKey(path string, params url.Values,
+ headers http.Header, body io.ReadCloser, extra string) string {
+ var sb strings.Builder
+ sb.WriteString(path)
+ newBody := &bytes.Buffer{}
+ if b, err := ioutil.ReadAll(body); err == nil {
+ body = ioutil.NopCloser(bytes.NewBuffer(b))
+ fetchReq := map[string]interface{}{}
+ err := json.NewDecoder(bytes.NewBuffer(b)).Decode(&fetchReq)
+ if err == nil {
+ delete(fetchReq, "start")
+ delete(fetchReq, "end")
+ delete(fetchReq, "count")
+ err = json.NewEncoder(newBody).Encode(&fetchReq)
+ if err == nil {
+ sb.Write(newBody.Bytes())
+ }
+ }
+ }
+
+ sb.WriteString(extra)
+ return md5.Checksum(sb.String())
+}
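+
+// For example, two requests to the same path whose JSON bodies differ only in the
+// "start", "end" or "count" fields derive the same cache key, since those fields are
+// removed from the body before it is hashed.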
diff --git a/internal/proxy/origins/irondb/handler_fetch_test.go b/internal/proxy/origins/irondb/handler_fetch_test.go
new file mode 100644
index 000000000..104dbb186
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_fetch_test.go
@@ -0,0 +1,148 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestFetchHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/rollup/00112233-4455-6677-8899-aabbccddeeff/metric"+
+ "?start_ts=0&end_ts=900&rollup_span=300s&type=average", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.FetchHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestFetchHandlerDeriveCacheKey(t *testing.T) {
+
+ client := &Client{name: "test"}
+ path := "/fetch/0/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, _, r, _, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", path, "debug")
+ if err != nil {
+ t.Error(err)
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte("{}")))
+
+ const expected = "a34bbb372c505e9eea0e0589e16c0914"
+ result := client.fetchHandlerDeriveCacheKey(path, r.URL.Query(), r.Header, r.Body, "extra")
+ if result != expected {
+ t.Errorf("expected %s got %s", expected, result)
+ }
+
+}
+
+func TestFetchHandlerSetExtent(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", config: cfg, webClient: hc}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r = request.SetResources(r, request.NewResources(cfg, nil, nil, nil, client))
+
+ now := time.Now()
+ then := now.Add(-5 * time.Hour)
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"start": 300, "period": 300, "count": 5}`)))
+
+ // should short circuit from internal checks
+ // although this func does not return a value to test, these calls exercise all coverage areas
+ client.fetchHandlerSetExtent(nil, nil, nil)
+ client.fetchHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+ client.fetchHandlerSetExtent(r, nil, &timeseries.Extent{Start: now, End: now})
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{a}`)))
+ client.fetchHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+
+}
+
+func TestFetchHandlerParseTimeRangeQuery(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", config: cfg, webClient: hc}
+
+ r, err := http.NewRequest(http.MethodGet, "http://0/", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"start": 300, "period": 300, "count": 5}`)))
+ _, err = client.fetchHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"period": 300, "count": 5}`)))
+ expected := "missing request parameter: start"
+ _, err = client.fetchHandlerParseTimeRangeQuery(r)
+ if err.Error() != expected {
+ t.Errorf("expected %s got %s", expected, err.Error())
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"start": 300, "count": 5}`)))
+ expected = "missing request parameter: period"
+ _, err = client.fetchHandlerParseTimeRangeQuery(r)
+ if err.Error() != expected {
+ t.Errorf("expected %s got %s", expected, err.Error())
+ }
+
+ r.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"start": 300, "period": 300}`)))
+ expected = "missing request parameter: count"
+ _, err = client.fetchHandlerParseTimeRangeQuery(r)
+ if err.Error() != expected {
+ t.Errorf("expected %s got %s", expected, err.Error())
+ }
+}
diff --git a/internal/proxy/origins/irondb/handler_find.go b/internal/proxy/origins/irondb/handler_find.go
new file mode 100644
index 000000000..ef5906e60
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_find.go
@@ -0,0 +1,27 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// FindHandler handles requests to find metric information and processes them
+// through the object proxy cache.
+func (c *Client) FindHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.ObjectProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/irondb/handler_find_test.go b/internal/proxy/origins/irondb/handler_find_test.go
new file mode 100644
index 000000000..a895b31dd
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_find_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestFindHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/find/1/tags?query=metric"+
+ "&activity_start_secs=0&activity_end_secs=900", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, ok := client.config.Paths["/"+mnFind+"/"]
+ if !ok {
+ t.Errorf("could not find path config named %s", mnFind)
+ }
+
+ client.FindHandler(w, r)
+ resp := w.Result()
+
+ // It should return 200 OK.
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/irondb/handler_health.go b/internal/proxy/origins/irondb/handler_health.go
new file mode 100644
index 000000000..2ca836abb
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_health.go
@@ -0,0 +1,66 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+// HealthHandler checks the health of the Configured Upstream Origin
+func (c *Client) HealthHandler(w http.ResponseWriter, r *http.Request) {
+ if c.healthURL == nil {
+ c.populateHeathCheckRequestValues()
+ }
+
+ if c.healthMethod == "-" {
+ w.WriteHeader(400)
+ w.Write([]byte("Health Check URL not Configured for origin: " + c.config.Name))
+ return
+ }
+
+ req, _ := http.NewRequest(c.healthMethod, c.healthURL.String(), nil)
+ req = req.WithContext(r.Context())
+
+ req.Header = c.healthHeaders
+ engines.DoProxy(w, req)
+
+}
+
+func (c *Client) populateHeathCheckRequestValues() {
+
+ oc := c.config
+
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/" + mnState
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ oc.HealthCheckQuery = ""
+ }
+
+ c.healthURL = c.BaseURL()
+ c.healthURL.Path += oc.HealthCheckUpstreamPath
+ c.healthURL.RawQuery = oc.HealthCheckQuery
+ c.healthMethod = oc.HealthCheckVerb
+
+ if oc.HealthCheckHeaders != nil {
+ c.healthHeaders = http.Header{}
+ headers.UpdateHeaders(c.healthHeaders, oc.HealthCheckHeaders)
+ }
+}
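+
+// For example, an origin with no explicit health check settings ("-") defaults to an
+// HTTP GET of the mnState endpoint on the origin's BaseURL with an empty query string.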
diff --git a/internal/proxy/origins/irondb/handler_health_test.go b/internal/proxy/origins/irondb/handler_health_test.go
new file mode 100644
index 000000000..36e09e3a6
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_health_test.go
@@ -0,0 +1,103 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestHealthHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/health", "debug")
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ client.healthMethod = "-"
+
+ w = httptest.NewRecorder()
+ client.HealthHandler(w, r)
+ resp = w.Result()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status: 400 got %d.", resp.StatusCode)
+ }
+
+}
+
+func TestHealthHandlerCustomPath(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "", nil, "irondb", "/health", "debug")
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+
+ client.config.HealthCheckUpstreamPath = "-"
+ client.config.HealthCheckVerb = "-"
+ client.config.HealthCheckQuery = "-"
+
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "" {
+ t.Errorf("expected '' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/handler_histogram.go b/internal/proxy/origins/irondb/handler_histogram.go
new file mode 100644
index 000000000..234299334
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_histogram.go
@@ -0,0 +1,171 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/md5"
+)
+
+// HistogramHandler handles requests for histogram timeseries data and processes
+// them through the delta proxy cache.
+func (c *Client) HistogramHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// histogramHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) histogramHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+ var err error
+ if trq == nil {
+ if trq, err = c.ParseTimeRangeQuery(r); err != nil {
+ return
+ }
+ }
+
+ st := extent.Start.UnixNano() - (extent.Start.UnixNano() % int64(trq.Step))
+ et := extent.End.UnixNano() - (extent.End.UnixNano() % int64(trq.Step))
+ if st == et {
+ et += int64(trq.Step)
+ }
+
+ ps := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 6)
+ if len(ps) < 6 || ps[0] != "histogram" {
+ return
+ }
+
+ sb := new(strings.Builder)
+ if strings.HasPrefix(r.URL.Path, "/") {
+ sb.WriteString("/")
+ }
+
+ sb.WriteString("histogram")
+ sb.WriteString("/" + strconv.FormatInt(time.Unix(0, st).Unix(), 10))
+ sb.WriteString("/" + strconv.FormatInt(time.Unix(0, et).Unix(), 10))
+ sb.WriteString("/" + strings.Join(ps[3:], "/"))
+ r.URL.Path = sb.String()
+}
+
+// histogramHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) histogramHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+ var ps []string
+ if strings.HasPrefix(r.URL.Path, "/irondb") {
+ ps = strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 7)
+ if len(ps) > 0 {
+ ps = ps[1:]
+ }
+ } else {
+ ps = strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 6)
+ }
+
+ if len(ps) < 6 || ps[0] != "histogram" {
+ return nil, errors.ErrNotTimeRangeQuery
+ }
+
+ trq.Statement = "/histogram/" + strings.Join(ps[4:], "/")
+
+ var err error
+ if trq.Extent.Start, err = parseTimestamp(ps[1]); err != nil {
+ return nil, err
+ }
+
+ if trq.Extent.End, err = parseTimestamp(ps[2]); err != nil {
+ return nil, err
+ }
+
+ if trq.Step, err = parseDuration(ps[3]); err != nil {
+ return nil, err
+ }
+
+ return trq, nil
+}
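+
+// For example, the path /histogram/0/900/300/00112233-4455-6677-8899-aabbccddeeff/metric
+// parses to an Extent spanning Unix seconds 0 through 900, a 300-second Step, and a
+// Statement of "/histogram/00112233-4455-6677-8899-aabbccddeeff/metric".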
+
+// histogramHandlerDeriveCacheKey calculates a query-specific keyname based on
+// the user request.
+func (c Client) histogramHandlerDeriveCacheKey(path string, params url.Values,
+ headers http.Header, body io.ReadCloser, extra string) string {
+ var sb strings.Builder
+ sb.WriteString(path)
+ var ps []string
+ if strings.HasPrefix(path, "/irondb") {
+ ps = strings.SplitN(strings.TrimPrefix(path, "/"), "/", 7)
+ if len(ps) > 0 {
+ ps = ps[1:]
+ }
+ } else {
+ ps = strings.SplitN(strings.TrimPrefix(path, "/"), "/", 6)
+ }
+
+ if len(ps) >= 6 || ps[0] == "histogram" {
+ sb.WriteString("/histogram/" + strings.Join(ps[3:], "/"))
+ }
+
+ sb.WriteString(extra)
+ return md5.Checksum(sb.String())
+}
+
+// histogramHandlerFastForwardURL returns the url to fetch the Fast Forward value
+// based on a timerange URL.
+func (c *Client) histogramHandlerFastForwardURL(
+ r *http.Request) (*url.URL, error) {
+
+ rsc := request.GetResources(r)
+
+ var err error
+ u := urls.Clone(r.URL)
+ trq := rsc.TimeRangeQuery
+ if trq == nil {
+ trq, err = c.ParseTimeRangeQuery(r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ now := time.Now().Unix()
+ start := now - (now % int64(trq.Step.Seconds()))
+ end := start + int64(trq.Step.Seconds())
+ ps := strings.SplitN(strings.TrimPrefix(u.Path, "/"), "/", 6)
+ if len(ps) < 6 || ps[0] != "histogram" {
+ return nil, errors.InvalidPath(u.Path)
+ }
+
+ sb := new(strings.Builder)
+ if strings.HasPrefix(u.Path, "/") {
+ sb.WriteString("/")
+ }
+
+ sb.WriteString("histogram")
+ sb.WriteString("/" + strconv.FormatInt(time.Unix(start, 0).Unix(), 10))
+ sb.WriteString("/" + strconv.FormatInt(time.Unix(end, 0).Unix(), 10))
+ sb.WriteString("/" + strings.Join(ps[3:], "/"))
+ u.Path = sb.String()
+ return u, nil
+}
diff --git a/internal/proxy/origins/irondb/handler_histogram_test.go b/internal/proxy/origins/irondb/handler_histogram_test.go
new file mode 100644
index 000000000..011a440c1
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_histogram_test.go
@@ -0,0 +1,228 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestHistogramHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/histogram/0/900/300/00112233-4455-6677-8899-aabbccddeeff/"+
+ "metric", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.HistogramHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ w = httptest.NewRecorder()
+ r = httptest.NewRequest("GET",
+ "http://0/irondb/histogram/0/900/300/"+
+ "00112233-4455-6677-8899-aabbccddeeff/"+
+ "metric", nil)
+
+ r = request.SetResources(r, rsc)
+
+ client.HistogramHandler(w, r)
+ resp = w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestHistogramHandlerDeriveCacheKey(t *testing.T) {
+
+ client := &Client{name: "test"}
+ path := "/histogram/0/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, _, r, _, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", path, "debug")
+ if err != nil {
+ t.Error(err)
+ }
+
+ expected := "11cc1b20a869f6ff0559b08b014c3ca6"
+ result := client.histogramHandlerDeriveCacheKey(path, r.URL.Query(), r.Header, r.Body, "extra")
+ if result != expected {
+ t.Errorf("expected %s got %s", expected, result)
+ }
+
+ expected = "c70681051e3af3de12f37686b6a4224f"
+ path = "/irondb/0/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ result = client.histogramHandlerDeriveCacheKey(path, r.URL.Query(), r.Header, r.Body, "extra")
+ if result != expected {
+ t.Errorf("expected %s got %s", expected, result)
+ }
+
+}
+
+func TestHistogramHandlerParseTimeRangeQuery(t *testing.T) {
+
+ path := "/histogram/0/900/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ r, err := http.NewRequest(http.MethodGet, "http://0"+path, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+
+ //tr := model.NewRequest("HistogramHandler", r.Method, r.URL, r.Header, cfg.Timeout, r, hc)
+
+ // case where everything is good
+ _, err = client.histogramHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // case where the path is not long enough
+ r.URL.Path = "/histogram/0/900/"
+ expected := errors.ErrNotTimeRangeQuery
+ _, err = client.histogramHandlerParseTimeRangeQuery(r)
+ if err == nil || err != expected {
+ t.Errorf("expected %s got %s", expected.Error(), err.Error())
+ }
+
+ // case where the start can't be parsed
+ r.URL.Path = "/histogram/z/900/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ expected2 := `unable to parse timestamp z: strconv.ParseInt: parsing "z": invalid syntax`
+ _, err = client.histogramHandlerParseTimeRangeQuery(r)
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected %s got %s", expected2, err.Error())
+ }
+
+ // case where the end can't be parsed
+ r.URL.Path = "/histogram/0/z/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, err = client.histogramHandlerParseTimeRangeQuery(r)
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected %s got %s", expected2, err.Error())
+ }
+
+ // case where the period can't be parsed
+ r.URL.Path = "/histogram/0/900/z/00112233-4455-6677-8899-aabbccddeeff/metric"
+ expected2 = `unable to parse duration zs: time: invalid duration zs`
+ _, err = client.histogramHandlerParseTimeRangeQuery(r)
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected %s got %s", expected2, err.Error())
+ }
+
+}
+
+func TestHistogramHandlerSetExtent(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r = request.SetResources(r, request.NewResources(cfg, nil, nil, nil, client))
+
+ now := time.Now()
+ then := now.Add(-5 * time.Hour)
+
+ client.histogramHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+ if r.URL.Path != "/" {
+ t.Errorf("expected %s got %s", "/", r.URL.Path)
+ }
+
+ // although SetExtent does not return a value to test, these lines exercise all coverage areas
+ r.URL.Path = "/histogram/900/900/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ client.histogramHandlerSetExtent(r, nil, &timeseries.Extent{Start: now, End: now})
+
+ r.URL.Path = "/histogram/900/900/300"
+ trq := &timeseries.TimeRangeQuery{Step: 300 * time.Second}
+ client.histogramHandlerSetExtent(r, trq, &timeseries.Extent{Start: then, End: now})
+
+}
+
+func TestHistogramHandlerFastForwardURLError(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet,
+ "http://0/", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.NewResources(cfg, nil, nil, nil, client)
+ r = request.SetResources(r, rsc)
+
+ r.URL.Path = "/histogram/x/900/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, err = client.histogramHandlerFastForwardURL(r)
+ if err == nil {
+ t.Errorf("expected error: %s", "invalid parameters")
+ }
+
+ r.URL.Path = "/a/900/900/300/00112233-4455-6677-8899-aabbccddeeff/metric"
+ rsc.TimeRangeQuery = &timeseries.TimeRangeQuery{Step: 300 * time.Second}
+ _, err = client.histogramHandlerFastForwardURL(r)
+ if err == nil {
+ t.Errorf("expected error: %s", "invalid parameters")
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/handler_proxy.go b/internal/proxy/origins/irondb/handler_proxy.go
new file mode 100644
index 000000000..a11f4960c
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_proxy.go
@@ -0,0 +1,27 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyHandler sends a request through the basic reverse proxy to the origin
+// for non-cacheable API calls.
+func (c *Client) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/irondb/handler_proxy_test.go b/internal/proxy/origins/irondb/handler_proxy_test.go
new file mode 100644
index 000000000..3ae1d26d2
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_proxy_test.go
@@ -0,0 +1,55 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/histogram/0/900/300/00112233-4455-6677-8899-aabbccddeeff/"+
+ "metric", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.ProxyHandler(w, r)
+ resp := w.Result()
+
+ // It should return 200 OK.
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/irondb/handler_raw.go b/internal/proxy/origins/irondb/handler_raw.go
new file mode 100644
index 000000000..3530093a4
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_raw.go
@@ -0,0 +1,69 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// RawHandler handles requests for raw numeric timeseries data and processes
+// them through the delta proxy cache.
+func (c *Client) RawHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// rawHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) rawHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+ q := r.URL.Query()
+ q.Set(upStart, formatTimestamp(extent.Start, true))
+ q.Set(upEnd, formatTimestamp(extent.End, true))
+ r.URL.RawQuery = q.Encode()
+}
+
+// rawHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) rawHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+ trq.Statement = r.URL.Path
+
+ qp := r.URL.Query()
+ var err error
+ p := ""
+ if p = qp.Get(upStart); p == "" {
+ return nil, errors.MissingURLParam(upStart)
+ }
+
+ if trq.Extent.Start, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ if p = qp.Get(upEnd); p == "" {
+ return nil, errors.MissingURLParam(upEnd)
+ }
+
+ if trq.Extent.End, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ return trq, nil
+}
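+
+// For illustration only, a request the parser above accepts looks like:
+//
+//   GET /raw/00112233-4455-6677-8899-aabbccddeeff/metric?start_ts=0&end_ts=900
+//
+// where start_ts and end_ts are Unix timestamps; both parameters are required,
+// and a missing or unparsable value yields an error rather than a TimeRangeQuery.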
diff --git a/internal/proxy/origins/irondb/handler_raw_test.go b/internal/proxy/origins/irondb/handler_raw_test.go
new file mode 100644
index 000000000..a6c1d4156
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_raw_test.go
@@ -0,0 +1,112 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRawHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs,
+ 200, "{}", nil, "irondb", "/raw/00112233-4455-6677-8899-aabbccddeeff/metric"+
+ "?start_ts=0&end_ts=900", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.RawHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestRawHandlerParseTimeRangeQuery(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/raw/00112233-4455-6677-8899-aabbccddeeff/metric", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ //tr := model.NewRequest("RawHandler", r.Method, r.URL, r.Header, cfg.Timeout, r, hc)
+
+ // case where everything is good
+ r.URL.RawQuery = "start_ts=9012&end_ts=3456"
+ trq, err := client.rawHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+ if trq == nil {
+ t.Errorf("expected value got nil for %s", r.URL.RawQuery)
+ }
+
+ // missing start param
+ r.URL.RawQuery = "end_ts=3456"
+ _, err = client.rawHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // can't parse start param
+ r.URL.RawQuery = "start_ts=abc&end_ts=3456"
+ _, err = client.rawHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // missing end param
+ r.URL.RawQuery = "start_ts=3456"
+ _, err = client.rawHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+ // can't parse end param
+ r.URL.RawQuery = "start_ts=9012&end_ts=def"
+ _, err = client.rawHandlerParseTimeRangeQuery(r)
+ if err == nil {
+ t.Errorf("expected error for parameter missing")
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/handler_rollup.go b/internal/proxy/origins/irondb/handler_rollup.go
new file mode 100644
index 000000000..faf5cdf5d
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_rollup.go
@@ -0,0 +1,126 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// RollupHandler handles requests for numeric timeseries data with specified
+// spans and processes them through the delta proxy cache.
+func (c *Client) RollupHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// rollupHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) rollupHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+
+ if r == nil || extent == nil || (extent.Start.IsZero() && extent.End.IsZero()) {
+ return
+ }
+
+ var err error
+ if trq == nil {
+ if trq, err = c.ParseTimeRangeQuery(r); err != nil {
+ return
+ }
+ }
+
+ st := extent.Start.UnixNano() - (extent.Start.UnixNano() % int64(trq.Step))
+ et := extent.End.UnixNano() - (extent.End.UnixNano() % int64(trq.Step))
+ if st == et {
+ et += int64(trq.Step)
+ }
+
+ q := r.URL.Query()
+ q.Set(upStart, formatTimestamp(time.Unix(0, st), true))
+ q.Set(upEnd, formatTimestamp(time.Unix(0, et), true))
+ r.URL.RawQuery = q.Encode()
+}
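+
+// Worked example of the alignment above (illustrative values): with a 300s
+// step, an extent starting at Unix time 1000 floors to 900, since
+// 1000e9 - (1000e9 % 300e9) = 900e9 nanoseconds. When the floored start and
+// end land on the same boundary, one full step is added to the end so the
+// upstream rollup query always covers at least one period.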
+
+// rollupHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) rollupHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+ trq.Statement = r.URL.Path
+
+ qp := r.URL.Query()
+ var err error
+ p := ""
+ if p = qp.Get(upStart); p == "" {
+ return nil, errors.MissingURLParam(upStart)
+ }
+
+ if trq.Extent.Start, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ if p = qp.Get(upEnd); p == "" {
+ return nil, errors.MissingURLParam(upEnd)
+ }
+
+ if trq.Extent.End, err = parseTimestamp(p); err != nil {
+ return nil, err
+ }
+
+ if p = qp.Get(upSpan); p == "" {
+ return nil, errors.MissingURLParam(upSpan)
+ }
+
+ if trq.Step, err = parseDuration(p); err != nil {
+ return nil, err
+ }
+
+ return trq, nil
+}
+
+// rollupHandlerFastForwardURL returns the url to fetch the Fast Forward value
+// based on a timerange URL.
+func (c *Client) rollupHandlerFastForwardURL(
+ r *http.Request) (*url.URL, error) {
+
+ rsc := request.GetResources(r)
+ trq := rsc.TimeRangeQuery
+
+ var err error
+ u := urls.Clone(r.URL)
+ q := u.Query()
+ if trq == nil {
+ trq, err = c.ParseTimeRangeQuery(r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ now := time.Now().Unix()
+ start := now - (now % int64(trq.Step.Seconds()))
+ end := start + int64(trq.Step.Seconds())
+ q.Set(upStart, formatTimestamp(time.Unix(start, 0), true))
+ q.Set(upEnd, formatTimestamp(time.Unix(end, 0), true))
+ u.RawQuery = q.Encode()
+ return u, nil
+}
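+
+// Illustrative example of the fast-forward window computed above: with a 300s
+// step and a current time of Unix 1004, start = 1004 - (1004 % 300) = 900 and
+// end = 1200, so the fast-forward URL requests the single rollup period
+// currently in progress.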
diff --git a/internal/proxy/origins/irondb/handler_rollup_test.go b/internal/proxy/origins/irondb/handler_rollup_test.go
new file mode 100644
index 000000000..c3c9c3330
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_rollup_test.go
@@ -0,0 +1,183 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRollupHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/rollup/00112233-4455-6677-8899-aabbccddeeff/metric"+
+ "?start_ts=0&end_ts=900&rollup_span=300s&type=average", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.RollupHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestRollupHandlerSetExtent(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0//rollup/00112233-4455-6677-8899-aabbccddeeff/metric", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r = request.SetResources(r, request.NewResources(cfg, nil, nil, nil, client))
+
+ now := time.Now()
+ then := now.Add(-5 * time.Hour)
+
+ // should short circuit from internal checks
+ // although this func does not return a value to test, these lines exercise all coverage areas
+ client.rollupHandlerSetExtent(nil, nil, nil)
+ client.rollupHandlerSetExtent(r, nil, &timeseries.Extent{})
+ client.rollupHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+ r.URL.RawQuery = "start_ts=0&end_ts=900&rollup_span=300s&type=average"
+ client.rollupHandlerSetExtent(r, nil, &timeseries.Extent{Start: now, End: now})
+
+}
+
+func TestRollupHandlerParseTimeRangeQuery(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/rollup/00112233-4455-6677-8899-aabbccddeeff/metric", nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ r = request.SetResources(r, request.NewResources(cfg, nil, nil, nil, client))
+
+ // case where everything is good
+ r.URL.RawQuery = "start_ts=0&end_ts=900&rollup_span=300s&type=average"
+ trq, err := client.rollupHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+ if trq == nil {
+ t.Errorf("expected value got nil for %s", r.URL.RawQuery)
+ }
+
+ // missing start param
+ r.URL.RawQuery = "end_ts=3456&rollup_span=7890"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expected := errors.MissingURLParam(upStart)
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("expected %s got %s", expected.Error(), err)
+ }
+
+ // can't parse start param
+ r.URL.RawQuery = "start_ts=abcd&end_ts=3456&rollup_span=7890"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expectedS := `unable to parse timestamp abcd: strconv.ParseInt: parsing "abcd": invalid syntax`
+ if err == nil || err.Error() != expectedS {
+ t.Errorf("expected %s got %v", expectedS, err)
+ }
+
+ // missing end param
+ r.URL.RawQuery = "start_ts=9012&rollup_span=7890"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expected = errors.MissingURLParam(upEnd)
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("expected %s got %s", expected.Error(), err)
+ }
+
+ // can't parse end param
+ r.URL.RawQuery = "start_ts=9012&end_ts=efgh&rollup_span=7890"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expectedS = `unable to parse timestamp efgh: strconv.ParseInt: parsing "efgh": invalid syntax`
+ if err == nil || err.Error() != expectedS {
+ t.Errorf("expected %s got %v", expectedS, err)
+ }
+
+ // missing rollup_span param
+ r.URL.RawQuery = "start_ts=9012&end_ts=3456"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expected = errors.MissingURLParam(upSpan)
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("expected %s got %s", expected.Error(), err)
+ }
+
+ // unparsable rollup_span param
+ r.URL.RawQuery = "start_ts=9012&end_ts=3456&rollup_span=pqrs"
+ _, err = client.rollupHandlerParseTimeRangeQuery(r)
+ expectedS = `unable to parse duration pqrs: time: invalid duration pqrs`
+ if err == nil || err.Error() != expectedS {
+ t.Errorf("expected %s got %v", expectedS, err)
+ }
+
+}
+
+func TestRollupHandlerFastForwardURLError(t *testing.T) {
+
+ client := &Client{name: "test"}
+ _, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs,
+ 200, "{}", nil, "irondb",
+ "/rollup/00112233-4455-6677-8899-aabbccddeeff/metric", "debug")
+ if err != nil {
+ t.Error(err)
+ }
+ rsc := request.GetResources(r)
+ client.webClient = hc
+ client.config = rsc.OriginConfig
+ rsc.OriginClient = client
+
+ _, err = client.rollupHandlerFastForwardURL(r)
+ if err == nil {
+ t.Errorf("expected error: %s", "invalid parameters")
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/handler_state.go b/internal/proxy/origins/irondb/handler_state.go
new file mode 100644
index 000000000..c8e1e51a9
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_state.go
@@ -0,0 +1,27 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// StateHandler handles requests for state data and processes them through the
+// basic reverse proxy to the origin for non-cacheable API calls.
+func (c *Client) StateHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/irondb/handler_state_test.go b/internal/proxy/origins/irondb/handler_state_test.go
new file mode 100644
index 000000000..e7172b868
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_state_test.go
@@ -0,0 +1,54 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestStateHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.StateHandler(w, r)
+ resp := w.Result()
+
+ // It should return 200 OK.
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/irondb/handler_text.go b/internal/proxy/origins/irondb/handler_text.go
new file mode 100644
index 000000000..83852e2e0
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_text.go
@@ -0,0 +1,95 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/internal/util/md5"
+)
+
+// TextHandler handles requests for text timeseries data and processes them
+// through the delta proxy cache.
+func (c *Client) TextHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
+
+// textHandlerSetExtent will change the upstream request query to use the
+// provided Extent.
+func (c Client) textHandlerSetExtent(r *http.Request,
+ trq *timeseries.TimeRangeQuery,
+ extent *timeseries.Extent) {
+ ps := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 5)
+ if len(ps) < 5 || ps[0] != "read" {
+ return
+ }
+
+ sb := new(strings.Builder)
+ if strings.HasPrefix(r.URL.Path, "/") {
+ sb.WriteString("/")
+ }
+
+ sb.WriteString("read")
+ sb.WriteString("/" + strconv.FormatInt(extent.Start.Unix(), 10))
+ sb.WriteString("/" + strconv.FormatInt(extent.End.Unix(), 10))
+ sb.WriteString("/" + strings.Join(ps[3:], "/"))
+ r.URL.Path = sb.String()
+}
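+
+// For illustration, textHandlerSetExtent rewrites only the start and end path
+// segments; a request for
+//
+//   /read/0/900/00112233-4455-6677-8899-aabbccddeeff/metric
+//
+// with an Extent of 100..200 becomes
+//
+//   /read/100/200/00112233-4455-6677-8899-aabbccddeeff/metric
+//
+// and the UUID and metric-name segments are carried through unchanged.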
+
+// textHandlerParseTimeRangeQuery parses the key parts of a TimeRangeQuery
+// from the inbound HTTP Request.
+func (c *Client) textHandlerParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+ trq := &timeseries.TimeRangeQuery{}
+ ps := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 5)
+ if len(ps) < 5 || ps[0] != "read" {
+ return nil, errors.ErrNotTimeRangeQuery
+ }
+
+ trq.Statement = "/read/" + strings.Join(ps[3:], "/")
+
+ var err error
+ if trq.Extent.Start, err = parseTimestamp(ps[1]); err != nil {
+ return nil, err
+ }
+
+ if trq.Extent.End, err = parseTimestamp(ps[2]); err != nil {
+ return nil, err
+ }
+
+ return trq, nil
+}
+
+// textHandlerDeriveCacheKey calculates a query-specific keyname based on the
+// user request.
+func (c Client) textHandlerDeriveCacheKey(path string, params url.Values,
+ headers http.Header, body io.ReadCloser, extra string) string {
+ var sb strings.Builder
+ sb.WriteString(path)
+ ps := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 5)
+ if len(ps) >= 5 || ps[0] == "read" {
+ sb.WriteString("/read/" + strings.Join(ps[3:], "/"))
+ }
+
+ sb.WriteString(extra)
+ return md5.Checksum(sb.String())
+}
diff --git a/internal/proxy/origins/irondb/handler_text_test.go b/internal/proxy/origins/irondb/handler_text_test.go
new file mode 100644
index 000000000..5ddf9b22f
--- /dev/null
+++ b/internal/proxy/origins/irondb/handler_text_test.go
@@ -0,0 +1,146 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestTextHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/read/0/900/00112233-4455-6677-8899-aabbccddeeff/metric", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.TextHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
+
+func TestTextHandlerDeriveCacheKey(t *testing.T) {
+
+ client := &Client{name: "test"}
+ path := "/read/0/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, _, r, _, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", path, "debug")
+ if err != nil {
+ t.Error(err)
+ }
+
+ const expected = "a506d1700414b1d0ac15340bd619fdab"
+ result := client.textHandlerDeriveCacheKey(path, r.URL.Query(), r.Header, r.Body, "extra")
+ if result != expected {
+ t.Errorf("expected %s got %s", expected, result)
+ }
+
+}
+
+func TestTextHandlerParseTimeRangeQuery(t *testing.T) {
+
+ path := "/read/0/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ r, err := http.NewRequest(http.MethodGet, "http://0"+path, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // set up a test client with default path configs
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+
+ //tr := model.NewRequest("RollupHandler", r.Method, r.URL, r.Header, cfg.Timeout, r, hc)
+
+ // case where everything is good
+ _, err = client.textHandlerParseTimeRangeQuery(r)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // case where the path is not long enough
+ r.URL.Path = "/read/0/900/"
+ expected := errors.ErrNotTimeRangeQuery
+ _, err = client.textHandlerParseTimeRangeQuery(r)
+ if err == nil || err != expected {
+ t.Errorf("expected %s got %s", expected, err.Error())
+ }
+
+ // case where the start can't be parsed
+ r.URL.Path = "/read/z/900/00112233-4455-6677-8899-aabbccddeeff/metric"
+ expected2 := `unable to parse timestamp z: strconv.ParseInt: parsing "z": invalid syntax`
+ _, err = client.textHandlerParseTimeRangeQuery(r)
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected %s got %s", expected2, err.Error())
+ }
+
+ // case where the end can't be parsed
+ r.URL.Path = "/read/0/z/00112233-4455-6677-8899-aabbccddeeff/metric"
+ _, err = client.textHandlerParseTimeRangeQuery(r)
+ if err == nil || err.Error() != expected2 {
+ t.Errorf("expected %s got %s", expected2, err.Error())
+ }
+
+}
+
+func TestTextHandlerSetExtent(t *testing.T) {
+
+ // provide bad URL with no TimeRange query params
+ hc := tu.NewTestWebClient()
+ cfg := config.NewOriginConfig()
+ client := &Client{name: "test", webClient: hc, config: cfg}
+ cfg.Paths = client.DefaultPathConfigs(cfg)
+ r, err := http.NewRequest(http.MethodGet, "http://0/test", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ //tr := model.NewRequest("TextHandler", r.Method, r.URL, r.Header, cfg.Timeout, r, hc)
+
+ now := time.Now()
+ then := now.Add(-5 * time.Hour)
+
+ client.textHandlerSetExtent(r, nil, &timeseries.Extent{Start: then, End: now})
+ if r.URL.Path != "/test" {
+ t.Errorf("expected %s got %s", "/test", r.URL.Path)
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/irondb.go b/internal/proxy/origins/irondb/irondb.go
new file mode 100644
index 000000000..d317ba624
--- /dev/null
+++ b/internal/proxy/origins/irondb/irondb.go
@@ -0,0 +1,136 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+// Package irondb provides proxy origin support for IRONdb databases.
+package irondb
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// IRONdb API path segments.
+const (
+ mnRaw = "raw"
+ mnRollup = "rollup"
+ mnFetch = "fetch"
+ mnRead = "read"
+ mnHistogram = "histogram"
+ mnFind = "find"
+ mnCAQL = "extension/lua/caql_v1"
+ mnCAQLPub = "extension/lua/public/caql_v1"
+ mnState = "state"
+)
+
+// Common IRONdb URL query parameter names.
+const (
+ upQuery = "query"
+ upStart = "start_ts"
+ upEnd = "end_ts"
+ upSpan = "rollup_span"
+ upEngine = "get_engine"
+ upType = "type"
+ upCAQLQuery = "q"
+ upCAQLStart = "start"
+ upCAQLEnd = "end"
+ upCAQLPeriod = "period"
+)
+
+// IRONdb request body field names.
+const (
+ rbStart = "start"
+ rbCount = "count"
+ rbPeriod = "period"
+)
+
+type trqParser func(*http.Request) (*timeseries.TimeRangeQuery, error)
+type extentSetter func(*http.Request, *timeseries.TimeRangeQuery, *timeseries.Extent)
+
+// Client values provide access to IRONdb and implement the Trickster proxy
+// client interface.
+type Client struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+ handlers map[string]http.Handler
+ handlersRegistered bool
+
+ healthURL *url.URL
+ healthHeaders http.Header
+ healthMethod string
+
+ trqParsers map[string]trqParser
+ extentSetters map[string]extentSetter
+}
+
+// NewClient returns a new Client instance.
+func NewClient(name string, oc *config.OriginConfig, cache cache.Cache) (*Client, error) {
+ c, err := proxy.NewHTTPClient(oc)
+ client := &Client{name: name, config: oc, cache: cache, webClient: c}
+ client.makeTrqParsers()
+ client.makeExtentSetters()
+ return client, err
+}
+
+func (c *Client) makeTrqParsers() {
+ c.trqParsers = map[string]trqParser{
+ "RawHandler": c.rawHandlerParseTimeRangeQuery,
+ "RollupHandler": c.rollupHandlerParseTimeRangeQuery,
+ "FetchHandler": c.fetchHandlerParseTimeRangeQuery,
+ "TextHandler": c.textHandlerParseTimeRangeQuery,
+ "HistogramHandler": c.histogramHandlerParseTimeRangeQuery,
+ "CAQLHandler": c.caqlHandlerParseTimeRangeQuery,
+ }
+}
+
+func (c *Client) makeExtentSetters() {
+ c.extentSetters = map[string]extentSetter{
+ "RawHandler": c.rawHandlerSetExtent,
+ "RollupHandler": c.rollupHandlerSetExtent,
+ "FetchHandler": c.fetchHandlerSetExtent,
+ "TextHandler": c.textHandlerSetExtent,
+ "HistogramHandler": c.histogramHandlerSetExtent,
+ "CAQLHandler": c.caqlHandlerSetExtent,
+ }
+}
+
+// Configuration returns the upstream Configuration for this Client.
+func (c *Client) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// HTTPClient returns the HTTP client used by this Client.
+func (c *Client) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Cache returns a handle to the Cache instance used by this Client.
+func (c *Client) Cache() cache.Cache {
+ return c.cache
+}
+
+// Name returns the name of the origin Configuration proxied by the Client.
+func (c *Client) Name() string {
+ return c.name
+}
+
+// SetCache sets the Cache object the client will use for caching origin content
+func (c *Client) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
diff --git a/internal/proxy/origins/irondb/irondb_test.go b/internal/proxy/origins/irondb/irondb_test.go
new file mode 100644
index 000000000..9defa1b59
--- /dev/null
+++ b/internal/proxy/origins/irondb/irondb_test.go
@@ -0,0 +1,134 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "testing"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ // Initialize Trickster instrumentation metrics.
+ metrics.Init()
+}
+
+func TestIRONdbClientInterfacing(t *testing.T) {
+
+ // this test ensures the client will properly conform to the
+ // Client and TimeseriesClient interfaces
+
+ c := &Client{name: "test"}
+ var oc origins.Client = c
+ var tc origins.TimeseriesClient = c
+
+ if oc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", oc.Name())
+ }
+
+ if tc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", tc.Name())
+ }
+}
+
+func TestNewClient(t *testing.T) {
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://example.com", "-origin-type", "TEST_CLIENT"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ oc := &config.OriginConfig{OriginType: "TEST_CLIENT"}
+ c, err := NewClient("default", oc, cache)
+ if err != nil {
+ t.Error(err)
+ }
+ if c.Name() != "default" {
+ t.Errorf("expected %s got %s", "default", c.Name())
+ }
+
+ if c.Cache().Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Cache().Configuration().CacheType)
+ }
+
+ if c.Configuration().OriginType != "TEST_CLIENT" {
+ t.Errorf("expected %s got %s", "TEST_CLIENT", c.Configuration().OriginType)
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+ client := Client{config: oc}
+ c := client.Configuration()
+ if c.OriginType != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c.OriginType)
+ }
+}
+
+func TestCache(t *testing.T) {
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://example.com", "-origin-type", "TEST_CLIENT"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ client := Client{cache: cache}
+ c := client.Cache()
+ if c.Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Configuration().CacheType)
+ }
+}
+
+func TestName(t *testing.T) {
+ client := Client{name: "TEST"}
+ c := client.Name()
+ if c != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c)
+ }
+}
+
+func TestHTTPClient(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+ client, err := NewClient("test", oc, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if client.HTTPClient() == nil {
+ t.Errorf("missing http client")
+ }
+}
+
+func TestSetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ c.SetCache(nil)
+ if c.Cache() != nil {
+ t.Errorf("expected nil cache for client named %s", "test")
+ }
+}
diff --git a/internal/proxy/origins/irondb/model.go b/internal/proxy/origins/irondb/model.go
new file mode 100644
index 000000000..398507c99
--- /dev/null
+++ b/internal/proxy/origins/irondb/model.go
@@ -0,0 +1,364 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// SeriesEnvelope values represent a time series data response from the
+// IRONdb API.
+type SeriesEnvelope struct {
+ Data DataPoints `json:"data"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ StepDuration time.Duration `json:"step,omitempty"`
+}
+
+// MarshalJSON encodes a series envelope value into a JSON byte slice.
+func (se *SeriesEnvelope) MarshalJSON() ([]byte, error) {
+ if se.StepDuration == 0 && len(se.ExtentList) == 0 {
+ // Special case for when returning data to the caller.
+ return json.Marshal(se.Data)
+ }
+
+ se2 := struct {
+ Data DataPoints `json:"data"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ StepDuration string `json:"step,omitempty"`
+ }{
+ Data: se.Data,
+ ExtentList: se.ExtentList,
+ }
+
+ if se.StepDuration != 0 {
+ se2.StepDuration = se.StepDuration.String()
+ }
+
+ return json.Marshal(se2)
+}
+
+// UnmarshalJSON decodes a JSON byte slice into this data point value.
+func (se *SeriesEnvelope) UnmarshalJSON(b []byte) error {
+ if strings.Contains(string(b), `"data"`) &&
+ (strings.Contains(string(b), `"extents"`) ||
+ strings.Contains(string(b), `"step"`)) {
+ var se2 struct {
+ Data DataPoints `json:"data"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ StepDuration string `json:"step,omitempty"`
+ }
+
+ if err := json.Unmarshal(b, &se2); err != nil {
+ return err
+ }
+
+ se.Data = se2.Data
+ se.ExtentList = se2.ExtentList
+ d, err := time.ParseDuration(se2.StepDuration)
+ if err != nil {
+ return err
+ }
+
+ se.StepDuration = d
+ return err
+ }
+
+ err := json.Unmarshal(b, &se.Data)
+ return err
+}
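+
+// For illustration, the two JSON shapes handled above are (values are examples
+// only):
+//
+//   [[0,300,1.75],[300,300,2.38]]
+//   {"data":[[0,300,1.75]],"extents":[...],"step":"5m0s"}
+//
+// MarshalJSON emits the bare data form when no step or extents are set, and
+// the wrapped form otherwise; UnmarshalJSON accepts either.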
+
+// DataPoint values represent a single data element of a time series data
+// response from the IRONdb API.
+type DataPoint struct {
+ Time time.Time
+ Step uint32
+ Value interface{}
+}
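+
+// DataPoint values travel as JSON tuples; for example (illustrative values),
+// [1556290800, 300, 1.5] carries [timestamp, step, value] and
+// [1556290800, 1.5] carries [timestamp, value]. MarshalJSON and UnmarshalJSON
+// below handle both shapes.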
+
+// MarshalJSON encodes a data point value into a JSON byte slice.
+func (dp *DataPoint) MarshalJSON() ([]byte, error) {
+ v := []interface{}{}
+ tn := float64(0)
+ fv, err := strconv.ParseFloat(formatTimestamp(dp.Time, true), 64)
+ if err == nil {
+ tn = float64(fv)
+ }
+
+ v = append(v, tn)
+ if dp.Step != 0 {
+ v = append(v, dp.Step)
+ }
+
+ v = append(v, dp.Value)
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON decodes a JSON byte slice into this data point value.
+func (dp *DataPoint) UnmarshalJSON(b []byte) error {
+ v := []interface{}{}
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ if len(v) < 2 {
+ return fmt.Errorf("unable to unmarshal IRONdb data point: %s",
+ string(b))
+ }
+
+ if fv, ok := v[0].(float64); ok {
+ tv, err := parseTimestamp(strconv.FormatFloat(fv, 'f', 3, 64))
+ if err != nil {
+ return err
+ }
+
+ dp.Time = tv
+ }
+
+ if fv, ok := v[1].(float64); ok && len(v) > 2 {
+ dp.Step = uint32(fv)
+ dp.Value = v[2]
+ return nil
+ }
+
+ dp.Value = v[1]
+ return nil
+}
+
+// DataPoints values represent sortable slices of data point values.
+type DataPoints []DataPoint
+
+// Len returns the number of elements in the DataPoints slice.
+func (dps DataPoints) Len() int {
+ return len(dps)
+}
+
+// Less returns true if the value at index i comes before the value at index j.
+func (dps DataPoints) Less(i, j int) bool {
+ return dps[i].Time.Before(dps[j].Time)
+}
+
+// Swap modifies a slice of data tuples by swapping the values in indexes
+// i and j.
+func (dps DataPoints) Swap(i, j int) {
+ dps[i], dps[j] = dps[j], dps[i]
+}
+
+// Step returns the step for the Timeseries.
+func (se *SeriesEnvelope) Step() time.Duration {
+ return se.StepDuration
+}
+
+// SetStep sets the step for the Timeseries.
+func (se *SeriesEnvelope) SetStep(step time.Duration) {
+ se.StepDuration = step
+}
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent
+// list.
+func (se *SeriesEnvelope) SetExtents(extents timeseries.ExtentList) {
+ se.ExtentList = extents
+}
+
+// Extents returns the Timeseries's extent list.
+func (se *SeriesEnvelope) Extents() timeseries.ExtentList {
+ return se.ExtentList
+}
+
+// SeriesCount returns the number of individual series in the Timeseries value.
+func (se *SeriesEnvelope) SeriesCount() int {
+ return 1
+}
+
+// ValueCount returns the count of all data values across all Series in the
+// Timeseries value.
+func (se *SeriesEnvelope) ValueCount() int {
+ return len(se.Data)
+}
+
+// TimestampCount returns the number of unique timestamps across the timeseries.
+func (se *SeriesEnvelope) TimestampCount() int {
+ ts := map[int64]struct{}{}
+ for _, dp := range se.Data {
+ ts[dp.Time.Unix()] = struct{}{}
+ }
+
+ return len(ts)
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the
+// order provided) and optionally sorts the merged Timeseries.
+func (se *SeriesEnvelope) Merge(sort bool,
+ collection ...timeseries.Timeseries) {
+ for _, ts := range collection {
+ if ts != nil {
+ if se2, ok := ts.(*SeriesEnvelope); ok {
+ se.Data = append(se.Data, se2.Data...)
+ se.ExtentList = append(se.ExtentList, se2.ExtentList...)
+ }
+ }
+ }
+
+ se.ExtentList = se.ExtentList.Compress(se.StepDuration)
+ if sort {
+ se.Sort()
+ }
+}
+
+// Clone returns a perfect copy of the base Timeseries.
+func (se *SeriesEnvelope) Clone() timeseries.Timeseries {
+ b := &SeriesEnvelope{
+ Data: make([]DataPoint, len(se.Data)),
+ StepDuration: se.StepDuration,
+ ExtentList: make(timeseries.ExtentList, 0, len(se.ExtentList)),
+ }
+
+ copy(b.ExtentList, se.ExtentList)
+ if len(se.Data) > 0 {
+ b.Data = make(DataPoints, len(se.Data))
+ copy(b.Data, se.Data)
+ }
+
+ return b
+}
+
+// CropToRange crops down a Timeseries value to the provided Extent.
+// Crop assumes the base Timeseries is already sorted, and will corrupt an
+// unsorted Timeseries.
+func (se *SeriesEnvelope) CropToRange(e timeseries.Extent) {
+ newData := DataPoints{}
+ for _, dv := range se.Data {
+ if (dv.Time.After(e.Start) || dv.Time.Equal(e.Start)) &&
+ (dv.Time.Before(e.End) || dv.Time.Equal(e.End)) {
+ newData = append(newData, dv)
+ }
+ }
+
+ se.Data = newData
+ se.ExtentList = se.ExtentList.Crop(e)
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided
+// count, by evicting elements using a least-recently-used methodology. Any
+// timestamps newer than the provided time are removed before sizing, in order
+// to support backfill tolerance. The provided extent will be marked as used
+// during crop.
+func (se *SeriesEnvelope) CropToSize(sz int, t time.Time,
+ lur timeseries.Extent) {
+ // The Series has no extents, so no need to do anything.
+ if len(se.ExtentList) < 1 {
+ se.Data = DataPoints{}
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed.
+ if se.ExtentList[len(se.ExtentList)-1].End.After(t) {
+ se.CropToRange(timeseries.Extent{Start: se.ExtentList[0].Start, End: t})
+ }
+
+ ts := map[int64]struct{}{}
+ for _, dp := range se.Data {
+ ts[dp.Time.Unix()] = struct{}{}
+ }
+
+ if len(se.Data) == 0 || len(ts) <= sz {
+ return
+ }
+
+ rc := len(ts) - sz // removal count
+ tsl := []int{}
+ for k := range ts {
+ tsl = append(tsl, int(k))
+ }
+
+ sort.Ints(tsl)
+ tsl = tsl[rc:]
+ tsm := map[int64]struct{}{}
+ for _, t := range tsl {
+ tsm[int64(t)] = struct{}{}
+ }
+
+ min, max := time.Now().Unix(), int64(0)
+ newData := DataPoints{}
+ for _, dp := range se.Data {
+ t := dp.Time.Unix()
+ if _, ok := tsm[t]; ok {
+ newData = append(newData, dp)
+ if t < min {
+ min = t
+ }
+
+ if t > max {
+ max = t
+ }
+ }
+ }
+
+ se.Data = newData
+ se.ExtentList = timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(min, 0),
+ End: time.Unix(max, 0),
+ }}
+
+ se.Sort()
+}
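+
+// Illustrative example of the sizing above: with sz=2 and data at unique
+// timestamps {100, 200, 300}, the removal count is 3-2=1, so the oldest
+// timestamp (100) is evicted and the extent list collapses to 200..300.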
+
+// Sort sorts all data in the Timeseries chronologically by their timestamp.
+func (se *SeriesEnvelope) Sort() {
+ sort.Sort(se.Data)
+}
+
+// MarshalTimeseries converts a Timeseries into a JSON blob for cache storage.
+func (c *Client) MarshalTimeseries(ts timeseries.Timeseries) ([]byte, error) {
+ return json.Marshal(ts)
+}
+
+// UnmarshalTimeseries converts a JSON blob into a Timeseries value.
+func (c *Client) UnmarshalTimeseries(data []byte) (timeseries.Timeseries,
+ error) {
+ if strings.Contains(strings.Replace(string(data), " ", "", -1),
+ `"version":"DF4"`) {
+ se := &DF4SeriesEnvelope{}
+ err := json.Unmarshal(data, &se)
+ return se, err
+ }
+
+ se := &SeriesEnvelope{}
+ err := json.Unmarshal(data, &se)
+ return se, err
+}
+
+// UnmarshalInstantaneous is not used for IRONdb origins and is here to conform
+// to the Client interface.
+func (c Client) UnmarshalInstantaneous(
+ data []byte) (timeseries.Timeseries, error) {
+ return c.UnmarshalTimeseries(data)
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
+func (se *SeriesEnvelope) Size() int {
+
+ // TODO: this implementation is a rough approximation to ensure we conform to the
+ // interface specification; it requires refinement in order to be in the ballpark.
+ c := len(se.Data) * 24
+ return c
+}
diff --git a/internal/proxy/origins/irondb/model_df4.go b/internal/proxy/origins/irondb/model_df4.go
new file mode 100644
index 000000000..bb1eb0d68
--- /dev/null
+++ b/internal/proxy/origins/irondb/model_df4.go
@@ -0,0 +1,343 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// DF4SeriesEnvelope values represent DF4 format time series data from the
+// IRONdb API.
+type DF4SeriesEnvelope struct {
+ Data [][]interface{} `json:"data"`
+ Meta []map[string]interface{} `json:"meta,omitempty"`
+ Ver string `json:"version,omitempty"`
+ Head DF4Info `json:"head"`
+ StepDuration time.Duration `json:"step,omitempty"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+}
+
+// DF4Info values contain information about the timestamps of the data elements
+// in DF4 data series.
+type DF4Info struct {
+ Count int64 `json:"count"`
+ Start int64 `json:"start"`
+ Period int64 `json:"period"`
+}
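+
+// For illustration, in the DF4 layout the timestamp of element j in any data
+// series is Head.Start + j*Head.Period (Unix seconds); e.g. with Start=0,
+// Period=60 and Count=3, the columns cover timestamps 0, 60 and 120.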
+
+// Step returns the step for the Timeseries.
+func (se *DF4SeriesEnvelope) Step() time.Duration {
+ return se.StepDuration
+}
+
+// SetStep sets the step for the Timeseries.
+func (se *DF4SeriesEnvelope) SetStep(step time.Duration) {
+ se.StepDuration = step
+}
+
+// Extents returns the Timeseries's extent list.
+func (se *DF4SeriesEnvelope) Extents() timeseries.ExtentList {
+ return se.ExtentList
+}
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent
+// list.
+func (se *DF4SeriesEnvelope) SetExtents(extents timeseries.ExtentList) {
+ se.ExtentList = extents
+}
+
+// SeriesCount returns the number of individual series in the Timeseries value.
+func (se *DF4SeriesEnvelope) SeriesCount() int {
+ return len(se.Data)
+}
+
+// ValueCount returns the count of all data values across all Series in the
+// Timeseries value.
+func (se *DF4SeriesEnvelope) ValueCount() int {
+ n := 0
+ for _, v := range se.Data {
+ n += len(v)
+ }
+
+ return n
+}
+
+// TimestampCount returns the number of unique timestamps across the timeseries.
+func (se *DF4SeriesEnvelope) TimestampCount() int {
+ return int(se.Head.Count)
+}
+
+type metricData struct {
+ name string
+ meta map[string]interface{}
+ data map[int64]interface{}
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the
+// order provided) and optionally sorts the merged Timeseries.
+func (se *DF4SeriesEnvelope) Merge(sort bool,
+ collection ...timeseries.Timeseries) {
+ for _, ts := range collection {
+ if ts != nil && ts.Step() == se.Step() {
+ if se2, ok := ts.(*DF4SeriesEnvelope); ok {
+ // Build new data series for each metric.
+ metrics := map[string]*metricData{}
+ for i, mv := range se.Meta {
+ if name, ok := mv["label"].(string); ok {
+ metrics[name] = &metricData{
+ name: name,
+ meta: mv,
+ data: map[int64]interface{}{},
+ }
+
+ for j, dv := range se.Data[i] {
+ ts := se.Head.Start + (int64(j) * se.Head.Period)
+ metrics[name].data[ts] = dv
+ }
+ }
+ }
+
+ // Merge in the data from the merging series.
+ for i, mv := range se2.Meta {
+ if name, ok := mv["label"].(string); ok {
+ md, ok := metrics[name]
+ if !ok {
+ metrics[name] = &metricData{
+ name: name,
+ meta: mv,
+ data: map[int64]interface{}{},
+ }
+
+ md = metrics[name]
+ }
+
+ for j, dv := range se2.Data[i] {
+ ts := se2.Head.Start +
+ (int64(j) * se2.Head.Period)
+ md.data[ts] = dv
+ }
+ }
+ }
+
+ // Calculate the new range of data points.
+ min := se.Head.Start
+ if se2.Head.Start < se.Head.Start {
+ min = se2.Head.Start
+ }
+
+ max := se.Head.Start + ((se.Head.Count - 1) * se.Head.Period)
+ max2 := se2.Head.Start + ((se2.Head.Count - 1) * se2.Head.Period)
+ if max2 > max {
+ max = max2
+ }
+
+ // Merge the new data series.
+ newData := [][]interface{}{}
+ newMeta := []map[string]interface{}{}
+ newHead := DF4Info{
+ Count: (max-min)/se.Head.Period + 1,
+ Start: min,
+ Period: se.Head.Period,
+ }
+
+ for _, m := range metrics {
+ newMeta = append(newMeta, m.meta)
+ d := []interface{}{}
+ for i := int64(0); i < newHead.Count; i++ {
+ ts := newHead.Start + (i * newHead.Period)
+ d = append(d, m.data[ts])
+ }
+
+ newData = append(newData, d)
+ }
+
+ se.Data = newData
+ se.Meta = newMeta
+ se.Head = newHead
+ se.ExtentList = append(se.ExtentList, se2.ExtentList...)
+ }
+ }
+ }
+
+ se.ExtentList = se.ExtentList.Compress(se.StepDuration)
+ if sort {
+ se.Sort()
+ }
+}
+
+// Clone returns a perfect copy of the base Timeseries.
+func (se *DF4SeriesEnvelope) Clone() timeseries.Timeseries {
+ b := &DF4SeriesEnvelope{
+ Data: make([][]interface{}, len(se.Data)),
+ Meta: make([]map[string]interface{}, len(se.Meta)),
+ Ver: se.Ver,
+ Head: DF4Info{
+ Count: se.Head.Count,
+ Start: se.Head.Start,
+ Period: se.Head.Period,
+ },
+ StepDuration: se.StepDuration,
+ ExtentList: se.ExtentList.Clone(),
+ }
+
+ for i, v := range se.Data {
+ b.Data[i] = make([]interface{}, len(v))
+ copy(b.Data[i], v)
+ }
+
+ for i, v := range se.Meta {
+ b.Meta[i] = make(map[string]interface{}, len(se.Meta[i]))
+ for k, mv := range v {
+ b.Meta[i][k] = mv
+ }
+ }
+
+ return b
+}
+
+// CropToRange crops down a Timeseries value to the provided Extent.
+// Crop assumes the base Timeseries is already sorted, and will corrupt an
+// unsorted Timeseries.
+func (se *DF4SeriesEnvelope) CropToRange(e timeseries.Extent) {
+ // Align crop extents with step period.
+ e.Start = time.Unix(e.Start.Unix()-(e.Start.Unix()%se.Head.Period), 0)
+ e.End = time.Unix(e.End.Unix()-(e.End.Unix()%se.Head.Period), 0)
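+ // For example, with a 300-second period a requested crop of [100, 500]
+ // aligns down to [0, 300].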
+
+ // If the Timeseries has no extents, or the extent of the series is entirely
+ // outside the extent of the crop range, return an empty set and bail.
+ if len(se.ExtentList) < 1 || se.ExtentList.OutsideOf(e) {
+ se.Data = [][]interface{}{}
+ se.Meta = []map[string]interface{}{}
+ se.Head.Start = e.Start.Unix()
+ se.Head.Count = 0
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Create a map of the time series data.
+ metrics := map[string]metricData{}
+ for i, mv := range se.Meta {
+ if name, ok := mv["label"].(string); ok {
+ metrics[name] = metricData{
+ name: name,
+ meta: mv,
+ data: map[int64]interface{}{},
+ }
+
+ for j, dv := range se.Data[i] {
+ ts := se.Head.Start + (int64(j) * se.Head.Period)
+ if ts >= e.Start.Unix() && ts <= e.End.Unix() {
+ metrics[name].data[ts] = dv
+ }
+ }
+ }
+ }
+
+ // Replace with the cropped data series.
+ newData := [][]interface{}{}
+ newMeta := []map[string]interface{}{}
+ newHead := DF4Info{
+ Count: (e.End.Unix() - e.Start.Unix()) / se.Head.Period,
+ Start: e.Start.Unix(),
+ Period: se.Head.Period,
+ }
+
+ for _, m := range metrics {
+ newMeta = append(newMeta, m.meta)
+ d := []interface{}{}
+ for i := int64(0); i < newHead.Count; i++ {
+ ts := newHead.Start + (i * newHead.Period)
+ d = append(d, m.data[ts])
+ }
+
+ newData = append(newData, d)
+ }
+
+ se.Data = newData
+ se.Meta = newMeta
+ se.Head = newHead
+ se.ExtentList = se.ExtentList.Crop(e)
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided
+// count, by evicting elements using a least-recently-used methodology. Any
+// timestamps newer than the provided time are removed before sizing, in order
+// to support backfill tolerance. The provided extent will be marked as used
+// during crop.
+func (se *DF4SeriesEnvelope) CropToSize(sz int, t time.Time,
+ lur timeseries.Extent) {
+ // If the Series has no extents, there is nothing to size; reset and return.
+ if len(se.ExtentList) < 1 {
+ se.Data = [][]interface{}{}
+ se.Meta = []map[string]interface{}{}
+ se.Head.Start = 0
+ se.Head.Count = 0
+ se.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed.
+ if se.ExtentList[len(se.ExtentList)-1].End.After(t) {
+ se.CropToRange(timeseries.Extent{Start: se.ExtentList[0].Start, End: t})
+ }
+
+ tc := se.TimestampCount()
+ if len(se.Data) == 0 || tc <= sz {
+ return
+ }
+
+ rc := tc - sz // removal count
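+ // e.g. (illustrative) with timestamps {0, 300, 600} and sz=2, rc=1: the
+ // single oldest value is dropped from each series and Head.Start advances
+ // by one period.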
+ newData := [][]interface{}{}
+ for _, data := range se.Data {
+ newData = append(newData, data[rc:])
+ }
+
+ se.Head.Start += int64(rc) * se.Head.Period
+ se.Head.Count -= int64(rc)
+ se.Data = newData
+ se.ExtentList = timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(se.Head.Start, 0),
+ End: time.Unix(se.Head.Start+((se.Head.Count-1)*se.Head.Period), 0),
+ }}
+}
+
+// Sort sorts all data in the Timeseries chronologically by their timestamp.
+func (se *DF4SeriesEnvelope) Sort() {
+ // DF4SeriesEnvelope is sorted by definition.
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
+func (se *DF4SeriesEnvelope) Size() int {
+
+ // TODO: this implementation is a rough approximation to ensure we conform
+ // to the interface specification; it requires refinement to be in the ballpark.
+
+ c := 24 + len(se.Ver) // accounts for head + ver
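+ // Each value is assumed to cost roughly 16 bytes (an interface{} header),
+ // per the rough approximation noted in the TODO above.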
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range se.Data {
+ wg.Add(1)
+ go func(s []interface{}) {
+ mtx.Lock()
+ c += (len(s) * 16)
+ mtx.Unlock()
+ wg.Done()
+ }(se.Data[i])
+ }
+ wg.Wait()
+ return c
+}
diff --git a/internal/proxy/origins/irondb/model_df4_test.go b/internal/proxy/origins/irondb/model_df4_test.go
new file mode 100644
index 000000000..db27ceee8
--- /dev/null
+++ b/internal/proxy/origins/irondb/model_df4_test.go
@@ -0,0 +1,396 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+const testDF4Response = `{
+ "data": [
+ [
+ 1,
+ 2,
+ 3
+ ]
+ ],
+ "meta": [
+ {
+ "kind": "numeric",
+ "label": "test",
+ "tags": [
+ "__check_uuid:11223344-5566-7788-9900-aabbccddeeff",
+ "__name:test"
+ ]
+ }
+ ],
+ "version": "DF4",
+ "head": {
+ "count": 3,
+ "start": 0,
+ "period": 300
+ }
+}
+`
+
+const testDF4Response2 = `{
+ "data": [
+ [
+ 4,
+ 5,
+ 6
+ ],
+ [
+ 1,
+ 2,
+ null
+ ]
+ ],
+ "meta": [
+ {
+ "tags": [
+ "__check_uuid:11223344-5566-7788-9900-aabbccddeeff",
+ "__name:test"
+ ],
+ "label": "test",
+ "kind": "numeric"
+ },
+ {
+ "tags": [
+ "__check_uuid:11223344-5566-7788-9900-aabbccddeeff",
+ "__name:test1"
+ ],
+ "label": "test1",
+ "kind": "numeric"
+ }
+ ],
+ "version": "DF4",
+ "head": {
+ "count": 3,
+ "start": 300,
+ "period": 300
+ }
+}
+`
+
+func TestDF4SeriesEnvelopeSetStep(t *testing.T) {
+ se := DF4SeriesEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ se.SetStep(step)
+ if se.Step() != step {
+ t.Errorf("Expected step: %v, got: %v", step, se.Step())
+ }
+}
+
+func TestDF4SeriesEnvelopeSetExtents(t *testing.T) {
+ se := &DF4SeriesEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{
+ Start: time.Time{},
+ End: time.Time{},
+ }}
+
+ se.SetExtents(ex)
+ e := se.Extents()
+ if len(e) != 1 {
+ t.Errorf("Expected length: 1, got: %d", len(e))
+ }
+}
+
+func TestDF4SeriesEnvelopeSeriesCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*DF4SeriesEnvelope)
+ if se.SeriesCount() != 1 {
+ t.Errorf("Expected count: 1, got %d", se.SeriesCount())
+ }
+}
+
+func TestDF4SeriesEnvelopeValueCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*DF4SeriesEnvelope)
+ if se.ValueCount() != 3 {
+ t.Errorf("Expected count: 3, got %d", se.ValueCount())
+ }
+}
+
+func TestDF4SeriesEnvelopeTimestampCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testDF4Response2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*DF4SeriesEnvelope)
+ if se.TimestampCount() != 3 {
+ t.Errorf("Expected count: 3, got %d", se.TimestampCount())
+ }
+}
+
+func TestDF4SeriesEnvelopeMerge(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*DF4SeriesEnvelope)
+ ts2, err := client.UnmarshalTimeseries([]byte(testDF4Response2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se2 := ts2.(*DF4SeriesEnvelope)
+ se1.Merge(true, se2)
+ if se1.SeriesCount() != 2 {
+ t.Errorf("Expected count: 2, got: %v", se1.SeriesCount())
+ }
+
+ if se1.ValueCount() != 8 {
+ t.Errorf("Expected count: 8, got: %v", se1.ValueCount())
+ }
+
+ // disabled until Merge functionality can be made deterministic
+
+ // if se1.Data[0][0] != 1.0 {
+ // t.Errorf("Expected first value: 1, got: %v", se1.Data[0][0])
+ // }
+
+ // if se1.Data[0][3] != 6.0 {
+ // t.Errorf("Expected last value: 6, got: %v", se1.Data[0][3])
+ // }
+}
+
+func TestDF4SeriesEnvelopeClone(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts1.(*DF4SeriesEnvelope)
+ se2 := se.Clone()
+
+ s1, err := client.MarshalTimeseries(se)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ s2, err := client.MarshalTimeseries(se2)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if string(s1) != string(s2) {
+ t.Errorf("Expected %s = %s", string(s1), string(s2))
+ }
+}
+
+func TestDF4SeriesEnvelopeCropToRange(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*DF4SeriesEnvelope)
+ se1.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(600, 0),
+ }})
+
+ se1.CropToRange(timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(500, 0),
+ })
+
+ b, err := client.MarshalTimeseries(se1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := `{"data":[[1]],"meta":[{"kind":"numeric","label":"test",` +
+ `"tags":["__check_uuid:11223344-5566-7788-9900-aabbccddeeff",` +
+ `"__name:test"]}],"version":"DF4","head":{"count":1,"start":0,` +
+ `"period":300},"extents":[{"start":"` +
+ time.Unix(0, 0).Format(time.RFC3339) +
+ `","end":"` + time.Unix(300, 0).Format(time.RFC3339) + `"}]}`
+ if string(b) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(b))
+ }
+
+ // Test crop outside extents.
+ se1.CropToRange(timeseries.Extent{
+ Start: time.Unix(900, 0),
+ End: time.Unix(1200, 0),
+ })
+
+ b, err = client.MarshalTimeseries(se1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp = `{"data":[],"version":"DF4",` +
+ `"head":{"count":0,"start":900,"period":300}}`
+ if string(b) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(b))
+ }
+}
+
+func TestDF4SeriesEnvelopeCropToSize(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testDF4Response))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*DF4SeriesEnvelope)
+ se1.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(600, 0),
+ }})
+
+ se1.CropToSize(2, time.Unix(600, 0), timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(600, 0),
+ })
+
+ b, err := client.MarshalTimeseries(se1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := `{"data":[[2,3]],"meta":[{"kind":"numeric","label":"test",` +
+ `"tags":["__check_uuid:11223344-5566-7788-9900-aabbccddeeff",` +
+ `"__name:test"]}],"version":"DF4","head":{"count":2,` +
+ `"start":300,"period":300},` +
+ `"extents":[{"start":"` +
+ time.Unix(300, 0).Format(time.RFC3339) + `",` +
+ `"end":"` + time.Unix(600, 0).Format(time.RFC3339) + `"}]}`
+ if string(b) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(b))
+ }
+
+ se1.ExtentList = timeseries.ExtentList{}
+ se1.CropToSize(2, time.Unix(600, 0), timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(600, 0),
+ })
+
+ if len(se1.Data) > 0 {
+ t.Errorf("Expected data length: 0, got: %v", len(se1.Data))
+ }
+}
+
+func TestMarshalDF4Timeseries(t *testing.T) {
+ se := &DF4SeriesEnvelope{
+ Data: [][]interface{}{{1, 2, 3}},
+ Meta: []map[string]interface{}{{
+ "tags": []string{
+ "__check_uuid:11223344-5566-7788-9900-aabbccddeeff",
+ "__name:test",
+ },
+ "label": "test",
+ "kind": "numeric",
+ }},
+ Ver: "DF4",
+ Head: DF4Info{
+ Count: 3,
+ Start: 0,
+ Period: 300,
+ },
+ }
+
+ client := &Client{}
+ bytes, err := client.MarshalTimeseries(se)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := strings.Replace(strings.Replace(testDF4Response, "\n", "", -1),
+ " ", "", -1)
+ if string(bytes) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(bytes))
+ }
+
+}
+
+func TestUnmarshalDF4Timeseries(t *testing.T) {
+ bytes := []byte(testDF4Response2)
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*DF4SeriesEnvelope)
+ if len(se.Data) != 2 {
+ t.Errorf(`Expected length: 2. got %d`, len(se.Data))
+ return
+ }
+
+ if se.Data[1][1] != 2.0 {
+ t.Errorf(`Expected value: 2.0. got %f`, se.Data[1][1])
+ return
+ }
+
+ if se.Head.Start != 300 {
+ t.Errorf(`Expected time start: 300. got %d`, se.Head.Start)
+ return
+ }
+
+ if se.Head.Period != 300 {
+ t.Errorf(`Expected time period: 300. got %d`, se.Head.Period)
+ return
+ }
+}
+
+func TestSize(t *testing.T) {
+
+ client := &Client{}
+ s, _ := client.UnmarshalTimeseries([]byte(testDF4Response))
+ expected := 75
+
+ if s.Size() != expected {
+ t.Errorf("expected %d got %d", expected, s.Size())
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/model_test.go b/internal/proxy/origins/irondb/model_test.go
new file mode 100644
index 000000000..ca5e33f6b
--- /dev/null
+++ b/internal/proxy/origins/irondb/model_test.go
@@ -0,0 +1,488 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+const testResponse = `[
+ [600.000,1.75],
+ [0,1],
+ [300.000,1.5]
+]
+`
+const testResponse2 = `[
+ [300.000,2],
+ [900.000,2.75],
+ [600.000,2.5],
+ [1200.000,3]
+]`
+
+func TestDataPointMarshalJSON(t *testing.T) {
+ dp := &DataPoint{
+ Time: time.Unix(99900, 0),
+ Value: 1.5,
+ }
+
+ b, err := dp.MarshalJSON()
+ if err != nil {
+ t.Error(err)
+ t.FailNow()
+ }
+
+ exp := `[99900,1.5]`
+ if string(b) != exp {
+ t.Errorf("Expected JSON: %v, got: %v", exp, string(b))
+ }
+
+ ts := `[
+ 1556290800,
+ 300,
+ {
+ "+23e-004": 1,
+ "+85e-004": 1
+ }
+ ]`
+
+ dp.UnmarshalJSON([]byte(ts))
+ if dp.Step != 300 {
+ t.Errorf("Expected step: 300, got: %v", dp.Step)
+ }
+
+ mv, ok := dp.Value.(map[string]interface{})
+ if !ok {
+ t.Errorf("Unexpected histogram value type: %v", dp.Value)
+ t.FailNow()
+ }
+
+ if mv["+85e-004"] != float64(1) {
+ t.Errorf("Expected histogram value: 1, got: %v", mv["+85e-004"])
+ }
+}
+
+func TestSeriesEnvelopeSetStep(t *testing.T) {
+ se := SeriesEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ se.SetStep(step)
+ if se.StepDuration != step {
+ t.Errorf("Expected step: %v, got: %v", step, se.StepDuration)
+ }
+}
+
+func TestSeriesEnvelopeStep(t *testing.T) {
+ se := SeriesEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ se.SetStep(step)
+ if se.Step() != step {
+ t.Errorf("Expected step: %v, got: %v", step, se.Step())
+ }
+}
+
+func TestSeriesEnvelopeSetExtents(t *testing.T) {
+ se := &SeriesEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{
+ Start: time.Time{},
+ End: time.Time{},
+ }}
+
+ se.SetExtents(ex)
+ if len(se.ExtentList) != 1 {
+ t.Errorf("Expected length: 1, got: %d", len(se.ExtentList))
+ }
+}
+
+func TestSeriesEnvelopeExtents(t *testing.T) {
+ se := &SeriesEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{
+ Start: time.Time{},
+ End: time.Time{},
+ }}
+
+ se.SetExtents(ex)
+ e := se.Extents()
+ if len(e) != 1 {
+ t.Errorf("Expected length: 1, got: %d", len(e))
+ }
+}
+
+func TestSeriesEnvelopeSeriesCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+ if se.SeriesCount() != 1 {
+ t.Errorf("Expected count: 1, got %d", se.SeriesCount())
+ }
+}
+
+func TestSeriesEnvelopeValueCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+ if se.ValueCount() != 3 {
+ t.Errorf("Expected count: 3, got %d", se.ValueCount())
+ }
+}
+
+func TestSeriesEnvelopeTimestampCount(t *testing.T) {
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries([]byte(testResponse2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+ if se.TimestampCount() != 4 {
+ t.Errorf("Expected count: 4, got %d", se.TimestampCount())
+ }
+}
+
+func TestSeriesEnvelopeMerge(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*SeriesEnvelope)
+ ts2, err := client.UnmarshalTimeseries([]byte(testResponse2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se2 := ts2.(*SeriesEnvelope)
+ se1.Merge(true, se2)
+ if se1.ValueCount() != 7 {
+ t.Errorf("Expected count: 7, got: %v", se1.ValueCount())
+ }
+
+ if se1.Data[0].Value != 1.0 {
+ t.Errorf("Expected first value: 1, got: %v", se1.Data[0].Value)
+ }
+
+ if se1.Data[6].Value != 3.0 {
+ t.Errorf("Expected last value: 3, got: %v", se1.Data[6].Value)
+ }
+}
+
+func TestSeriesEnvelopeSort(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts1.(*SeriesEnvelope)
+ se.Sort()
+ if len(se.Data) != 3 {
+ t.Errorf("Expected length: 3, got: %v", len(se.Data))
+ }
+
+ if se.Data[0].Value != 1.0 {
+ t.Errorf("Expected first value: 1, got: %v", se.Data[0].Value)
+ }
+
+ if se.Data[2].Value != 1.75 {
+ t.Errorf("Expected last value: 1.75, got: %v", se.Data[2].Value)
+ }
+}
+
+func TestSeriesEnvelopeClone(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts1.(*SeriesEnvelope)
+ se2 := se.Clone()
+
+ s1, err := client.MarshalTimeseries(se)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ s2, err := client.MarshalTimeseries(se2)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if string(s1) != string(s2) {
+ t.Errorf("Expected %s = %s", string(s1), string(s2))
+ }
+}
+
+func TestSeriesEnvelopeCropToRange(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*SeriesEnvelope)
+ se1.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(600, 0),
+ }})
+
+ ts2, err := client.UnmarshalTimeseries([]byte(testResponse2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se2 := ts2.(*SeriesEnvelope)
+ se2.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(1200, 0),
+ }})
+
+ se1.Merge(true, se2)
+ se1.CropToRange(timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(300, 0),
+ })
+
+ s1, err := client.MarshalTimeseries(se1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := `{"data":[[0,1],[300,1.5],[300,2]],` +
+ `"extents":[{"start":"` + time.Unix(0, 0).Format(time.RFC3339) + `",` +
+ `"end":"` + time.Unix(300, 0).Format(time.RFC3339) + `"}]}`
+ if string(s1) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(s1))
+ }
+}
+
+func TestSeriesEnvelopeCropToSize(t *testing.T) {
+ client := &Client{}
+ ts1, err := client.UnmarshalTimeseries([]byte(testResponse))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se1 := ts1.(*SeriesEnvelope)
+ se1.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(300, 0),
+ }})
+
+ ts2, err := client.UnmarshalTimeseries([]byte(testResponse2))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se2 := ts2.(*SeriesEnvelope)
+ se2.SetExtents(timeseries.ExtentList{timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(1200, 0),
+ }})
+
+ se1.Merge(true, se2)
+ se1.CropToSize(2, time.Unix(900, 0), timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(900, 0),
+ })
+
+ s1, err := client.MarshalTimeseries(se1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := `{"data":[[600,1.75],[600,2.5],[900,2.75]],` +
+ `"extents":[{"start":"` + time.Unix(600, 0).Format(time.RFC3339) +
+ `","end":"` + time.Unix(900, 0).Format(time.RFC3339) + `"}]}`
+ if string(s1) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(s1))
+ }
+
+ se1.ExtentList = timeseries.ExtentList{}
+ se1.CropToSize(2, time.Unix(900, 0), timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(900, 0),
+ })
+
+ if len(se1.Data) > 0 {
+ t.Errorf("Expected data length: 0, got: %v", len(se1.Data))
+ }
+}
+
+func TestMarshalTimeseries(t *testing.T) {
+ se := &SeriesEnvelope{
+ Data: DataPoints{
+ DataPoint{
+ Time: time.Unix(99000, 0),
+ Value: 1.5,
+ },
+ DataPoint{
+ Time: time.Unix(99000, 500000000),
+ Value: 1.5,
+ },
+ },
+ }
+
+ client := &Client{}
+ bytes, err := client.MarshalTimeseries(se)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ exp := `[[99000,1.5],[99000.5,1.5]]`
+ if string(bytes) != exp {
+ t.Errorf("Expected JSON: %s, got: %s", exp, string(bytes))
+ }
+
+}
+
+func TestUnmarshalTimeseries(t *testing.T) {
+ bytes := []byte(`[[99000,1.5],[99000.500,1.5]]`)
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+ if len(se.Data) != 2 {
+ t.Errorf(`Expected length: 2. got %d`, len(se.Data))
+ return
+ }
+
+ if se.Data[1].Value != 1.5 {
+ t.Errorf(`Expected value: 1.5. got %f`, se.Data[1].Value)
+ return
+ }
+
+ if se.Data[1].Time.Unix() != 99000 {
+ t.Errorf(`Expected time secs: 99000. got %d`, se.Data[1].Time.Unix())
+ return
+ }
+
+ if se.Data[1].Time.Nanosecond()/1000000 != 500 {
+ t.Errorf(`Expected time nano: 500. got %d`,
+ se.Data[1].Time.Nanosecond()/1000000)
+ return
+ }
+
+ bytes = []byte(`{"data":[[99000,1.5],[99000.500,1.5]],"step":"300s"}`)
+ ts, err = client.UnmarshalTimeseries(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se = ts.(*SeriesEnvelope)
+ if len(se.Data) != 2 {
+ t.Errorf(`Expected length: 2. got %d`, len(se.Data))
+ return
+ }
+
+ if se.Data[1].Value != 1.5 {
+ t.Errorf(`Expected value: 1.5. got %f`, se.Data[1].Value)
+ return
+ }
+
+ if se.Data[1].Time.Unix() != 99000 {
+ t.Errorf(`Expected time secs: 99000. got %d`, se.Data[1].Time.Unix())
+ return
+ }
+
+ if se.Data[1].Time.Nanosecond()/1000000 != 500 {
+ t.Errorf(`Expected time nano: 500. got %d`,
+ se.Data[1].Time.Nanosecond()/1000000)
+ return
+ }
+
+ if se.Step() != 300*time.Second {
+ t.Errorf("Expected step: 300s, got: %v", se.Step())
+ return
+ }
+}
+
+func TestUnmarshalInstantaneous(t *testing.T) {
+ bytes := []byte(`[[99000,1.5],[99000.500,1.5]]`)
+ client := &Client{}
+ ts, err := client.UnmarshalInstantaneous(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ se := ts.(*SeriesEnvelope)
+ if len(se.Data) != 2 {
+ t.Errorf(`Expected length: 2. got %d`, len(se.Data))
+ return
+ }
+
+ if se.Data[1].Value != 1.5 {
+ t.Errorf(`Expected value: 1.5. got %f`, se.Data[1].Value)
+ return
+ }
+
+ if se.Data[1].Time.Unix() != 99000 {
+ t.Errorf(`Expected time secs: 99000. got %d`, se.Data[1].Time.Unix())
+ return
+ }
+
+ if se.Data[1].Time.Nanosecond()/1000000 != 500 {
+ t.Errorf(`Expected time nano: 500. got %d`,
+ se.Data[1].Time.Nanosecond()/1000000)
+ return
+ }
+}
+
+func TestTSSize(t *testing.T) {
+
+ bytes := []byte(`[[99000,1.5],[99000.500,1.5]]`)
+ client := &Client{}
+
+ s, _ := client.UnmarshalTimeseries(bytes)
+
+ expected := 48
+ size := s.Size()
+
+ if size != expected {
+ t.Errorf("got %d expected %d", size, expected)
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/routes.go b/internal/proxy/origins/irondb/routes.go
new file mode 100644
index 000000000..ccca85288
--- /dev/null
+++ b/internal/proxy/origins/irondb/routes.go
@@ -0,0 +1,170 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func (c *Client) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+ // This is the registry of handlers that Trickster supports for IRONdb,
+ // which can be referenced by name (map key) in configuration files.
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers[mnRaw] = http.HandlerFunc(c.RawHandler)
+ c.handlers[mnRollup] = http.HandlerFunc(c.RollupHandler)
+ c.handlers[mnFetch] = http.HandlerFunc(c.FetchHandler)
+ c.handlers[mnRead] = http.HandlerFunc(c.TextHandler)
+ c.handlers[mnHistogram] = http.HandlerFunc(c.HistogramHandler)
+ c.handlers[mnFind] = http.HandlerFunc(c.FindHandler)
+ c.handlers[mnState] = http.HandlerFunc(c.StateHandler)
+ c.handlers[mnCAQL] = http.HandlerFunc(c.CAQLHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *Client) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
+func populateHeathCheckRequestValues(oc *config.OriginConfig) {
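+ // "-" appears to act as the sentinel for an unset health check setting,
+ // so substitute IRONdb-appropriate defaults here.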
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/" + mnState
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ oc.HealthCheckQuery = ""
+ }
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *Client) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+
+ populateHeathCheckRequestValues(oc)
+
+ paths := map[string]*config.PathConfig{
+
+ "/" + mnRaw + "/": {
+ Path: "/" + mnRaw + "/",
+ HandlerName: "RawHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnRollup + "/": {
+ Path: "/" + mnRollup + "/",
+ HandlerName: "RollupHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{upSpan, upEngine, upType},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnFetch: {
+ Path: "/" + mnFetch,
+ HandlerName: "FetchHandler",
+ KeyHasher: []config.KeyHasherFunc{c.fetchHandlerDeriveCacheKey},
+ Methods: []string{http.MethodPost},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnRead + "/": {
+ Path: "/" + mnRead + "/",
+ HandlerName: "TextHandler",
+ KeyHasher: []config.KeyHasherFunc{c.textHandlerDeriveCacheKey},
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{"*"},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnHistogram + "/": {
+ Path: "/" + mnHistogram + "/",
+ HandlerName: "HistogramHandler",
+ Methods: []string{http.MethodGet},
+ KeyHasher: []config.KeyHasherFunc{c.histogramHandlerDeriveCacheKey},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnFind + "/": {
+ Path: "/" + mnFind + "/",
+ HandlerName: "FindHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{upQuery},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnState + "/": {
+ Path: "/" + mnState + "/",
+ HandlerName: "StateHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{"*"},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnCAQL: {
+ Path: "/" + mnCAQL,
+ HandlerName: "CAQLHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{upQuery, upCAQLQuery, upCAQLPeriod},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/" + mnCAQLPub + "/": {
+ Path: "/" + mnCAQLPub + "/",
+ HandlerName: "CAQLPubHandler",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{upQuery, upCAQLQuery, upCAQLPeriod},
+ CacheKeyHeaders: []string{},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/": {
+ Path: "/",
+ HandlerName: "ProxyHandler",
+ Methods: []string{http.MethodGet},
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+ }
+
+ return paths
+
+}
diff --git a/internal/proxy/origins/irondb/routes_test.go b/internal/proxy/origins/irondb/routes_test.go
new file mode 100644
index 000000000..921421502
--- /dev/null
+++ b/internal/proxy/origins/irondb/routes_test.go
@@ -0,0 +1,61 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRegisterHandlers(t *testing.T) {
+ c := &Client{}
+ c.registerHandlers()
+ if _, ok := c.handlers[mnCAQL]; !ok {
+ t.Errorf("expected to find handler named: %s", mnCAQL)
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ c := &Client{}
+ m := c.Handlers()
+ if _, ok := m[mnCAQL]; !ok {
+ t.Errorf("expected to find handler named: %s", mnCAQL)
+ }
+}
+
+func TestDefaultPathConfigs(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "irondb", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if _, ok := client.config.Paths["/"]; !ok {
+ t.Errorf("expected to find path named: %s", "/")
+ }
+
+ const expectedLen = 10
+ if len(client.config.Paths) != expectedLen {
+ t.Errorf("expected ordered length to be: %d got %d", expectedLen, len(client.config.Paths))
+ }
+
+}
diff --git a/internal/proxy/origins/irondb/url.go b/internal/proxy/origins/irondb/url.go
new file mode 100644
index 000000000..d6390daf5
--- /dev/null
+++ b/internal/proxy/origins/irondb/url.go
@@ -0,0 +1,166 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ terr "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy
+// configuration.
+func (c Client) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to
+// construct the full upstream URL.
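+// For example, with an origin named "default" and no path prefix, a downstream
+// request for "/default/rollup/..." is proxied upstream as "/rollup/...".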
+func (c Client) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/",
+ "/", 1)
+ if u.Path == "//" {
+ u.Path = "/"
+ }
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
+
+// SetExtent will change the upstream request query to use the provided Extent.
+func (c Client) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery, extent *timeseries.Extent) {
+
+ rsc := request.GetResources(r)
+ if rsc.PathConfig == nil {
+ return
+ }
+
+ if f, ok := c.extentSetters[rsc.PathConfig.HandlerName]; ok {
+ f(r, trq, extent)
+ }
+}
+
+// FastForwardURL returns the url to fetch the Fast Forward value based on a
+// timerange URL.
+func (c *Client) FastForwardURL(r *http.Request) (*url.URL, error) {
+
+ rsc := request.GetResources(r)
+ if rsc.PathConfig == nil {
+ return nil, errors.New("missing path config")
+ }
+
+ switch rsc.PathConfig.HandlerName {
+ case "RollupHandler":
+ return c.rollupHandlerFastForwardURL(r)
+ case "HistogramHandler":
+ return c.histogramHandlerFastForwardURL(r)
+ case "CAQLHandler":
+ return c.caqlHandlerFastForwardURL(r)
+ }
+
+ return nil, fmt.Errorf("unknown handler name: %s", rsc.PathConfig.HandlerName)
+}
+
+// formatTimestamp returns a string containing a timestamp in the format used
+// by the IRONdb API.
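+// For example, formatTimestamp(time.Unix(123456789, 1000000), true) returns
+// "123456789.001"; with milli=false it returns "123456789".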
+func formatTimestamp(t time.Time, milli bool) string {
+ if milli {
+ return fmt.Sprintf("%d.%03d", t.Unix(), t.Nanosecond()/1000000)
+ }
+
+ return fmt.Sprintf("%d", t.Unix())
+}
+
+// parseTimestamp attempts to parse an IRONdb API timestamp string into a valid
+// time value.
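+// The fractional component, when present, is read as an integer count of
+// milliseconds (e.g. "123456789.001" parses to 1ms past the epoch second),
+// matching the output of formatTimestamp.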
+func parseTimestamp(s string) (time.Time, error) {
+ sp := strings.Split(s, ".")
+ sec, nsec := int64(0), int64(0)
+ var err error
+ if len(sp) > 0 {
+ if sec, err = strconv.ParseInt(sp[0], 10, 64); err != nil {
+ return time.Time{}, fmt.Errorf("unable to parse timestamp %s: %s",
+ s, err.Error())
+ }
+ }
+
+ if len(sp) > 1 {
+ if nsec, err = strconv.ParseInt(sp[1], 10, 64); err != nil {
+ return time.Time{}, fmt.Errorf("unable to parse timestamp %s: %s",
+ s, err.Error())
+ }
+
+ nsec = nsec * 1000000
+ }
+
+ return time.Unix(sec, nsec), nil
+}
+
+// parseDuration attempts to parse an IRONdb API duration string into a valid
+// duration value.
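+// Bare numeric strings are treated as seconds, e.g. parseDuration("300")
+// returns 300 * time.Second.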
+func parseDuration(s string) (time.Duration, error) {
+ if !strings.HasSuffix(s, "s") {
+ s += "s"
+ }
+
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse duration %s: %s",
+ s, err.Error())
+ }
+
+ return d, nil
+}
+
+// ParseTimeRangeQuery parses the key parts of a TimeRangeQuery from the
+// inbound HTTP Request.
+func (c *Client) ParseTimeRangeQuery(
+ r *http.Request) (*timeseries.TimeRangeQuery, error) {
+
+ rsc := request.GetResources(r)
+ if rsc.PathConfig == nil {
+ return nil, errors.New("missing path config")
+ }
+
+ var trq *timeseries.TimeRangeQuery
+ var err error
+
+ if f, ok := c.trqParsers[rsc.PathConfig.HandlerName]; ok {
+ trq, err = f(r)
+ } else {
+ trq = nil
+ err = terr.ErrNotTimeRangeQuery
+ }
+ rsc.TimeRangeQuery = trq
+ return trq, err
+}
diff --git a/internal/proxy/origins/irondb/url_test.go b/internal/proxy/origins/irondb/url_test.go
new file mode 100644
index 000000000..17e1fd9af
--- /dev/null
+++ b/internal/proxy/origins/irondb/url_test.go
@@ -0,0 +1,383 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package irondb
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestSetExtent(t *testing.T) {
+ start := time.Now().Add(time.Duration(-6) * time.Hour)
+ end := time.Now()
+ stFl := time.Unix(start.Unix()-(start.Unix()%300), 0)
+ etFl := time.Unix(end.Unix()-(end.Unix()%300), 0)
+ e := &timeseries.Extent{Start: start, End: end}
+ err := config.Load("trickster", "test",
+ []string{"-origin-url", "none:9090",
+ "-origin-type", "irondb",
+ "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := &Client{config: oc}
+
+ client.makeTrqParsers()
+ client.makeExtentSetters()
+
+ pcs := client.DefaultPathConfigs(oc)
+ rsc := request.NewResources(oc, nil, nil, nil, client)
+
+ cases := []struct {
+ handler string
+ u *url.URL
+ body string
+ expPath string
+ expQuery string
+ expBody string
+ p *config.PathConfig
+ }{
+ { // case 0
+ handler: "CAQLHandler",
+ u: &url.URL{
+ Path: "/extension/lua/caql_v1",
+ RawQuery: "query=metric:average(%22" +
+ "00112233-4455-6677-8899-aabbccddeeff%22," +
+ "%22metric%22)&start=0&end=900&period=300",
+ },
+ expPath: "/extension/lua/caql_v1",
+ expQuery: "end=" + formatTimestamp(etFl, false) +
+ "&period=300&query=metric%3Aaverage%28%22" +
+ "00112233-4455-6677-8899-aabbccddeeff%22%2C%22metric%22%29" +
+ "&start=" + formatTimestamp(stFl, false),
+ p: pcs["/extension/lua/caql_v1"],
+ },
+ { // case 1
+ handler: "HistogramHandler",
+ u: &url.URL{
+ Path: "/histogram/0/900/300/" +
+ "00112233-4455-6677-8899-aabbccddeeff/metric",
+ RawQuery: "",
+ },
+ expPath: "/histogram/" + formatTimestamp(stFl, false) +
+ "/" + formatTimestamp(etFl, false) + "/300" +
+ "/00112233-4455-6677-8899-aabbccddeeff/metric",
+ expQuery: "",
+ p: pcs["/histogram/"],
+ },
+ { // case 2
+ handler: "RawHandler",
+ u: &url.URL{
+ Path: "/raw/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests",
+ RawQuery: "start_ts=1560902400.000&end_ts=1561055856.000",
+ },
+ expPath: "/raw/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests",
+ expQuery: "end_ts=" + formatTimestamp(end, true) +
+ "&start_ts=" + formatTimestamp(start, true),
+ p: pcs["/raw/"],
+ },
+ { // case 3
+ handler: "RollupHandler",
+ u: &url.URL{
+ Path: "/rollup/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests",
+ RawQuery: "start_ts=1560902400.000&end_ts=1561055856.000" +
+ "&rollup_span=300s&type=count",
+ },
+ expPath: "/rollup/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests",
+ expQuery: "end_ts=" + formatTimestamp(etFl, true) +
+ "&rollup_span=300s" + "&start_ts=" +
+ formatTimestamp(stFl, true) + "&type=count",
+ p: pcs["/rollup/"],
+ },
+ { // case 4
+ handler: "FetchHandler",
+ u: &url.URL{
+ Path: "/fetch",
+ },
+ body: `{
+ "start":` + strconv.FormatInt(start.Unix(), 10) + `,
+ "period":300,
+ "count":10,
+ "streams":[
+ {
+ "uuid":"00112233-4455-6677-8899-aabbccddeeff",
+ "name":"test",
+ "kind":"numeric",
+ "transform": "average"
+ }
+ ],
+ "reduce":[{"label":"test","method":"average"}]
+ }`,
+ expPath: "/fetch",
+ expQuery: "",
+ expBody: `{"count":72,"period":300,"reduce":[{"label":"test",` +
+ `"method":"average"}],"start":` +
+ strconv.FormatInt(stFl.Unix(), 10) + `,"streams":[{"kind":` +
+ `"numeric","name":"test","transform":"average","uuid":` +
+ `"00112233-4455-6677-8899-aabbccddeeff"}]}`,
+ p: pcs["/fetch"],
+ },
+ { // case 5
+ handler: "TextHandler",
+ u: &url.URL{
+ Path: "/read/0/900/00112233-4455-6677-8899-aabbccddeeff" +
+ "/metric",
+ RawQuery: "",
+ },
+ expPath: "/read/" + formatTimestamp(start, false) +
+ "/" + formatTimestamp(end, false) +
+ "/00112233-4455-6677-8899-aabbccddeeff/metric",
+ expQuery: "",
+ p: pcs["/read/"],
+ },
+ }
+
+ for i, c := range cases {
+
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, c.u.String(), ioutil.NopCloser(bytes.NewBufferString(c.body)))
+ rsc.PathConfig = c.p
+ r = request.SetResources(r, rsc)
+
+ client.SetExtent(r, nil, e)
+ if r.URL.Path != c.expPath {
+ t.Errorf("Expected path: %s, got: %s", c.expPath, r.URL.Path)
+ }
+
+ if r.URL.RawQuery != c.expQuery {
+ t.Errorf("Expected query: %s, got: %s", c.expQuery, r.URL.RawQuery)
+ }
+
+ if c.expBody != "" {
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Unable to read request body: %v", err)
+ return
+ }
+
+ if string(b) != (c.expBody + "\n") {
+ t.Errorf("Expected request body: %v, got: %v", c.expBody,
+ string(b))
+ }
+ }
+ })
+ }
+}
+
+func TestFastForwardURL(t *testing.T) {
+ now := time.Now().Unix()
+ start := now - (now % 300)
+ end := start + 300
+ err := config.Load("trickster", "test",
+ []string{"-origin-url", "none:9090",
+ "-origin-type", "irondb",
+ "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := &Client{config: oc}
+
+ client.makeTrqParsers()
+ client.makeExtentSetters()
+
+ pcs := client.DefaultPathConfigs(oc)
+
+ rsc := request.NewResources(oc, nil, nil, nil, client)
+
+ cases := []struct {
+ handler string
+ u *url.URL
+ exp string
+ p *config.PathConfig
+ }{
+ { // case 0
+ handler: "CAQLHandler",
+ u: &url.URL{
+ Path: "/extension/lua/caql_v1",
+ RawQuery: "query=metric:average(%22" +
+ "00112233-4455-6677-8899-aabbccddeeff%22," +
+ "%22metric%22)&start=0&end=900&period=300",
+ },
+ exp: "/extension/lua/caql_v1" +
+ "?end=" + formatTimestamp(time.Unix(end, 0), false) +
+ "&period=300&query=metric%3Aaverage%28%22" +
+ "00112233-4455-6677-8899-aabbccddeeff%22%2C%22metric%22%29" +
+ "&start=" + formatTimestamp(time.Unix(start, 0), false),
+ p: pcs["/extension/lua/caql_v1"],
+ },
+ { // case 1
+ handler: "HistogramHandler",
+ u: &url.URL{
+ Path: "/histogram/0/900/300/" +
+ "00112233-4455-6677-8899-aabbccddeeff/metric",
+ RawQuery: "",
+ },
+ exp: "/histogram/" + formatTimestamp(time.Unix(start, 0), false) +
+ "/" + formatTimestamp(time.Unix(end, 0), false) +
+ "/300" +
+ "/00112233-4455-6677-8899-aabbccddeeff/metric",
+ p: pcs["/histogram/"],
+ },
+ { // case 2
+ handler: "RollupHandler",
+ u: &url.URL{
+ Path: "/rollup/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests",
+ RawQuery: "start_ts=1560902400.000&end_ts=1560903000.000" +
+ "&rollup_span=300s&type=count",
+ },
+ exp: "/rollup/e312a0cb-dbe9-445d-8346-13b0ae6a3382/requests" +
+ "?end_ts=" + formatTimestamp(time.Unix(end, 0), true) +
+ "&rollup_span=300s" +
+ "&start_ts=" + formatTimestamp(time.Unix(start, 0), true) +
+ "&type=count",
+ p: pcs["/rollup/"],
+ },
+ { // case 3
+ handler: "ProxyHandler",
+ u: &url.URL{
+ Path: "/test",
+ RawQuery: "",
+ },
+ exp: "/test",
+ p: pcs["/"],
+ },
+ }
+
+ for i, c := range cases {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, c.u.String(), nil)
+ rsc.PathConfig = c.p
+ r = request.SetResources(r, rsc)
+ u, err := client.FastForwardURL(r)
+ if c.handler != "ProxyHandler" && err != nil {
+ t.Error(err)
+ }
+
+ if c.handler == "ProxyHandler" && err.Error() != "unknown handler name: ProxyHandler" {
+ t.Errorf("expected error: %s", "unknown handler name")
+ }
+
+ if u != nil {
+ if u.String() != c.exp {
+ t.Errorf("Expected URL: %v, got: %v", c.exp, u.String())
+ }
+ }
+ })
+ }
+}
+
+func TestFormatTimestamp(t *testing.T) {
+ tm := time.Unix(123456789, int64(time.Millisecond))
+ exp := "123456789.001"
+ res := formatTimestamp(tm, true)
+ if res != exp {
+ t.Errorf("Expected string: %v, got: %v", exp, res)
+ }
+
+ tm = time.Unix(123456789, int64(time.Millisecond))
+ exp = "123456789"
+ res = formatTimestamp(tm, false)
+ if res != exp {
+ t.Errorf("Expected string: %v, got: %v", exp, res)
+ }
+}
+
+func TestParseTimestamp(t *testing.T) {
+ v := "123456789.001"
+ res, err := parseTimestamp(v)
+ if err != nil {
+ t.Fatalf("Error parsing %s: %v", v, err.Error())
+ }
+
+ exp := time.Unix(123456789, int64(time.Millisecond))
+ if !res.Equal(exp) {
+ t.Errorf("Expected time: %v, got: %v", exp, res)
+ }
+
+ v = "1.a"
+ _, err = parseTimestamp(v)
+ if err == nil {
+ t.Fatalf("expected error: %s", "parse timestamp")
+ }
+
+}
+
+func TestBuildUpstreamURL(t *testing.T) {
+
+ expected := "q=up&start=1&end=1&step=1"
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "none:9090", "-origin-type", "rpc", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := Client{config: oc, name: "default"}
+
+ u := &url.URL{Path: "/default/query_range", RawQuery: expected}
+
+ r, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u2 := client.BuildUpstreamURL(r)
+
+ if expected != u2.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, u2.RawQuery)
+ }
+
+ u = &url.URL{Path: "/default//", RawQuery: ""}
+
+ r, err = http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u2 = client.BuildUpstreamURL(r)
+
+ if u2.Path != "/" {
+ t.Errorf("\nexpected [%s]\ngot [%s]", "/", u2.Path)
+ }
+
+}
+
+func TestParseTimerangeQuery(t *testing.T) {
+ expected := errors.ErrNotTimeRangeQuery
+ client := &Client{name: "test"}
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+
+ r = request.SetResources(r, request.NewResources(client.config, &config.PathConfig{}, nil, nil, client))
+
+ _, err := client.ParseTimeRangeQuery(r)
+ if err == nil || err != expected {
+ t.Errorf("expected %s got %v", expected.Error(), err.Error())
+ }
+}
diff --git a/internal/proxy/origins/prometheus/handler_health.go b/internal/proxy/origins/prometheus/handler_health.go
new file mode 100644
index 000000000..c873b735f
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_health.go
@@ -0,0 +1,66 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+// HealthHandler checks the health of the Configured Upstream Origin
+func (c *Client) HealthHandler(w http.ResponseWriter, r *http.Request) {
+
+ if c.healthURL == nil {
+ c.populateHeathCheckRequestValues()
+ }
+
+ if c.healthMethod == "-" {
+ w.WriteHeader(400)
+ w.Write([]byte("Health Check URL not Configured for origin: " + c.config.Name))
+ return
+ }
+
+ req, _ := http.NewRequest(c.healthMethod, c.healthURL.String(), nil)
+ req = req.WithContext(r.Context())
+
+ req.Header = c.healthHeaders
+ engines.DoProxy(w, req)
+}
+
+func (c *Client) populateHeathCheckRequestValues() {
+
+ oc := c.config
+
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/"
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ oc.HealthCheckQuery = "query=up"
+ }
+
+ c.healthURL = c.BaseURL()
+ c.healthURL.Path += oc.HealthCheckUpstreamPath
+ c.healthURL.RawQuery = oc.HealthCheckQuery
+ c.healthMethod = oc.HealthCheckVerb
+
+ if oc.HealthCheckHeaders != nil {
+ c.healthHeaders = http.Header{}
+ headers.UpdateHeaders(c.healthHeaders, oc.HealthCheckHeaders)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/handler_health_test.go b/internal/proxy/origins/prometheus/handler_health_test.go
new file mode 100644
index 000000000..beab8dca2
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_health_test.go
@@ -0,0 +1,108 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestHealthHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", "/health", "debug")
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+ client.healthMethod = "-"
+
+ w = httptest.NewRecorder()
+ client.HealthHandler(w, r)
+ resp = w.Result()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status: 400 got %d.", resp.StatusCode)
+ }
+
+}
+
+func TestHealthHandlerCustomPath(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "", nil, "prometheus", "/health", "debug")
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.GetResources(r)
+ client.config = rsc.OriginConfig
+
+ client.config.HealthCheckUpstreamPath = "-"
+ client.config.HealthCheckVerb = "-"
+ client.config.HealthCheckQuery = "-"
+
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "" {
+ t.Errorf("expected '' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/prometheus/handler_objectproxycache.go b/internal/proxy/origins/prometheus/handler_objectproxycache.go
new file mode 100644
index 000000000..861f66c9b
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_objectproxycache.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ObjectProxyCacheHandler routes a request through the Object Proxy Cache engine
+func (c *Client) ObjectProxyCacheHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.ObjectProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/prometheus/handler_objectproxycache_test.go b/internal/proxy/origins/prometheus/handler_objectproxycache_test.go
new file mode 100644
index 000000000..86dce29c5
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_objectproxycache_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestObjectProxyCacheHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, ok := client.config.Paths[APIPath+mnQuery]
+ if !ok {
+ t.Errorf("could not find path config named %s", APIPath+mnQuery)
+ }
+
+ client.ObjectProxyCacheHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/handler_proxy.go b/internal/proxy/origins/prometheus/handler_proxy.go
new file mode 100644
index 000000000..2dea4a537
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_proxy.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyHandler sends a request through the basic reverse proxy to the origin, and services non-cacheable Prometheus API calls.
+func (c *Client) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/prometheus/handler_proxy_test.go b/internal/proxy/origins/prometheus/handler_proxy_test.go
new file mode 100644
index 000000000..7490b90e1
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_proxy_test.go
@@ -0,0 +1,55 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "test", nil, "prometheus", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.ProxyHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "test" {
+ t.Errorf("expected 'test' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/prometheus/handler_query.go b/internal/proxy/origins/prometheus/handler_query.go
new file mode 100644
index 000000000..4cd704525
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_query.go
@@ -0,0 +1,41 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// QueryHandler handles calls to /query (for instantaneous values)
+func (c *Client) QueryHandler(w http.ResponseWriter, r *http.Request) {
+
+ u := c.BuildUpstreamURL(r)
+ params := u.Query()
+
+ // Round time param down to the nearest 15 seconds if it exists
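+	// (e.g. an illustrative time value of 1554290752 becomes 1554290745), so
+	// that requests landing within the same 15-second window share a cache key
+	// in the object proxy cache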
+ if p := params.Get(upTime); p != "" {
+ if i, err := strconv.ParseInt(p, 10, 64); err == nil {
+ params.Set(upTime, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(15)).Unix(), 10))
+ }
+ }
+
+ r.URL = u
+ r.URL.RawQuery = params.Encode()
+
+ engines.ObjectProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/prometheus/handler_query_range.go b/internal/proxy/origins/prometheus/handler_query_range.go
new file mode 100644
index 000000000..8e6231bf8
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_query_range.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// QueryRangeHandler handles timeseries requests for Prometheus and processes them through the delta proxy cache
+func (c *Client) QueryRangeHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DeltaProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/prometheus/handler_query_range_test.go b/internal/proxy/origins/prometheus/handler_query_range_test.go
new file mode 100644
index 000000000..a1e7d9e70
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_query_range_test.go
@@ -0,0 +1,55 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestQueryRangeHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", "/query_range?q=up&start=0&end=900&step=15", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.QueryRangeHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/handler_query_test.go b/internal/proxy/origins/prometheus/handler_query_test.go
new file mode 100644
index 000000000..4e3718f31
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_query_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestQueryHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", "/query?q=up&time=0", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, ok := client.config.Paths[APIPath+mnQuery]
+ if !ok {
+		t.Errorf("could not find path config named %s", APIPath+mnQuery)
+ }
+
+ client.QueryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/handler_series.go b/internal/proxy/origins/prometheus/handler_series.go
new file mode 100644
index 000000000..e31971aa0
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_series.go
@@ -0,0 +1,47 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// SeriesHandler proxies requests for path /series to the origin by way of the object proxy cache
+func (c *Client) SeriesHandler(w http.ResponseWriter, r *http.Request) {
+ u := c.BuildUpstreamURL(r)
+
+ params := u.Query()
+
+ // Round Start and End times down to top of most recent minute for cacheability
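+	// (e.g. an illustrative start value of 1554290752 becomes 1554290700), so
+	// that repeated /series lookups within the same minute resolve to a single
+	// cache key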
+ if p := params.Get(upStart); p != "" {
+ if i, err := strconv.ParseInt(p, 10, 64); err == nil {
+ params.Set(upStart, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
+ }
+ }
+
+ if p := params.Get(upEnd); p != "" {
+ if i, err := strconv.ParseInt(p, 10, 64); err == nil {
+ params.Set(upEnd, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
+ }
+ }
+
+ r.URL = u
+ r.URL.RawQuery = params.Encode()
+
+ engines.ObjectProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/prometheus/handler_series_test.go b/internal/proxy/origins/prometheus/handler_series_test.go
new file mode 100644
index 000000000..9e9d0d812
--- /dev/null
+++ b/internal/proxy/origins/prometheus/handler_series_test.go
@@ -0,0 +1,60 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestSeriesHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", `/default/api/v1/series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}&start=100&end=100`, "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, ok := client.config.Paths[APIPath+mnSeries]
+ if !ok {
+		t.Errorf("could not find path config named %s", APIPath+mnSeries)
+ }
+
+ client.SeriesHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/matrix.go b/internal/proxy/origins/prometheus/matrix.go
new file mode 100644
index 000000000..0cc09429c
--- /dev/null
+++ b/internal/proxy/origins/prometheus/matrix.go
@@ -0,0 +1,414 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/Comcast/trickster/pkg/sort/times"
+ "github.com/prometheus/common/model"
+)
+
+// Step returns the step for the Timeseries
+func (me *MatrixEnvelope) Step() time.Duration {
+ return me.StepDuration
+}
+
+// SetStep sets the step for the Timeseries
+func (me *MatrixEnvelope) SetStep(step time.Duration) {
+ me.StepDuration = step
+}
+
+// Merge merges the provided Timeseries list into the base Timeseries (in the order provided) and optionally sorts the merged Timeseries
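+// For example, as exercised by Run 0 of TestMerge in matrix_test.go: merging a
+// base series holding the timestamp 10s with an incoming series holding 5s and
+// 15s for the same metric yields a single series with 5s, 10s and 15s, and the
+// ExtentList is compressed to the single extent 5s-15s.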
+func (me *MatrixEnvelope) Merge(sort bool, collection ...timeseries.Timeseries) {
+ meMetrics := make(map[string]*model.SampleStream)
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for _, s := range me.Data.Result {
+ wg.Add(1)
+ go func(t *model.SampleStream) {
+ mtx.Lock()
+ meMetrics[t.Metric.String()] = t
+ mtx.Unlock()
+ wg.Done()
+ }(s)
+ }
+ wg.Wait()
+
+ for _, ts := range collection {
+ if ts != nil {
+ me2 := ts.(*MatrixEnvelope)
+ for _, s := range me2.Data.Result {
+ wg.Add(1)
+ go func(t *model.SampleStream) {
+ mtx.Lock()
+ name := t.Metric.String()
+ if _, ok := meMetrics[name]; !ok {
+ meMetrics[name] = t
+ me.Data.Result = append(me.Data.Result, t)
+ mtx.Unlock()
+ wg.Done()
+ return
+ }
+ meMetrics[name].Values = append(meMetrics[name].Values, t.Values...)
+ mtx.Unlock()
+ wg.Done()
+ }(s)
+ }
+ wg.Wait()
+ me.ExtentList = append(me.ExtentList, me2.ExtentList...)
+ }
+ }
+ me.ExtentList = me.ExtentList.Compress(me.StepDuration)
+ me.isSorted = false
+ me.isCounted = false
+ if sort {
+ me.Sort()
+ }
+}
+
+// Clone returns a perfect copy of the base Timeseries
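+// Note that each cloned SampleStream reuses the original's Values slice header
+// (ss.Values[:]), so the sample data itself is shared rather than deep-copied;
+// extents, timestamps and other metadata are copied.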
+func (me *MatrixEnvelope) Clone() timeseries.Timeseries {
+ resMe := &MatrixEnvelope{
+ isCounted: me.isCounted,
+ isSorted: me.isSorted,
+ tslist: make(times.Times, len(me.tslist)),
+ timestamps: make(map[time.Time]bool),
+ Status: me.Status,
+ Data: MatrixData{
+ ResultType: me.Data.ResultType,
+ Result: make(model.Matrix, 0, len(me.Data.Result)),
+ },
+ StepDuration: me.StepDuration,
+ ExtentList: make(timeseries.ExtentList, len(me.ExtentList)),
+ }
+ copy(resMe.ExtentList, me.ExtentList)
+ copy(resMe.tslist, me.tslist)
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for k, v := range me.timestamps {
+ wg.Add(1)
+ go func(t time.Time, b bool) {
+ mtx.Lock()
+ resMe.timestamps[t] = b
+ mtx.Unlock()
+ wg.Done()
+ }(k, v)
+ }
+ wg.Wait()
+
+ for _, ss := range me.Data.Result {
+ newSS := &model.SampleStream{Metric: ss.Metric}
+ newSS.Values = ss.Values[:]
+ resMe.Data.Result = append(resMe.Data.Result, newSS)
+ }
+ return resMe
+}
+
+// CropToSize reduces the number of elements in the Timeseries to the provided count, by evicting elements
+// using a least-recently-used methodology. Any timestamps newer than the provided time are removed before
+// sizing, in order to support backfill tolerance. The provided extent will be marked as used during crop.
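+// Eviction walks the least-recently-used extents first, removing timestamps from
+// the front of each extent; for example, in case 1 of TestCropToSize, cropping a
+// series holding two timestamps down to a size of 1 retains only the newer one.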
+func (me *MatrixEnvelope) CropToSize(sz int, t time.Time, lur timeseries.Extent) {
+ me.isCounted = false
+ me.isSorted = false
+ x := len(me.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // Crop to the Backfill Tolerance Value if needed
+ if me.ExtentList[x-1].End.After(t) {
+ me.CropToRange(timeseries.Extent{Start: me.ExtentList[0].Start, End: t})
+ }
+
+ tc := me.TimestampCount()
+ el := timeseries.ExtentListLRU(me.ExtentList).UpdateLastUsed(lur, me.StepDuration)
+ sort.Sort(el)
+ if len(me.Data.Result) == 0 || tc <= sz {
+ return
+ }
+
+	rc := tc - sz // # of timestamps we must delete to meet the retention policy
+ removals := make(map[time.Time]bool)
+ done := false
+ var ok bool
+
+ for _, x := range el {
+ for ts := x.Start; !x.End.Before(ts) && !done; ts = ts.Add(me.StepDuration) {
+ if _, ok = me.timestamps[ts]; ok {
+ removals[ts] = true
+ done = len(removals) >= rc
+ }
+ }
+ if done {
+ break
+ }
+ }
+
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ for _, s := range me.Data.Result {
+ tmp := s.Values[:0]
+ for _, r := range s.Values {
+ wg.Add(1)
+ go func(p model.SamplePair) {
+ mtx.Lock()
+ if _, ok := removals[p.Timestamp.Time()]; !ok {
+ tmp = append(tmp, p)
+ }
+ mtx.Unlock()
+ wg.Done()
+ }(r)
+ }
+ wg.Wait()
+ s.Values = tmp
+ }
+
+ tl := times.FromMap(removals)
+ sort.Sort(tl)
+
+ for _, t := range tl {
+ for i, e := range el {
+ if e.StartsAt(t) {
+ el[i].Start = e.Start.Add(me.StepDuration)
+ }
+ }
+ }
+
+ me.ExtentList = timeseries.ExtentList(el).Compress(me.StepDuration)
+ me.Sort()
+}
+
+// CropToRange reduces the Timeseries down to timestamps contained within the provided Extents (inclusive).
+// CropToRange assumes the base Timeseries is already sorted, and will corrupt an unsorted Timeseries
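+// For example, as exercised by Run 4 of TestCropToRange: cropping a series with
+// samples at 100s, 200s and 300s to the extent 300s-400s leaves only the 300s
+// sample and shrinks the ExtentList to 300s-300s.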
+func (me *MatrixEnvelope) CropToRange(e timeseries.Extent) {
+ me.isCounted = false
+ x := len(me.ExtentList)
+ // The Series has no extents, so no need to do anything
+ if x < 1 {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the extent of the series is entirely outside the extent of the crop range, return empty set and bail
+ if me.ExtentList.OutsideOf(e) {
+ me.Data.Result = model.Matrix{}
+ me.ExtentList = timeseries.ExtentList{}
+ return
+ }
+
+ // if the series extent is entirely inside the extent of the crop range, simply adjust down its ExtentList
+ if me.ExtentList.InsideOf(e) {
+ if me.ValueCount() == 0 {
+ me.Data.Result = model.Matrix{}
+ }
+ me.ExtentList = me.ExtentList.Crop(e)
+ return
+ }
+
+ if len(me.Data.Result) == 0 {
+ me.ExtentList = me.ExtentList.Crop(e)
+ return
+ }
+
+ deletes := make(map[int]bool)
+
+ for i, s := range me.Data.Result {
+ start := -1
+ end := -1
+ for j, val := range s.Values {
+ t := val.Timestamp.Time()
+ if t.Equal(e.End) {
+				// for cases where the first element is the only qualifying element,
+				// start must be set here, or the series will be dropped as empty
+ if j == 0 || t.Equal(e.Start) || start == -1 {
+ start = j
+ }
+ end = j + 1
+ break
+ }
+ if t.After(e.End) {
+ end = j
+ break
+ }
+ if t.Before(e.Start) {
+ continue
+ }
+ if start == -1 && (t.Equal(e.Start) || (e.End.After(t) && t.After(e.Start))) {
+ start = j
+ }
+ }
+ if start != -1 && len(s.Values) > 0 {
+ if end == -1 {
+ end = len(s.Values)
+ }
+ me.Data.Result[i].Values = s.Values[start:end]
+ } else {
+ deletes[i] = true
+ }
+ }
+ if len(deletes) > 0 {
+ tmp := me.Data.Result[:0]
+ for i, r := range me.Data.Result {
+ if _, ok := deletes[i]; !ok {
+ tmp = append(tmp, r)
+ }
+ }
+ me.Data.Result = tmp
+ }
+ me.ExtentList = me.ExtentList.Crop(e)
+}
+
+// Sort sorts all Values in each Series chronologically by their timestamp
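+// Sort also drops duplicate timestamps within a series (see the "dupe kill"
+// case in TestSort) and rebuilds the envelope's timestamp index and time list.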
+func (me *MatrixEnvelope) Sort() {
+
+ if me.isSorted || len(me.Data.Result) == 0 {
+ return
+ }
+
+ tsm := map[time.Time]bool{}
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+
+ for i, s := range me.Data.Result { // []SampleStream
+ m := make(map[time.Time]model.SamplePair)
+		keys := make(times.Times, 0, len(s.Values))
+
+ for _, v := range s.Values { // []SamplePair
+ wg.Add(1)
+ go func(sp model.SamplePair) {
+ t := sp.Timestamp.Time()
+ mtx.Lock()
+ if _, ok := m[t]; !ok {
+ keys = append(keys, t)
+ m[t] = sp
+ }
+ tsm[t] = true
+ m[t] = sp
+ mtx.Unlock()
+ wg.Done()
+ }(v)
+ }
+ wg.Wait()
+ sort.Sort(keys)
+ sm := make([]model.SamplePair, 0, len(keys))
+ for _, key := range keys {
+ sm = append(sm, m[key])
+ }
+ me.Data.Result[i].Values = sm
+ }
+
+ sort.Sort(me.ExtentList)
+
+ me.timestamps = tsm
+ me.tslist = times.FromMap(tsm)
+ me.isCounted = true
+ me.isSorted = true
+}
+
+func (me *MatrixEnvelope) updateTimestamps() {
+
+	if me.isCounted {
+		return
+	}
+
+	wg := sync.WaitGroup{}
+	mtx := sync.Mutex{}
+
+ m := make(map[time.Time]bool)
+ for _, s := range me.Data.Result { // []SampleStream
+ for _, v := range s.Values { // []SamplePair
+ wg.Add(1)
+ go func(t time.Time) {
+ mtx.Lock()
+ m[t] = true
+ mtx.Unlock()
+ wg.Done()
+ }(v.Timestamp.Time())
+ }
+ }
+ wg.Wait()
+ me.timestamps = m
+ me.tslist = times.FromMap(m)
+ me.isCounted = true
+}
+
+// SetExtents overwrites a Timeseries's known extents with the provided extent list
+func (me *MatrixEnvelope) SetExtents(extents timeseries.ExtentList) {
+ me.isCounted = false
+ me.ExtentList = extents
+}
+
+// Extents returns the Timeseries's ExtentList
+func (me *MatrixEnvelope) Extents() timeseries.ExtentList {
+ return me.ExtentList
+}
+
+// TimestampCount returns the number of unique timestamps across the timeseries
+func (me *MatrixEnvelope) TimestampCount() int {
+ me.updateTimestamps()
+ return len(me.timestamps)
+}
+
+// SeriesCount returns the number of individual Series in the Timeseries object
+func (me *MatrixEnvelope) SeriesCount() int {
+ return len(me.Data.Result)
+}
+
+// ValueCount returns the count of all values across all Series in the Timeseries object
+func (me *MatrixEnvelope) ValueCount() int {
+ c := 0
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range me.Data.Result {
+ wg.Add(1)
+ go func(j int) {
+ mtx.Lock()
+ c += j
+ mtx.Unlock()
+ wg.Done()
+ }(len(me.Data.Result[i].Values))
+ }
+ wg.Wait()
+ return c
+}
+
+// Size returns the approximate memory utilization in bytes of the timeseries
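+// The per-sample figure of 16 bytes assumes each model.SamplePair carries an
+// 8-byte timestamp and an 8-byte value; label storage is approximated by the
+// length of the metric's string form.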
+func (me *MatrixEnvelope) Size() int {
+
+ c := 0
+ wg := sync.WaitGroup{}
+ mtx := sync.Mutex{}
+ for i := range me.Data.Result {
+ wg.Add(1)
+ go func(s *model.SampleStream) {
+ mtx.Lock()
+ c += (len(s.Values) * 16) + len(s.Metric.String())
+ mtx.Unlock()
+ wg.Done()
+ }(me.Data.Result[i])
+ }
+ wg.Wait()
+ return c
+}
diff --git a/internal/proxy/origins/prometheus/matrix_test.go b/internal/proxy/origins/prometheus/matrix_test.go
new file mode 100644
index 000000000..5f7fc30c2
--- /dev/null
+++ b/internal/proxy/origins/prometheus/matrix_test.go
@@ -0,0 +1,1702 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/pkg/sort/times"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/prometheus/common/model"
+)
+
+const rvSuccess = "success"
+
+func TestSetStep(t *testing.T) {
+ me := MatrixEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ me.SetStep(step)
+ if me.StepDuration != step {
+		t.Errorf(`expected "%s". got "%s"`, step, me.StepDuration)
+ }
+}
+
+func TestStep(t *testing.T) {
+ me := MatrixEnvelope{}
+ const step = time.Duration(300) * time.Minute
+ me.SetStep(step)
+ if me.Step() != step {
+		t.Errorf(`expected "%s". got "%s"`, step, me.Step())
+ }
+}
+
+func TestMerge(t *testing.T) {
+ tests := []struct {
+ a, b, merged *MatrixEnvelope
+ }{
+ // Run 0: Series that adhere to rule
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 5000, Value: 1.5},
+ {Timestamp: 15000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(5, 0)},
+ timeseries.Extent{Start: time.Unix(15, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(5, 0), time.Unix(10, 0), time.Unix(15, 0)},
+ timestamps: map[time.Time]bool{time.Unix(5, 0): true, time.Unix(10, 0): true, time.Unix(15, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 5000, Value: 1.5},
+ {Timestamp: 10000, Value: 1.5},
+ {Timestamp: 15000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(5, 0), End: time.Unix(15, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ },
+ },
+ // Run 1: Empty second series
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 2: second series has new metric
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 3: merge one metric, one metric unchanged
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(10000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ // Run 4: merge multiple extents
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 30000000, Value: 1.5},
+ {Timestamp: 35000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 30000000, Value: 1.5},
+ {Timestamp: 35000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(30000, 0), End: time.Unix(35000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0), time.Unix(30000, 0), time.Unix(35000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true, time.Unix(30000, 0): true, time.Unix(35000, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 30000000, Value: 1.5},
+ {Timestamp: 35000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 30000000, Value: 1.5},
+ {Timestamp: 35000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ timeseries.Extent{Start: time.Unix(30000, 0), End: time.Unix(35000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ //
+ //
+ // Run 5: merge with some overlapping extents
+ {
+ a: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(15000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ b: &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 20000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 20000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(15000, 0), End: time.Unix(20000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ merged: &MatrixEnvelope{
+ isCounted: true,
+ isSorted: true,
+ tslist: times.Times{time.Unix(10000, 0), time.Unix(15000, 0), time.Unix(20000, 0)},
+ timestamps: map[time.Time]bool{time.Unix(10000, 0): true, time.Unix(15000, 0): true, time.Unix(20000, 0): true},
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 20000000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000000, Value: 1.5},
+ {Timestamp: 15000000, Value: 1.5},
+ {Timestamp: 20000000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10000, 0), End: time.Unix(20000, 0)},
+ },
+ StepDuration: time.Duration(5000) * time.Second,
+ },
+ },
+ }
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.a.Merge(true, test.b)
+ if !reflect.DeepEqual(test.merged, test.a) {
+ t.Errorf("mismatch\nactual=%v\nexpected=%v", test.a, test.merged)
+ }
+ })
+ }
+}
+
+func TestCropToRange(t *testing.T) {
+ tests := []struct {
+ before, after *MatrixEnvelope
+ extent timeseries.Extent
+ }{
+ // Run 0: Case where the very first element in the matrix has a timestamp matching the extent's end
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1644004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644004600, 0), End: time.Unix(1644004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1644004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644004600, 0), End: time.Unix(1644004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1644004600, 0),
+ },
+ },
+ // Run 1: Case where we trim nothing
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1644004600, 0),
+ },
+ },
+ // Run 2: Case where we trim everything (all data is too late)
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1544004600, 0), End: time.Unix(1544004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(10, 0),
+ },
+ },
+ // Run 3: Case where we trim everything (all data is too early)
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(100, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(10000, 0),
+ End: time.Unix(20000, 0),
+ },
+ },
+ // Run 4: Case where we trim some off the beginning
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(300, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(400, 0),
+ },
+ },
+ // Run 5: Case where we trim some off the ends
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "e"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "e"},
+ Values: []model.SamplePair{
+ {Timestamp: 200000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(200, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(200, 0),
+ },
+ },
+ // Run 6: Case where the last datapoint is on the Crop extent
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "f"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "f"},
+ Values: []model.SamplePair{
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+ // Run 7: Case where we aren't given any datapoints
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "g"},
+ Values: []model.SamplePair{},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+
+ // Run 8: Case where we have more series than points
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "h"},
+ Values: []model.SamplePair{{Timestamp: 100000, Value: 1.5}},
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "h"},
+ Values: []model.SamplePair{{Timestamp: 100000, Value: 1.5}},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(100, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(200, 0),
+ End: time.Unix(300, 0),
+ },
+ },
+ // Run 9: Case where after cropping, an inner series is empty/removed
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 10: Case where after cropping, the front series is empty/removed
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 11: Case where after cropping, the back series is empty/removed
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 100000, Value: 1.5},
+ {Timestamp: 200000, Value: 1.5},
+ {Timestamp: 300000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(100, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 400000, Value: 1.5},
+ {Timestamp: 500000, Value: 1.5},
+ {Timestamp: 600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(400, 0), End: time.Unix(600, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(400, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 12: Case where we short circuit since the dataset is already entirely inside the crop range
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{},
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{},
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(100, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ // Run 13: Case where we short circuit since the dataset is empty
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(200, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(300, 0), End: time.Unix(300, 0)},
+ },
+ StepDuration: time.Duration(100) * time.Second,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(300, 0),
+ End: time.Unix(600, 0),
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToRange(test.extent)
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\ngot=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+const testStep = time.Duration(10) * time.Second
+
+func TestCropToSize(t *testing.T) {
+
+ now := time.Now().Truncate(testStep)
+ nowEpochMs := model.Time(now.Unix() * 1000)
+
+ tests := []struct {
+ before, after *MatrixEnvelope
+ size int
+ bft time.Time
+ extent timeseries.Extent
+ }{
+ // case 0: where we already have the number of timestamps we are cropping to
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004600, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004600, 0): true},
+ tslist: times.Times{time.Unix(1444004600, 0)},
+ isCounted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004600, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 1
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004600000, Value: 1.5},
+ {Timestamp: 1444004610000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004600, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004610000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: time.Unix(1444004610, 0)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004610, 0): true},
+ tslist: times.Times{time.Unix(1444004610, 0)},
+ isCounted: true,
+ isSorted: true,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: time.Unix(1444004610, 0),
+ },
+ size: 1,
+ bft: now,
+ },
+
+ // case 2 - empty extent list
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{},
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{},
+ },
+ ExtentList: timeseries.ExtentList{},
+ StepDuration: testStep,
+ },
+ extent: timeseries.Extent{},
+ size: 1,
+ bft: now,
+ },
+
+ // case 3 - backfill tolerance
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004610000, Value: 1.5},
+ {Timestamp: nowEpochMs, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: now},
+ },
+ StepDuration: testStep,
+ },
+ after: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1444004610000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1444004610, 0), End: now.Add(-5 * time.Minute)},
+ },
+ StepDuration: testStep,
+ timestamps: map[time.Time]bool{time.Unix(1444004610, 0): true},
+ tslist: times.Times{time.Unix(1444004610, 0)},
+ isCounted: true,
+ isSorted: false,
+ },
+ extent: timeseries.Extent{
+ Start: time.Unix(0, 0),
+ End: now,
+ },
+ size: 2,
+ bft: now.Add(-5 * time.Minute),
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.CropToSize(test.size, test.bft, test.extent)
+
+ for i := range test.before.ExtentList {
+ test.before.ExtentList[i].LastUsed = time.Time{}
+ }
+
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\n got=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestUpdateTimestamps(t *testing.T) {
+
+ // test edge condition here (core functionality is tested across this file)
+ me := MatrixEnvelope{isCounted: true}
+ me.updateTimestamps()
+ if me.timestamps != nil {
+ t.Errorf("expected nil map, got size %d", len(me.timestamps))
+ }
+
+}
+
+func TestClone(t *testing.T) {
+
+ tests := []struct {
+ before *MatrixEnvelope
+ }{
+ // Run 0
+ {
+ before: &MatrixEnvelope{
+ tslist: times.Times{time.Unix(1644001200, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1644001200, 0): true},
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1644001200000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644001200, 0), End: time.Unix(1644001200, 0)},
+ },
+ StepDuration: time.Duration(3600) * time.Second,
+ },
+ },
+
+ // Run 1
+ {
+ before: &MatrixEnvelope{
+ tslist: times.Times{time.Unix(1644001200, 0), time.Unix(1644004800, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1644001200, 0): true, time.Unix(1644004800, 0): true},
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1644001200000, Value: 1.5},
+ },
+ },
+
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 1644001200000, Value: 1.5},
+ {Timestamp: 1644004800000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(1644001200, 0), End: time.Unix(1644004800, 0)},
+ },
+ StepDuration: time.Duration(3600) * time.Second,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ after := test.before.Clone()
+ if !reflect.DeepEqual(test.before, after) {
+ t.Errorf("mismatch\nexpected %v\ngot %v", test.before, after)
+ }
+ })
+ }
+
+}
+
+func TestSort(t *testing.T) {
+ tests := []struct {
+ before, after *MatrixEnvelope
+ extent timeseries.Extent
+ }{
+ // case 0 - sort and dedupe all series
+ {
+ before: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004600000, Value: 1.5},
+ {Timestamp: 1544004800000, Value: 1.5},
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004000000, Value: 1.5}, // Sort should also remove the duplicate timestamp
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004600000, Value: 1.5},
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004800000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004800000, Value: 1.5},
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004600000, Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ after: &MatrixEnvelope{
+ isSorted: true,
+ isCounted: true,
+ tslist: []time.Time{time.Unix(1544004000, 0), time.Unix(1544004200, 0), time.Unix(1544004600, 0), time.Unix(1544004800, 0)},
+ timestamps: map[time.Time]bool{time.Unix(1544004000, 0): true, time.Unix(1544004200, 0): true,
+ time.Unix(1544004600, 0): true, time.Unix(1544004800, 0): true},
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004600000, Value: 1.5},
+ {Timestamp: 1544004800000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004600000, Value: 1.5},
+ {Timestamp: 1544004800000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "c"},
+ Values: []model.SamplePair{
+ {Timestamp: 1544004000000, Value: 1.5},
+ {Timestamp: 1544004200000, Value: 1.5},
+ {Timestamp: 1544004600000, Value: 1.5},
+ {Timestamp: 1544004800000, Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ test.before.isSorted = false
+ test.before.Sort()
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\nactual=%v", test.after, test.before)
+ }
+ // test isSorted short circuit
+ test.before.Sort()
+ if !reflect.DeepEqual(test.before, test.after) {
+ t.Errorf("mismatch\nexpected=%v\nactual=%v", test.after, test.before)
+ }
+ })
+ }
+}
+
+func TestSetExtents(t *testing.T) {
+ me := &MatrixEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ me.SetExtents(ex)
+ if len(me.ExtentList) != 1 {
+ t.Errorf(`expected 1. got %d`, len(me.ExtentList))
+ }
+}
+
+func TestExtents(t *testing.T) {
+ me := &MatrixEnvelope{}
+ ex := timeseries.ExtentList{timeseries.Extent{Start: time.Time{}, End: time.Time{}}}
+ me.SetExtents(ex)
+ e := me.Extents()
+ if len(e) != 1 {
+ t.Errorf(`expected 1. got %d`, len(me.ExtentList))
+ }
+}
+
+func TestSeriesCount(t *testing.T) {
+ me := &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ },
+ },
+ }
+ if me.SeriesCount() != 1 {
+ t.Errorf("expected 1 got %d.", me.SeriesCount())
+ }
+}
+
+func TestValueCount(t *testing.T) {
+ me := &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ },
+ },
+ }
+ if me.ValueCount() != 3 {
+ t.Errorf("expected 3 got %d.", me.ValueCount())
+ }
+}
+
+func TestTimestampCount(t *testing.T) {
+
+ tests := []struct {
+ ts *MatrixEnvelope
+ expected int
+ }{
+ {
+ ts: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ expected: 3,
+ },
+
+ {
+ ts: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ expected: 2,
+ },
+
+ {
+ ts: &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "d"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "e"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ },
+ },
+ },
+ expected: 3,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ tc := test.ts.TimestampCount()
+ if tc != test.expected {
+ t.Errorf("expected %d got %d.", test.expected, tc)
+ }
+ })
+ }
+}
+
+func TestSize(t *testing.T) {
+ m := &MatrixEnvelope{
+ Status: rvSuccess,
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 10000, Value: 1.5},
+ },
+ },
+ },
+ },
+ ExtentList: timeseries.ExtentList{
+ timeseries.Extent{Start: time.Unix(10, 0), End: time.Unix(10, 0)},
+ },
+ StepDuration: time.Duration(5) * time.Second,
+ }
+ i := m.Size()
+ expected := 17
+
+ if i != expected {
+ t.Errorf("expected %d got %d", expected, i)
+ }
+}
diff --git a/internal/proxy/origins/prometheus/model.go b/internal/proxy/origins/prometheus/model.go
new file mode 100644
index 000000000..cd0245b03
--- /dev/null
+++ b/internal/proxy/origins/prometheus/model.go
@@ -0,0 +1,97 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/Comcast/trickster/pkg/sort/times"
+
+ "github.com/Comcast/trickster/internal/timeseries"
+ "github.com/prometheus/common/model"
+)
+
+// VectorEnvelope represents a Vector response object from the Prometheus HTTP API
+type VectorEnvelope struct {
+ Status string `json:"status"`
+ Data VectorData `json:"data"`
+}
+
+// VectorData represents the Data body of a Vector response object from the Prometheus HTTP API
+type VectorData struct {
+ ResultType string `json:"resultType"`
+ Result model.Vector `json:"result"`
+}
+
+// MatrixEnvelope represents a Matrix response object from the Prometheus HTTP API
+type MatrixEnvelope struct {
+ Status string `json:"status"`
+ Data MatrixData `json:"data"`
+ ExtentList timeseries.ExtentList `json:"extents,omitempty"`
+ StepDuration time.Duration `json:"step,omitempty"`
+
+ timestamps map[time.Time]bool // tracks unique timestamps in the matrix data
+ tslist times.Times
+ isSorted bool // tracks if the matrix data is currently sorted
+ isCounted bool // tracks if the timestamps map and tslist are up-to-date
+}
+
+// MatrixData represents the Data body of a Matrix response object from the Prometheus HTTP API
+type MatrixData struct {
+ ResultType string `json:"resultType"`
+ Result model.Matrix `json:"result"`
+}
+
+// MarshalTimeseries converts a Timeseries into a JSON blob
+func (c *Client) MarshalTimeseries(ts timeseries.Timeseries) ([]byte, error) {
+ // Marshal the Envelope back to a json object for Cache Storage
+ return json.Marshal(ts)
+}
+
+// UnmarshalTimeseries converts a JSON blob into a Timeseries
+func (c *Client) UnmarshalTimeseries(data []byte) (timeseries.Timeseries, error) {
+ me := &MatrixEnvelope{}
+ err := json.Unmarshal(data, &me)
+ return me, err
+}
+
+// UnmarshalInstantaneous converts a JSON blob into an Instantaneous Data Point
+func (c *Client) UnmarshalInstantaneous(data []byte) (timeseries.Timeseries, error) {
+ ve := &VectorEnvelope{}
+ err := json.Unmarshal(data, &ve)
+ if err != nil {
+ return nil, err
+ }
+ return ve.ToMatrix(), nil
+}
+
+// ToMatrix converts a VectorEnvelope to a MatrixEnvelope
+func (ve *VectorEnvelope) ToMatrix() *MatrixEnvelope {
+ me := &MatrixEnvelope{}
+ me.Status = ve.Status
+ me.Data = MatrixData{
+ ResultType: "matrix",
+ Result: make(model.Matrix, 0, len(ve.Data.Result)),
+ }
+
+ var ts time.Time
+ for _, v := range ve.Data.Result {
+ v.Timestamp = model.TimeFromUnix(v.Timestamp.Unix()) // truncate to the second
+ ts = v.Timestamp.Time()
+ me.Data.Result = append(me.Data.Result, &model.SampleStream{Metric: v.Metric, Values: []model.SamplePair{{Timestamp: v.Timestamp, Value: v.Value}}})
+ }
+ me.ExtentList = timeseries.ExtentList{timeseries.Extent{Start: ts, End: ts}}
+ return me
+}
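Taken together, ToMatrix and the two marshaling methods let instantaneous vector responses be cached in the same envelope as range queries. A minimal round-trip sketch, assuming it is compiled into this package alongside model.go (the helper name is illustrative):

    package prometheus

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func exampleModelRoundTrip() {
    	// a single instantaneous sample, as returned by /api/v1/query
    	ve := &VectorEnvelope{
    		Status: "success",
    		Data: VectorData{
    			ResultType: "vector",
    			Result: model.Vector{
    				&model.Sample{
    					Metric:    model.Metric{"__name__": "up"},
    					Value:     1,
    					Timestamp: model.TimeFromUnix(1554730772),
    				},
    			},
    		},
    	}

    	// ToMatrix wraps the sample in a single-point SampleStream and
    	// records a zero-width extent at the sample's timestamp
    	me := ve.ToMatrix()

    	// the envelope marshals to the same JSON shape Prometheus emits,
    	// and unmarshals back into a MatrixEnvelope for cache storage
    	c := &Client{}
    	b, _ := c.MarshalTimeseries(me)
    	ts, _ := c.UnmarshalTimeseries(b)
    	fmt.Println(len(ts.(*MatrixEnvelope).Data.Result)) // 1
    }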
diff --git a/internal/proxy/origins/prometheus/model_test.go b/internal/proxy/origins/prometheus/model_test.go
new file mode 100644
index 000000000..a94a16658
--- /dev/null
+++ b/internal/proxy/origins/prometheus/model_test.go
@@ -0,0 +1,130 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+)
+
+func TestMarshalTimeseries(t *testing.T) {
+
+ me := &MatrixEnvelope{
+ Data: MatrixData{
+ ResultType: "matrix",
+ Result: model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "a"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{"__name__": "b"},
+ Values: []model.SamplePair{
+ {Timestamp: 99000, Value: 1.5},
+ {Timestamp: 199000, Value: 1.5},
+ {Timestamp: 299000, Value: 1.5},
+ },
+ },
+ },
+ },
+ }
+
+ expected := `{"status":"","data":{"resultType":"matrix","result":[{"metric":{"__name__":"a"},"values":[[99,"1.5"],[199,"1.5"],[299,"1.5"]]},{"metric":{"__name__":"b"},"values":[[99,"1.5"],[199,"1.5"],[299,"1.5"]]}]}}`
+ client := &Client{}
+ bytes, err := client.MarshalTimeseries(me)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if string(bytes) != expected {
+ t.Errorf("expected [%s] got [%s]", expected, string(bytes))
+ }
+
+}
+
+func TestUnmarshalTimeseries(t *testing.T) {
+
+ bytes := []byte(`{"status":"","data":{"resultType":"matrix","result":[{"metric":{"__name__":"a"},"values":[[99,"1.5"],[199,"1.5"],[299,"1.5"]]},{"metric":{"__name__":"b"},"values":[[99,"1.5"],[199,"1.5"],[299,"1.5"]]}]}}`)
+ client := &Client{}
+ ts, err := client.UnmarshalTimeseries(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ me := ts.(*MatrixEnvelope)
+
+ if len(me.Data.Result) != 2 {
+ t.Errorf(`expected 2. got %d`, len(me.Data.Result))
+ return
+ }
+
+ if len(me.Data.Result[0].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(me.Data.Result[0].Values))
+ return
+ }
+
+ if len(me.Data.Result[1].Values) != 3 {
+ t.Errorf(`expected 3. got %d`, len(me.Data.Result[1].Values))
+ return
+ }
+
+}
+
+func TestUnmarshalInstantaneous(t *testing.T) {
+
+ bytes := []byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","instance":"localhost:9090","job":"prometheus"},"value":[1554730772.113,"1"]}]}}`)
+ client := &Client{}
+ ts, err := client.UnmarshalInstantaneous(bytes)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ me := ts.(*MatrixEnvelope)
+
+ if len(me.Data.Result) != 1 {
+ t.Errorf(`expected 1. got %d`, len(me.Data.Result))
+ return
+ }
+
+ if len(me.Data.Result[0].Values) != 1 {
+ t.Errorf(`expected 1. got %d`, len(me.Data.Result[0].Values))
+ return
+ }
+
+ if me.Data.Result[0].Values[0].Value != 1 {
+ t.Errorf(`expected 1. got %v`, me.Data.Result[0].Values[0].Value)
+ return
+ }
+
+}
+
+func TestUnmarshalInstantaneousFails(t *testing.T) {
+
+ bytes := []byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","instance":"localhost:9090","job":"prometheus"},"value":[1554730772.113,"1"]}]}`)
+ client := &Client{}
+ _, err := client.UnmarshalInstantaneous(bytes)
+ if err == nil {
+ t.Errorf("expected error: 'unexpected end of JSON input'")
+ return
+ }
+
+}
diff --git a/internal/proxy/origins/prometheus/prometheus.go b/internal/proxy/origins/prometheus/prometheus.go
new file mode 100644
index 000000000..0e18923d3
--- /dev/null
+++ b/internal/proxy/origins/prometheus/prometheus.go
@@ -0,0 +1,176 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ tt "github.com/Comcast/trickster/internal/proxy/timeconv"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// Prometheus API
+const (
+ APIPath = "/api/v1/"
+ mnQueryRange = "query_range"
+ mnQuery = "query"
+ mnLabels = "labels"
+ mnLabel = "label"
+ mnSeries = "series"
+ mnTargets = "targets"
+ mnTargetsMeta = "targets/metadata"
+ mnRules = "rules"
+ mnAlerts = "alerts"
+ mnAlertManagers = "alertmanagers"
+ mnStatus = "status"
+)
+
+// Common URL Parameter Names
+const (
+ upQuery = "query"
+ upStart = "start"
+ upEnd = "end"
+ upStep = "step"
+ upTime = "time"
+ upMatch = "match[]"
+)
+
+// Client Implements Proxy Client Interface
+type Client struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+ handlers map[string]http.Handler
+ handlersRegistered bool
+
+ healthURL *url.URL
+ healthHeaders http.Header
+ healthMethod string
+}
+
+// NewClient returns a new Client Instance
+func NewClient(name string, oc *config.OriginConfig, cache cache.Cache) (*Client, error) {
+ c, err := proxy.NewHTTPClient(oc)
+ return &Client{name: name, config: oc, cache: cache, webClient: c}, err
+}
+
+// SetCache sets the Cache object the client will use for caching origin content
+func (c *Client) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
+
+// Configuration returns the upstream Configuration for this Client
+func (c *Client) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// HTTPClient returns the HTTP Client for this origin
+func (c *Client) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Name returns the name of the upstream Configuration proxied by the Client
+func (c *Client) Name() string {
+ return c.name
+}
+
+// Cache returns a handle to the Cache instance used by the Client
+func (c *Client) Cache() cache.Cache {
+ return c.cache
+}
+
+// parseTime converts a query time URL parameter to time.Time.
+// Copied from https://github.com/prometheus/prometheus/blob/master/web/api/v1/api.go
+func parseTime(s string) (time.Time, error) {
+ if t, err := strconv.ParseFloat(s, 64); err == nil {
+ s, ns := math.Modf(t)
+ ns = math.Round(ns*1000) / 1000
+ return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
+ }
+ if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
+ return t, nil
+ }
+ return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
+}
+
+// parseDuration parses Prometheus step parameters, which can be a bare number of seconds (float64)
+// or a duration string such as 1d or 5m; tt.ParseDuration handles the latter, and the float64 form is handled here
+func parseDuration(input string) (time.Duration, error) {
+ v, err := strconv.ParseFloat(input, 64)
+ if err != nil {
+ return tt.ParseDuration(input)
+ }
+ // assume v is in seconds
+ return time.Duration(int64(v)) * time.Second, nil
+}
+
+// ParseTimeRangeQuery parses the key parts of a TimeRangeQuery from the inbound HTTP Request
+func (c *Client) ParseTimeRangeQuery(r *http.Request) (*timeseries.TimeRangeQuery, error) {
+
+ trq := &timeseries.TimeRangeQuery{Extent: timeseries.Extent{}}
+ qp := r.URL.Query()
+
+ trq.Statement = qp.Get(upQuery)
+ if trq.Statement == "" {
+ return nil, errors.MissingURLParam(upQuery)
+ }
+
+ if p := qp.Get(upStart); p != "" {
+ t, err := parseTime(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Extent.Start = t
+ } else {
+ return nil, errors.MissingURLParam(upStart)
+ }
+
+ if p := qp.Get(upEnd); p != "" {
+ t, err := parseTime(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Extent.End = t
+ } else {
+ return nil, errors.MissingURLParam(upEnd)
+ }
+
+ if p := qp.Get(upStep); p != "" {
+ step, err := parseDuration(p)
+ if err != nil {
+ return nil, err
+ }
+ trq.Step = step
+ } else {
+ return nil, errors.MissingURLParam(upStep)
+ }
+
+ if strings.Contains(trq.Statement, " offset ") {
+ trq.IsOffset = true
+ trq.FastForwardDisable = true
+ }
+
+ return trq, nil
+}
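A minimal sketch of the two accepted input formats, assuming it sits in this package since parseTime and parseDuration are unexported (the helper name and printed values are illustrative):

    package prometheus

    import "fmt"

    func exampleParseInputs() {
    	// times may be epoch seconds with an optional fractional part, or RFC3339Nano
    	t1, _ := parseTime("1523077733.2")
    	t2, _ := parseTime("2018-04-07T05:08:53.2Z")
    	fmt.Println(t1.Equal(t2)) // true

    	// steps may be a bare number of seconds or a duration string
    	d1, _ := parseDuration("15") // 15s
    	d2, _ := parseDuration("5m") // 5m0s
    	fmt.Println(d1, d2)
    }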
diff --git a/internal/proxy/origins/prometheus/prometheus_test.go b/internal/proxy/origins/prometheus/prometheus_test.go
new file mode 100644
index 000000000..b28a7fbcf
--- /dev/null
+++ b/internal/proxy/origins/prometheus/prometheus_test.go
@@ -0,0 +1,393 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "testing"
+ "time"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/errors"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+)
+
+func TestPrometheusClientInterfacing(t *testing.T) {
+
+ // this test ensures the client will properly conform to the
+ // Client and TimeseriesClient interfaces
+
+ c := &Client{name: "test"}
+ var oc origins.Client = c
+ var tc origins.TimeseriesClient = c
+
+ if oc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", oc.Name())
+ }
+
+ if tc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", tc.Name())
+ }
+}
+
+func TestNewClient(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+
+ oc := &config.OriginConfig{OriginType: "TEST_CLIENT"}
+ c, err := NewClient("default", oc, cache)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if c.Name() != "default" {
+ t.Errorf("expected %s got %s", "default", c.Name())
+ }
+
+ if c.Cache().Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Cache().Configuration().CacheType)
+ }
+
+ if c.Configuration().OriginType != "TEST_CLIENT" {
+ t.Errorf("expected %s got %s", "TEST_CLIENT", c.Configuration().OriginType)
+ }
+}
+
+func TestParseTime(t *testing.T) {
+ fixtures := []struct {
+ input string
+ output string
+ }{
+ {"2018-04-07T05:08:53.200Z", "2018-04-07 05:08:53.2 +0000 UTC"},
+ {"1523077733", "2018-04-07 05:08:53 +0000 UTC"},
+ {"1523077733.2", "2018-04-07 05:08:53.2 +0000 UTC"},
+ }
+
+ for _, f := range fixtures {
+ out, err := parseTime(f.input)
+ if err != nil {
+ t.Error(err)
+ }
+
+ outStr := out.UTC().String()
+ if outStr != f.output {
+ t.Errorf("Expected %s, got %s for input %s", f.output, outStr, f.input)
+ }
+ }
+}
+
+func TestParseTimeFails(t *testing.T) {
+ _, err := parseTime("a")
+ if err == nil {
+ t.Errorf(`expected error 'cannot parse "a" to a valid timestamp'`)
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+ client := Client{config: oc}
+ c := client.Configuration()
+ if c.OriginType != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c.OriginType)
+ }
+}
+
+func TestHTTPClient(t *testing.T) {
+ oc := &config.OriginConfig{OriginType: "TEST"}
+
+ client, err := NewClient("test", oc, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if client.HTTPClient() == nil {
+ t.Errorf("missing http client")
+ }
+}
+
+func TestCache(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://1", "-origin-type", "test"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ t.Error(err)
+ }
+ client := Client{cache: cache}
+ c := client.Cache()
+
+ if c.Configuration().CacheType != "memory" {
+ t.Errorf("expected %s got %s", "memory", c.Configuration().CacheType)
+ }
+}
+
+func TestName(t *testing.T) {
+
+ client := Client{name: "TEST"}
+ c := client.Name()
+
+ if c != "TEST" {
+ t.Errorf("expected %s got %s", "TEST", c)
+ }
+
+}
+
+func TestParseTimeRangeQuery(t *testing.T) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"15"},
+ }).Encode(),
+ }}
+ client := &Client{}
+ res, err := client.ParseTimeRangeQuery(req)
+ if err != nil {
+ t.Error(err)
+ } else {
+ if int(res.Step.Seconds()) != 15 {
+ t.Errorf("expected 15 got %d", int(res.Step.Seconds()))
+ }
+
+ if int(res.Extent.End.Sub(res.Extent.Start).Hours()) != 6 {
+ t.Errorf("expected 6 got %d", int(res.Extent.End.Sub(res.Extent.Start).Hours()))
+ }
+ }
+}
+
+func TestParseTimeRangeQueryMissingQuery(t *testing.T) {
+ expected := errors.MissingURLParam(upQuery).Error()
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query_": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"15"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeBadStartTime(t *testing.T) {
+ const color = "red"
+ expected := fmt.Errorf(`cannot parse "%s" to a valid timestamp`, color)
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {color},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"15"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected.Error() {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeBadEndTime(t *testing.T) {
+ const color = "blue"
+ expected := fmt.Errorf(`cannot parse "%s" to a valid timestamp`, color)
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {color},
+ "step": {"15"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected.Error() {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryBadDuration(t *testing.T) {
+
+ expected := `unable to parse duration: x`
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"x"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryNoStart(t *testing.T) {
+
+ expected := `missing URL parameter: [start]`
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"x"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryNoEnd(t *testing.T) {
+
+ expected := `missing URL parameter: [end]`
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "step": {"x"}}).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryNoStep(t *testing.T) {
+
+ expected := `missing URL parameter: [step]`
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up`},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {strconv.Itoa(int(time.Now().Unix()))}},
+ ).Encode(),
+ }}
+ client := &Client{}
+ _, err := client.ParseTimeRangeQuery(req)
+ if err == nil {
+ t.Errorf(`expected "%s", got NO ERROR`, expected)
+ return
+ }
+ if err.Error() != expected {
+ t.Errorf(`expected "%s", got "%s"`, expected, err.Error())
+ }
+}
+
+func TestParseTimeRangeQueryWithOffset(t *testing.T) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: "blah.com",
+ Path: "/",
+ RawQuery: url.Values(map[string][]string{
+ "query": {`up and has offset `},
+ "start": {strconv.Itoa(int(time.Now().Add(time.Duration(-6) * time.Hour).Unix()))},
+ "end": {strconv.Itoa(int(time.Now().Unix()))},
+ "step": {"15"},
+ }).Encode(),
+ }}
+ client := &Client{}
+ res, err := client.ParseTimeRangeQuery(req)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if !res.IsOffset {
+ t.Errorf("expected true got %t", res.IsOffset)
+ }
+
+}
+
+func TestSetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ c.SetCache(nil)
+ if c.Cache() != nil {
+ t.Errorf("expected nil cache for client named %s", "test")
+ }
+}
diff --git a/internal/proxy/origins/prometheus/routes.go b/internal/proxy/origins/prometheus/routes.go
new file mode 100644
index 000000000..f0a79669c
--- /dev/null
+++ b/internal/proxy/origins/prometheus/routes.go
@@ -0,0 +1,225 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+func (c *Client) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+ // This is the registry of handlers that Trickster supports for Prometheus;
+ // they can be referenced by name (map key) in configuration files
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers["query_range"] = http.HandlerFunc(c.QueryRangeHandler)
+ c.handlers["query"] = http.HandlerFunc(c.QueryHandler)
+ c.handlers["series"] = http.HandlerFunc(c.SeriesHandler)
+ c.handlers["proxycache"] = http.HandlerFunc(c.ObjectProxyCacheHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *Client) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
+func populateHeathCheckRequestValues(oc *config.OriginConfig) {
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = APIPath + mnQuery
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+ if oc.HealthCheckQuery == "-" {
+ oc.HealthCheckQuery = "query=up"
+ }
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *Client) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+
+ populateHeathCheckRequestValues(oc)
+
+ var rhts map[string]string
+ if oc != nil {
+ rhts = map[string]string{headers.NameCacheControl: fmt.Sprintf("%s=%d", headers.ValueSharedMaxAge, oc.TimeseriesTTLSecs)}
+ }
+ rhinst := map[string]string{headers.NameCacheControl: fmt.Sprintf("%s=%d", headers.ValueSharedMaxAge, 30)}
+
+ paths := map[string]*config.PathConfig{
+
+ APIPath + mnQueryRange: {
+ Path: APIPath + mnQueryRange,
+ HandlerName: mnQueryRange,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upQuery, upStep},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhts,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnQuery: {
+ Path: APIPath + mnQuery,
+ HandlerName: mnQuery,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upQuery, upTime},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnSeries: {
+ Path: APIPath + mnSeries,
+ HandlerName: mnSeries,
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{upMatch, upStart, upEnd},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnLabels: {
+ Path: APIPath + mnLabels,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnLabel + "/": {
+ Path: APIPath + mnLabel + "/",
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ MatchTypeName: "prefix",
+ MatchType: config.PathMatchTypePrefix,
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ },
+
+ APIPath + mnTargets: {
+ Path: APIPath + mnTargets,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnTargetsMeta: {
+ Path: APIPath + mnTargetsMeta,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{"match_target", "metric", "limit"},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnRules: {
+ Path: APIPath + mnRules,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnAlerts: {
+ Path: APIPath + mnAlerts,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnAlertManagers: {
+ Path: APIPath + mnAlertManagers,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ MatchTypeName: "exact",
+ MatchType: config.PathMatchTypeExact,
+ },
+
+ APIPath + mnStatus: {
+ Path: APIPath + mnStatus,
+ HandlerName: "proxycache",
+ Methods: []string{http.MethodGet},
+ CacheKeyParams: []string{},
+ CacheKeyHeaders: []string{},
+ MatchTypeName: "prefix",
+ MatchType: config.PathMatchTypePrefix,
+ ResponseHeaders: rhinst,
+ OriginConfig: oc,
+ },
+
+ APIPath: {
+ Path: APIPath,
+ HandlerName: "proxy",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ OriginConfig: oc,
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+
+ "/": {
+ Path: "/",
+ HandlerName: "proxy",
+ Methods: []string{http.MethodGet, http.MethodPost},
+ OriginConfig: oc,
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+ }
+
+ oc.FastForwardPath = paths[APIPath+mnQuery].Clone()
+
+ return paths
+
+}
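A minimal wiring sketch, assuming a zero-value Client and a default OriginConfig, showing how a path entry's HandlerName resolves to one of the handlers registered above (the helper name is illustrative):

    package prometheus

    import (
    	"fmt"

    	"github.com/Comcast/trickster/internal/config"
    )

    func exampleRouteWiring() {
    	c := &Client{}
    	paths := c.DefaultPathConfigs(config.NewOriginConfig())

    	// "/api/v1/query_range" is matched exactly and served by the query_range handler
    	pc := paths[APIPath+mnQueryRange]
    	h, ok := c.Handlers()[pc.HandlerName]
    	fmt.Println(pc.MatchTypeName, ok, h != nil) // exact true true
    }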
diff --git a/internal/proxy/origins/prometheus/routes_test.go b/internal/proxy/origins/prometheus/routes_test.go
new file mode 100644
index 000000000..eca0a7c34
--- /dev/null
+++ b/internal/proxy/origins/prometheus/routes_test.go
@@ -0,0 +1,63 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestRegisterHandlers(t *testing.T) {
+ c := &Client{}
+ c.registerHandlers()
+ if _, ok := c.handlers[mnQueryRange]; !ok {
+ t.Errorf("expected to find handler named: %s", mnQueryRange)
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ c := &Client{}
+ m := c.Handlers()
+ if _, ok := m[mnQueryRange]; !ok {
+ t.Errorf("expected to find handler named: %s", mnQueryRange)
+ }
+}
+
+func TestDefaultPathConfigs(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "prometheus", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ dpc := client.DefaultPathConfigs(client.config)
+
+ if _, ok := dpc["/"]; !ok {
+ t.Errorf("expected to find path named: %s", "/")
+ }
+
+ const expectedLen = 13
+ if len(dpc) != expectedLen {
+ t.Errorf("expected %d paths, got %d", expectedLen, len(dpc))
+ }
+
+}
diff --git a/internal/proxy/origins/prometheus/url.go b/internal/proxy/origins/prometheus/url.go
new file mode 100644
index 000000000..2b0bcfd82
--- /dev/null
+++ b/internal/proxy/origins/prometheus/url.go
@@ -0,0 +1,75 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/urls"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy configuration
+func (c *Client) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to construct the full upstream URL
+func (c *Client) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/", "/", 1)
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
+
+// SetExtent will change the upstream request query to use the provided Extent
+func (c *Client) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery, extent *timeseries.Extent) {
+ params := r.URL.Query()
+ params.Set(upStart, strconv.FormatInt(extent.Start.Unix(), 10))
+ params.Set(upEnd, strconv.FormatInt(extent.End.Unix(), 10))
+ r.URL.RawQuery = params.Encode()
+}
+
+// FastForwardURL returns the URL used to fetch the Fast Forward value, derived from the time range query URL
+func (c *Client) FastForwardURL(r *http.Request) (*url.URL, error) {
+
+ u := urls.Clone(r.URL)
+
+ if strings.HasSuffix(u.Path, "/query_range") {
+ u.Path = u.Path[0 : len(u.Path)-6]
+ }
+
+ p := u.Query()
+ p.Del(upStart)
+ p.Del(upEnd)
+ p.Del(upStep)
+ u.RawQuery = p.Encode()
+
+ return u, nil
+}
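A minimal sketch of the three helpers above, assuming an origin named prom1 at http://origin:9090 (the names, URLs, and helper function are illustrative):

    package prometheus

    import (
    	"fmt"
    	"net/http"
    	"time"

    	"github.com/Comcast/trickster/internal/config"
    	"github.com/Comcast/trickster/internal/timeseries"
    )

    func exampleURLRewrites() {
    	oc := config.NewOriginConfig()
    	oc.Scheme, oc.Host, oc.PathPrefix = "http", "origin:9090", ""
    	c := &Client{name: "prom1", config: oc}

    	r, _ := http.NewRequest(http.MethodGet,
    		"http://trickster:8480/prom1/api/v1/query_range?query=up&start=0&end=60&step=15", nil)

    	// the origin-name prefix is stripped before proxying upstream
    	u := c.BuildUpstreamURL(r)
    	fmt.Println(u.Path) // /api/v1/query_range

    	// start and end are replaced with the extent being fetched
    	r.URL = u
    	c.SetExtent(r, nil, &timeseries.Extent{Start: time.Unix(0, 0), End: time.Unix(300, 0)})

    	// fast-forward drops start/end/step and trims "_range" from the path
    	ffu, _ := c.FastForwardURL(r)
    	fmt.Println(ffu.Path, ffu.RawQuery) // /api/v1/query query=up
    }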
diff --git a/internal/proxy/origins/prometheus/url_test.go b/internal/proxy/origins/prometheus/url_test.go
new file mode 100644
index 000000000..145aab462
--- /dev/null
+++ b/internal/proxy/origins/prometheus/url_test.go
@@ -0,0 +1,96 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package prometheus
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+func TestSetExtent(t *testing.T) {
+
+ start := time.Now().Add(time.Duration(-6) * time.Hour)
+ end := time.Now()
+
+ startSecs := fmt.Sprintf("%d", start.Unix())
+ endSecs := fmt.Sprintf("%d", end.Unix())
+
+ expected := "end=" + endSecs + "&q=up&start=" + startSecs
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "none:9090", "-origin-type", "prometheus", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := Client{config: oc}
+
+ u := &url.URL{RawQuery: "q=up"}
+ r, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ e := &timeseries.Extent{Start: start, End: end}
+ client.SetExtent(r, nil, e)
+
+ if expected != r.URL.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, r.URL.RawQuery)
+ }
+}
+
+func TestFastForwardURL(t *testing.T) {
+
+ expected := "q=up"
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "none:9090", "-origin-type", "prometheus", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := Client{config: oc}
+
+ u := &url.URL{Path: "/query_range", RawQuery: "q=up&start=1&end=1&step=1"}
+ r, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+
+ u2, err := client.FastForwardURL(r)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if expected != u2.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, u2.RawQuery)
+ }
+
+}
+
+func TestBuildUpstreamURL(t *testing.T) {
+
+ cfg := config.NewConfig()
+ oc := cfg.Origins["default"]
+ oc.Scheme = "http"
+ oc.Host = "0"
+ oc.PathPrefix = ""
+
+ client := &Client{name: "default", config: oc}
+ r, err := http.NewRequest(http.MethodGet, "http://0/default/query_range", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ client.BuildUpstreamURL(r)
+
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_health.go b/internal/proxy/origins/reverseproxycache/handler_health.go
new file mode 100644
index 000000000..9fe4229f7
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_health.go
@@ -0,0 +1,63 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+// HealthHandler checks the health of the Configured Upstream Origin
+func (c *Client) HealthHandler(w http.ResponseWriter, r *http.Request) {
+
+ if c.healthURL == nil {
+ c.populateHeathCheckRequestValues()
+ }
+
+ if c.healthMethod == "-" {
+ w.WriteHeader(400)
+ w.Write([]byte("Health Check URL not Configured for origin: " + c.config.Name))
+ return
+ }
+
+ req, _ := http.NewRequest(c.healthMethod, c.healthURL.String(), nil)
+ req = req.WithContext(r.Context())
+
+ req.Header = c.healthHeaders
+ engines.DoProxy(w, req)
+}
+
+func (c *Client) populateHeathCheckRequestValues() {
+
+ oc := c.config
+
+ if oc.HealthCheckUpstreamPath == "-" {
+ oc.HealthCheckUpstreamPath = "/"
+ }
+ if oc.HealthCheckVerb == "-" {
+ oc.HealthCheckVerb = http.MethodGet
+ }
+
+ c.healthURL = c.BaseURL()
+ c.healthURL.Path += oc.HealthCheckUpstreamPath
+ c.healthURL.RawQuery = oc.HealthCheckQuery
+ c.healthMethod = oc.HealthCheckVerb
+
+ if oc.HealthCheckHeaders != nil {
+ c.healthHeaders = http.Header{}
+ headers.UpdateHeaders(c.healthHeaders, oc.HealthCheckHeaders)
+ }
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_health_test.go b/internal/proxy/origins/reverseproxycache/handler_health_test.go
new file mode 100644
index 000000000..90b561b8f
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_health_test.go
@@ -0,0 +1,95 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestHealthHandler(t *testing.T) {
+
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "rpc", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ client.healthURL = &url.URL{}
+ client.healthMethod = "-"
+ client.HealthHandler(w, r)
+ resp := w.Result()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status: 400 got %d.", resp.StatusCode)
+ }
+
+ client.healthURL = nil
+ w = httptest.NewRecorder()
+ client.HealthHandler(w, r)
+ resp = w.Result()
+ if resp.StatusCode != 200 {
+ t.Errorf("Expected status: 200 got %d.", resp.StatusCode)
+ }
+
+}
+
+func TestHealthHandlerCustomPath(t *testing.T) {
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("../../../../testdata/test.custom_health.conf", client.DefaultPathConfigs, 200, "{}", nil, "rpc", "/health", "debug")
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ client.HealthHandler(w, r)
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d.", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != "{}" {
+ t.Errorf("expected '{}' got %s.", bodyBytes)
+ }
+
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_proxy.go b/internal/proxy/origins/reverseproxycache/handler_proxy.go
new file mode 100644
index 000000000..3e76db4ee
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_proxy.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyHandler will proxy the inbound HTTP Request to the configured origin
+func (c *Client) ProxyHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.DoProxy(w, r)
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_proxy_test.go b/internal/proxy/origins/reverseproxycache/handler_proxy_test.go
new file mode 100644
index 000000000..d431143ea
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_proxy_test.go
@@ -0,0 +1,41 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyHandler(t *testing.T) {
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "rpc", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ client.ProxyHandler(w, r)
+ resp := w.Result()
+ if resp.StatusCode != 200 {
+ t.Errorf("Expected status: 200 got %d.", resp.StatusCode)
+ }
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_proxycache.go b/internal/proxy/origins/reverseproxycache/handler_proxycache.go
new file mode 100644
index 000000000..94e7ed534
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_proxycache.go
@@ -0,0 +1,26 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/proxy/engines"
+)
+
+// ProxyCacheHandler routes an HTTP Request through the Object Proxy Cache
+func (c *Client) ProxyCacheHandler(w http.ResponseWriter, r *http.Request) {
+ r.URL = c.BuildUpstreamURL(r)
+ engines.ObjectProxyCacheRequest(w, r)
+}
diff --git a/internal/proxy/origins/reverseproxycache/handler_proxycache_test.go b/internal/proxy/origins/reverseproxycache/handler_proxycache_test.go
new file mode 100644
index 000000000..1d5a5c39c
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/handler_proxycache_test.go
@@ -0,0 +1,40 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+func TestProxyCacheHandler(t *testing.T) {
+ client := &Client{name: "test"}
+ ts, w, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "rpc", "/health", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ client.config.HTTPClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ client.ProxyCacheHandler(w, r)
+ resp := w.Result()
+ if resp.StatusCode != 200 {
+ t.Errorf("Expected status: 200 got %d.", resp.StatusCode)
+ }
+}
diff --git a/internal/proxy/origins/reverseproxycache/routes.go b/internal/proxy/origins/reverseproxycache/routes.go
new file mode 100644
index 000000000..490c504d6
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/routes.go
@@ -0,0 +1,69 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/handlers"
+ "github.com/Comcast/trickster/internal/proxy/methods"
+)
+
+func (c *Client) registerHandlers() {
+ c.handlersRegistered = true
+ c.handlers = make(map[string]http.Handler)
+	// This is the registry of handlers that Trickster supports for the Reverse Proxy Cache;
+	// they can be referenced by name (map key) in configuration files
+ c.handlers["health"] = http.HandlerFunc(c.HealthHandler)
+ c.handlers["proxy"] = http.HandlerFunc(c.ProxyHandler)
+ c.handlers["proxycache"] = http.HandlerFunc(c.ProxyCacheHandler)
+ c.handlers["localresponse"] = http.HandlerFunc(handlers.HandleLocalResponse)
+}
+
+// Handlers returns a map of the HTTP Handlers the client has registered
+func (c *Client) Handlers() map[string]http.Handler {
+ if !c.handlersRegistered {
+ c.registerHandlers()
+ }
+ return c.handlers
+}
+
+// DefaultPathConfigs returns the default PathConfigs for the given OriginType
+func (c *Client) DefaultPathConfigs(oc *config.OriginConfig) map[string]*config.PathConfig {
+
+ cm := methods.CacheableHTTPMethods()
+ um := methods.UncacheableHTTPMethods()
+
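+	// Path keys combine the path with the methods they serve, so the cacheable-methods
+	// entry below is keyed "/-GET-HEAD" and the uncacheable-methods entry follows the same pattern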
+ paths := map[string]*config.PathConfig{
+ "/-" + strings.Join(cm, "-"): {
+ Path: "/",
+ HandlerName: "proxycache",
+ Methods: cm,
+ OriginConfig: oc,
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+ "/-" + strings.Join(um, "-"): {
+ Path: "/",
+ HandlerName: "proxy",
+ Methods: um,
+ OriginConfig: oc,
+ MatchType: config.PathMatchTypePrefix,
+ MatchTypeName: "prefix",
+ },
+ }
+ return paths
+}
diff --git a/internal/proxy/origins/reverseproxycache/routes_test.go b/internal/proxy/origins/reverseproxycache/routes_test.go
new file mode 100644
index 000000000..f4819af8c
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/routes_test.go
@@ -0,0 +1,64 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/request"
+ tu "github.com/Comcast/trickster/internal/util/testing"
+)
+
+const localResponse = "localresponse"
+
+func TestRegisterHandlers(t *testing.T) {
+ c := &Client{}
+ c.registerHandlers()
+ if _, ok := c.handlers[localResponse]; !ok {
+ t.Errorf("expected to find handler named: %s", localResponse)
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ c := &Client{}
+ m := c.Handlers()
+ if _, ok := m[localResponse]; !ok {
+ t.Errorf("expected to find handler named: %s", localResponse)
+ }
+}
+
+func TestDefaultPathConfigs(t *testing.T) {
+ client := &Client{name: "test"}
+ ts, _, r, hc, err := tu.NewTestInstance("", client.DefaultPathConfigs, 200, "{}", nil, "rpc", "/", "debug")
+ rsc := request.GetResources(r)
+ rsc.OriginClient = client
+ client.config = rsc.OriginConfig
+ client.webClient = hc
+ defer ts.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ dpc := client.DefaultPathConfigs(client.config)
+
+ if _, ok := dpc["/-GET-HEAD"]; !ok {
+		t.Errorf("expected to find path config keyed: %s", "/-GET-HEAD")
+ }
+
+ const expectedLen = 2
+ if len(dpc) != expectedLen {
+ t.Errorf("expected ordered length to be: %d got %d", expectedLen, len(dpc))
+ }
+
+}
diff --git a/internal/proxy/origins/reverseproxycache/rpc.go b/internal/proxy/origins/reverseproxycache/rpc.go
new file mode 100644
index 000000000..0a9094f8d
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/rpc.go
@@ -0,0 +1,68 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy"
+)
+
+// Client Implements the Proxy Client Interface
+type Client struct {
+ name string
+ config *config.OriginConfig
+ cache cache.Cache
+ webClient *http.Client
+ handlers map[string]http.Handler
+ handlersRegistered bool
+
+ healthURL *url.URL
+ healthMethod string
+ healthHeaders http.Header
+}
+
+// NewClient returns a new Client Instance
+func NewClient(name string, oc *config.OriginConfig, cache cache.Cache) (*Client, error) {
+ c, err := proxy.NewHTTPClient(oc)
+ return &Client{name: name, config: oc, cache: cache, webClient: c}, err
+}
+
+// Configuration returns the upstream Configuration for this Client
+func (c *Client) Configuration() *config.OriginConfig {
+ return c.config
+}
+
+// HTTPClient returns the HTTP Transport the client is using
+func (c *Client) HTTPClient() *http.Client {
+ return c.webClient
+}
+
+// Cache returns a handle to the Cache instance used by the Client
+func (c *Client) Cache() cache.Cache {
+ return c.cache
+}
+
+// Name returns the name of the upstream Configuration proxied by the Client
+func (c *Client) Name() string {
+ return c.name
+}
+
+// SetCache sets the Cache object the client will use when caching origin content
+func (c *Client) SetCache(cc cache.Cache) {
+ c.cache = cc
+}
diff --git a/internal/proxy/origins/reverseproxycache/rpc_test.go b/internal/proxy/origins/reverseproxycache/rpc_test.go
new file mode 100644
index 000000000..e3d77e3a2
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/rpc_test.go
@@ -0,0 +1,97 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+)
+
+func TestReverseProxyCacheClientInterfacing(t *testing.T) {
+
+ // this test ensures the client will properly conform to the
+ // Client interface
+
+ c := &Client{name: "test"}
+ var oc origins.Client = c
+
+ if oc.Name() != "test" {
+ t.Errorf("expected %s got %s", "test", oc.Name())
+ }
+
+}
+
+func TestNewClient(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if c == nil {
+ t.Errorf("expected client named %s", "test")
+ }
+}
+
+func TestHTTPClient(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if c.HTTPClient() == nil {
+ t.Errorf("expected HTTPClient for RPC client named %s", "test")
+ }
+}
+
+func TestGetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if c.Cache() != nil {
+ t.Errorf("expected nil Cache for RPC client named %s", "test")
+ }
+}
+
+func TestClientName(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if c.Name() != "test" {
+ t.Errorf("expected RPC client named %s", "test")
+ }
+}
+
+func TestSetCache(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ c.SetCache(nil)
+ if c.Cache() != nil {
+ t.Errorf("expected nil cache for client named %s", "test")
+ }
+}
+
+func TestConfiguration(t *testing.T) {
+ c, err := NewClient("test", config.NewOriginConfig(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+ if c.Configuration() == nil {
+ t.Error(errors.New("expected non-nil config"))
+ }
+}
diff --git a/internal/proxy/origins/reverseproxycache/url.go b/internal/proxy/origins/reverseproxycache/url.go
new file mode 100644
index 000000000..7e5014f10
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/url.go
@@ -0,0 +1,48 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// BaseURL returns a URL in the form of scheme://host/path based on the proxy configuration
+func (c *Client) BaseURL() *url.URL {
+ u := &url.URL{}
+ u.Scheme = c.config.Scheme
+ u.Host = c.config.Host
+ u.Path = c.config.PathPrefix
+ return u
+}
+
+// BuildUpstreamURL will merge the downstream request with the BaseURL to construct the full upstream URL
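+// For example, a downstream request for "/default/foo?x=1" against an origin named
+// "default" yields an upstream URL of scheme://host/<PathPrefix>/foo?x=1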
+func (c *Client) BuildUpstreamURL(r *http.Request) *url.URL {
+ u := c.BaseURL()
+
+ if strings.HasPrefix(r.URL.Path, "/"+c.name+"/") {
+ u.Path += strings.Replace(r.URL.Path, "/"+c.name+"/", "/", 1)
+ if u.Path == "//" {
+ u.Path = "/"
+ }
+ } else {
+ u.Path += r.URL.Path
+ }
+
+ u.RawQuery = r.URL.RawQuery
+ u.Fragment = r.URL.Fragment
+ u.User = r.URL.User
+ return u
+}
diff --git a/internal/proxy/origins/reverseproxycache/url_test.go b/internal/proxy/origins/reverseproxycache/url_test.go
new file mode 100644
index 000000000..aa32ac11d
--- /dev/null
+++ b/internal/proxy/origins/reverseproxycache/url_test.go
@@ -0,0 +1,62 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package reverseproxycache
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func TestBuildUpstreamURL(t *testing.T) {
+
+ expected := "q=up&start=1&end=1&step=1"
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "none:9090", "-origin-type", "rpc", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ oc := config.Origins["default"]
+ client := Client{config: oc, name: "default"}
+
+ u := &url.URL{Path: "/default/query_range", RawQuery: expected}
+
+ r, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u2 := client.BuildUpstreamURL(r)
+
+ if expected != u2.RawQuery {
+ t.Errorf("\nexpected [%s]\ngot [%s]", expected, u2.RawQuery)
+ }
+
+ u = &url.URL{Path: "/default//", RawQuery: ""}
+
+ r, err = http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ u2 = client.BuildUpstreamURL(r)
+
+ if u2.Path != "/" {
+ t.Errorf("\nexpected [%s]\ngot [%s]", "/", u2.Path)
+ }
+
+}
diff --git a/internal/proxy/origins/timeseries_client.go b/internal/proxy/origins/timeseries_client.go
new file mode 100644
index 000000000..995feaecf
--- /dev/null
+++ b/internal/proxy/origins/timeseries_client.go
@@ -0,0 +1,51 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package origins
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// TimeseriesClient is the primary interface for interoperating with Trickster and upstream TSDBs
+type TimeseriesClient interface {
+ // Handlers returns a map of the HTTP Handlers the client has registered
+ Handlers() map[string]http.Handler
+ // DefaultPathConfigs returns the default PathConfigs for the given OriginType
+ DefaultPathConfigs(*config.OriginConfig) map[string]*config.PathConfig
+ // ParseTimeRangeQuery returns a timeseries.TimeRangeQuery based on the provided HTTP Request
+ ParseTimeRangeQuery(*http.Request) (*timeseries.TimeRangeQuery, error)
+ // Configuration returns the configuration for the Proxy Client
+ Configuration() *config.OriginConfig
+ // Name returns the name of the origin the Proxy Client is handling
+ Name() string
+ // FastForwardURL returns the URL to the origin to collect Fast Forward data points based on the provided HTTP Request
+ FastForwardURL(*http.Request) (*url.URL, error)
+ // SetExtent will update an upstream request's timerange parameters based on the provided timeseries.Extent
+ SetExtent(*http.Request, *timeseries.TimeRangeQuery, *timeseries.Extent)
+ // UnmarshalTimeseries will return a Timeseries from the provided byte slice
+ UnmarshalTimeseries([]byte) (timeseries.Timeseries, error)
+ // MarshalTimeseries will return a byte slice from the provided Timeseries
+ MarshalTimeseries(timeseries.Timeseries) ([]byte, error)
+ // UnmarshalInstantaneous will return an Instantaneous Timeseries (only one value instead of a series) from the provided byte slice
+ UnmarshalInstantaneous([]byte) (timeseries.Timeseries, error)
+ // HTTPClient will return the HTTP Client for this Origin
+ HTTPClient() *http.Client
+ // SetCache sets the Cache object the client will use when caching origin content
+ SetCache(cache.Cache)
+}
diff --git a/internal/proxy/params/params.go b/internal/proxy/params/params.go
new file mode 100644
index 000000000..ae9d4048b
--- /dev/null
+++ b/internal/proxy/params/params.go
@@ -0,0 +1,39 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package params
+
+import "net/url"
+
+// UpdateParams updates the provided query parameters collection with the provided updates
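+// Keys prefixed with "-" remove the named parameter, keys prefixed with "+" append
+// the provided value to the named parameter, and all other keys set (replace) the
+// parameter's value; empty keys are ignored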
+func UpdateParams(params url.Values, updates map[string]string) {
+ if params == nil || updates == nil || len(updates) == 0 {
+ return
+ }
+ for k, v := range updates {
+ if len(k) == 0 {
+ continue
+ }
+ if k[0:1] == "-" {
+ k = k[1:]
+ params.Del(k)
+ continue
+ }
+ if k[0:1] == "+" {
+ k = k[1:]
+ params.Add(k, v)
+ continue
+ }
+ params.Set(k, v)
+ }
+}
diff --git a/internal/proxy/params/params_test.go b/internal/proxy/params/params_test.go
new file mode 100644
index 000000000..18f7115d6
--- /dev/null
+++ b/internal/proxy/params/params_test.go
@@ -0,0 +1,44 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package params
+
+import (
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+func TestUpdateParams(t *testing.T) {
+
+ params := url.Values{"param1": {"value1"}, "param3": {"value3"}, "param4": {"value4"}}
+ updates := map[string]string{"param2": "value2", "+param3": "value3.1", "-param4": "", "": "empty_key_ignored"}
+ expected := url.Values{"param1": {"value1"}, "param2": {"value2"}, "param3": {"value3", "value3.1"}}
+
+ UpdateParams(params, nil)
+ if len(params) != 3 {
+		t.Errorf("expected %d got %d", 3, len(params))
+ }
+
+ UpdateParams(params, map[string]string{})
+ if len(params) != 3 {
+		t.Errorf("expected %d got %d", 3, len(params))
+ }
+
+ UpdateParams(params, updates)
+ if !reflect.DeepEqual(params, expected) {
+		t.Errorf("mismatch\nexpected: %v\ngot: %v", expected, params)
+ }
+
+}
diff --git a/internal/proxy/proxy.go b/internal/proxy/proxy.go
new file mode 100644
index 000000000..acc4f27d9
--- /dev/null
+++ b/internal/proxy/proxy.go
@@ -0,0 +1,179 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package proxy
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/netutil"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+// NewHTTPClient returns an HTTP client configured to the specifications of the
+// running Trickster config.
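+// A nil OriginConfig returns a nil client and no error.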
+func NewHTTPClient(oc *config.OriginConfig) (*http.Client, error) {
+
+ if oc == nil {
+ return nil, nil
+ }
+
+ var TLSConfig *tls.Config
+
+ if oc.TLS != nil {
+ TLSConfig = &tls.Config{InsecureSkipVerify: oc.TLS.InsecureSkipVerify}
+
+ if oc.TLS.ClientCertPath != "" && oc.TLS.ClientKeyPath != "" {
+ // load client cert
+ cert, err := tls.LoadX509KeyPair(oc.TLS.ClientCertPath, oc.TLS.ClientKeyPath)
+ if err != nil {
+ return nil, err
+ }
+ TLSConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ if oc.TLS.CertificateAuthorityPaths != nil && len(oc.TLS.CertificateAuthorityPaths) > 0 {
+
+ // credit snippet to https://forfuncsake.github.io/post/2017/08/trust-extra-ca-cert-in-go-app/
+ // Get the SystemCertPool, continue with an empty pool on error
+ rootCAs, _ := x509.SystemCertPool()
+ if rootCAs == nil {
+ rootCAs = x509.NewCertPool()
+ }
+
+ for _, path := range oc.TLS.CertificateAuthorityPaths {
+ // Read in the cert file
+ certs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ // Append our cert to the system pool
+ if ok := rootCAs.AppendCertsFromPEM(certs); !ok {
+ return nil, fmt.Errorf("unable to append to CA Certs from file %s", path)
+ }
+ }
+
+ // Trust the augmented cert pool in our client
+ TLSConfig.RootCAs = rootCAs
+ }
+ }
+
+ return &http.Client{
+ Timeout: oc.Timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{KeepAlive: time.Duration(oc.KeepAliveTimeoutSecs) * time.Second}).Dial,
+ MaxIdleConns: oc.MaxIdleConns,
+ MaxIdleConnsPerHost: oc.MaxIdleConns,
+ TLSClientConfig: TLSConfig,
+ },
+ }, nil
+
+}
+
+// NewListener creates a new network listener that obeys the configured maximum
+// connection limit and monitors connections with Prometheus metrics.
+//
+// It works by creating a listener and wrapping it with a netutil.LimitListener
+// to enforce the limit.
+//
+// The limiter simply blocks, waiting for resources to become available,
+// whenever clients exceed the limit.
+//
+// To simplify setting limits, the listener is wrapped with yet another object
+// that observes the connections to set a gauge with the current number of
+// connections (which is sampled through scrapes), and a set of counter metrics
+// for connections accepted, rejected and closed.
+func NewListener(listenAddress string, listenPort, connectionsLimit int, tlsConfig *tls.Config) (net.Listener, error) {
+
+ var listener net.Listener
+ var err error
+
+ listenerType := "http"
+
+ if tlsConfig != nil {
+ listenerType = "https"
+ listener, err = tls.Listen("tcp", fmt.Sprintf("%s:%d", listenAddress, listenPort), tlsConfig)
+ } else {
+ listener, err = net.Listen("tcp", fmt.Sprintf("%s:%d", listenAddress, listenPort))
+ }
+ if err != nil {
+ // so we can exit one level above, this usually means that the port is in use
+ return nil, err
+ }
+
+ log.Debug("starting proxy listener", log.Pairs{
+ "connectionsLimit": connectionsLimit,
+ "scheme": listenerType,
+ "address": listenAddress,
+ "port": listenPort,
+ })
+
+ if connectionsLimit > 0 {
+ listener = netutil.LimitListener(listener, connectionsLimit)
+ metrics.ProxyMaxConnections.Set(float64(connectionsLimit))
+ }
+
+ return &connectionsLimitObProxy{
+ listener,
+ }, nil
+}
+
+type connectionsLimitObProxy struct {
+ net.Listener
+}
+
+// Accept implements Listener.Accept
+func (l *connectionsLimitObProxy) Accept() (net.Conn, error) {
+
+ metrics.ProxyConnectionRequested.Inc()
+
+ c, err := l.Listener.Accept()
+ if err != nil {
+ // This generally happens when a connection gives up waiting for resources and
+ // just goes away on timeout, thus it's more of a client side error, which
+ // gets reflected on the server.
+ log.Debug("failed to accept client connection", log.Pairs{"reason": err})
+ metrics.ProxyConnectionFailed.Inc()
+ return c, err
+ }
+
+ metrics.ProxyActiveConnections.Inc()
+ metrics.ProxyConnectionAccepted.Inc()
+
+ return observedConnection{c}, nil
+}
+
+type observedConnection struct {
+ net.Conn
+}
+
+func (o observedConnection) Close() error {
+ err := o.Conn.Close()
+
+ metrics.ProxyActiveConnections.Dec()
+ metrics.ProxyConnectionClosed.Inc()
+
+ return err
+}
diff --git a/internal/proxy/proxy_test.go b/internal/proxy/proxy_test.go
new file mode 100644
index 000000000..efe78138d
--- /dev/null
+++ b/internal/proxy/proxy_test.go
@@ -0,0 +1,206 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package proxy
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/routing"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+	metrics.Init() // metrics must be initialized explicitly so the proxy connection metrics used by NewListener are registered
+}
+
+func TestNewHTTPClient(t *testing.T) {
+
+ // test invalid origin config
+ c, err := NewHTTPClient(nil)
+ if c != nil {
+ t.Errorf("expected nil client, got %v", c)
+ }
+ if err != nil {
+ t.Error(err)
+ }
+
+ const caFile = "../../testdata/test.rootca.pem"
+ const caFileInvalid1 = caFile + ".invalid"
+ const caFileInvalid2 = "../../testdata/test.06.cert.pem"
+
+ // test good originconfig, no CA
+ oc := config.NewOriginConfig()
+ _, err = NewHTTPClient(oc)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test good originconfig, 1 good CA
+ oc.TLS.CertificateAuthorityPaths = []string{caFile}
+ _, err = NewHTTPClient(oc)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // test good originconfig, 1 bad CA (file not found)
+ oc.TLS.CertificateAuthorityPaths = []string{caFileInvalid1}
+ _, err = NewHTTPClient(oc)
+ if err == nil {
+ t.Errorf("expected error for no such file or directory on %s", caFileInvalid1)
+ }
+
+ // test good originconfig, 1 bad CA (junk content)
+ oc.TLS.CertificateAuthorityPaths = []string{caFileInvalid2}
+ _, err = NewHTTPClient(oc)
+ if err == nil {
+ t.Errorf("expected error for unable to append to CA Certs from file %s", caFileInvalid2)
+ }
+
+ oc.TLS.CertificateAuthorityPaths = []string{}
+ oc.TLS.ClientCertPath = "../../testdata/test.01.cert.pem"
+ oc.TLS.ClientKeyPath = "../../testdata/test.01.key.pem"
+ _, err = NewHTTPClient(oc)
+ if err != nil {
+ t.Error(err)
+ }
+
+ oc.TLS.ClientCertPath = "../../testdata/test.05.cert.pem"
+ oc.TLS.ClientKeyPath = "../../testdata/test.05.key.pem"
+ oc.TLS.CertificateAuthorityPaths = []string{}
+ _, err = NewHTTPClient(oc)
+ if err == nil {
+		t.Errorf("expected error: failed to find any PEM data in key input for file %s", oc.TLS.ClientKeyPath)
+ }
+}
+
+func TestNewListenerErr(t *testing.T) {
+ config.NewConfig()
+ l, err := NewListener("-", 0, 0, nil)
+ if err == nil {
+ l.Close()
+ t.Errorf("expected error: %s", `listen tcp: lookup -: no such host`)
+ }
+}
+
+func TestNewListenerTLS(t *testing.T) {
+
+ c := config.NewConfig()
+ oc := c.Origins["default"]
+ c.Frontend.ServeTLS = true
+
+ tc := oc.TLS
+ oc.TLS.ServeTLS = true
+ tc.FullChainCertPath = "../../testdata/test.01.cert.pem"
+ tc.PrivateKeyPath = "../../testdata/test.01.key.pem"
+
+ tlsConfig, err := c.TLSCertConfig()
+ if err != nil {
+ t.Error(err)
+ }
+
+	l, err := NewListener("", 0, 0, tlsConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer l.Close()
+
+}
+
+func TestListenerConnectionLimitWorks(t *testing.T) {
+
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(200)
+ fmt.Fprint(w, "hello!")
+ }
+ es := httptest.NewServer(http.HandlerFunc(handler))
+ defer es.Close()
+
+ err := config.Load("trickster", "test", []string{"-origin-url", es.URL, "-origin-type", "prometheus"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ tt := []struct {
+ Name string
+ ListenPort int
+ ConnectionsLimit int
+ Clients int
+ expectedErr string
+ }{
+ {
+ "Without connection limit",
+ 34001,
+ 0,
+ 1,
+ "",
+ },
+ {
+ "With connection limit of 10",
+ 34002,
+ 10,
+ 10,
+ "",
+ },
+ {
+ "With connection limit of 1, but with 10 clients",
+ 34003,
+ 1,
+ 10,
+ "Get http://localhost:34003/: net/http: request canceled (Client.Timeout exceeded while awaiting headers)",
+ },
+ }
+
+ http.DefaultClient.Timeout = 100 * time.Millisecond
+
+ for _, tc := range tt {
+ t.Run(tc.Name, func(t *testing.T) {
+			l, err := NewListener("", tc.ListenPort, tc.ConnectionsLimit, nil)
+			if err != nil {
+				t.Fatalf("failed to create listener: %s", err)
+			}
+			defer l.Close()
+
+			go func() {
+				http.Serve(l, routing.Router)
+			}()
+
+ for i := 0; i < tc.Clients; i++ {
+ r, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:%d/", tc.ListenPort), nil)
+ if err != nil {
+ t.Fatalf("failed to create request: %s", err)
+ }
+ res, err := http.DefaultClient.Do(r)
+ if err != nil {
+ if fmt.Sprintf("%s", err) != tc.expectedErr {
+ t.Fatalf("unexpected error when executing request: %s", err)
+ }
+ continue
+ }
+ defer func() {
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ }()
+ }
+
+ })
+ }
+}
diff --git a/internal/proxy/ranges/byterange/multipart.go b/internal/proxy/ranges/byterange/multipart.go
new file mode 100644
index 000000000..7f35c382f
--- /dev/null
+++ b/internal/proxy/ranges/byterange/multipart.go
@@ -0,0 +1,255 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+//go:generate msgp
+
+package byterange
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "sort"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/util/md5"
+)
+
+// MultipartByteRange represents one part of a list of multipart byte ranges
+type MultipartByteRange struct {
+ Range Range `msg:"range"`
+ Content []byte `msg:"content"`
+}
+
+// MultipartByteRanges is a list of type MultipartByteRange
+type MultipartByteRanges map[Range]*MultipartByteRange
+
+// Merge merges the source MultipartByteRanges map into the subject map
+func (mbrs MultipartByteRanges) Merge(src MultipartByteRanges) {
+ if src == nil || len(src) == 0 || mbrs == nil {
+ return
+ }
+ for _, v := range src.Ranges() {
+ mbrs[v] = src[v]
+ }
+ mbrs.Compress()
+}
+
+// PackableMultipartByteRanges returns a version of the subject MultipartByteRanges map
+// that is packable by most marshallers, which may require that maps have a key type of string
+func (mbrs MultipartByteRanges) PackableMultipartByteRanges() map[string]*MultipartByteRange {
+ out := make(map[string]*MultipartByteRange)
+ for r, p := range mbrs {
+ out[r.String()] = p
+ }
+ return out
+}
+
+// Body returns http headers and body representing the subject MultipartByteRanges map,
+// which is suitable for responding to an HTTP request for the full cached range
+func (mbrs MultipartByteRanges) Body(fullContentLength int64, contentType string) (http.Header, []byte) {
+
+ ranges := mbrs.Ranges()
+ if ranges == nil || len(ranges) == 0 {
+ return nil, []byte{}
+ }
+
+ // if just one range part, return a normal range response (not multipart)
+ if len(ranges) == 1 {
+ r := ranges[0]
+ return http.Header{
+ headers.NameContentType: []string{contentType},
+ headers.NameContentRange: []string{mbrs[r].Range.ContentRangeHeader(fullContentLength)},
+ }, mbrs[r].Content
+ }
+
+ // otherwise, we return a multipart response
+
+ sort.Sort(ranges)
+
+ boundary := md5.Checksum(ranges.String())
+ var bw = bytes.NewBuffer(make([]byte, 0))
+ mw := multipart.NewWriter(bw)
+ mw.SetBoundary(boundary)
+
+ for _, r := range ranges {
+ pw, err := mw.CreatePart(
+ textproto.MIMEHeader{
+ headers.NameContentType: []string{contentType},
+ headers.NameContentRange: []string{mbrs[r].Range.ContentRangeHeader(fullContentLength)},
+ },
+ )
+ if err != nil {
+ continue
+ }
+ pw.Write(mbrs[r].Content)
+ }
+ mw.Close()
+
+ return http.Header{
+ headers.NameContentType: []string{headers.ValueMultipartByteRanges + boundary},
+ }, bw.Bytes()
+
+}
+
+// Ranges returns a Ranges object from the MultipartByteRanges Object
+func (mbrs MultipartByteRanges) Ranges() Ranges {
+ if len(mbrs) == 0 {
+ return Ranges{}
+ }
+ ranges := make(Ranges, 0, len(mbrs))
+ for _, v := range mbrs {
+ ranges = append(ranges, v.Range)
+ }
+ sort.Sort(ranges)
+ return ranges
+}
+
+// Compress will take a Multipart Byte Range Map and compress it such that adjacent ranges are merged
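+// For example, parts covering ranges 0-5 and 6-10 are merged into a single part
+// covering 0-10 with their contents concatenated.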
+func (mbrs MultipartByteRanges) Compress() {
+
+ if len(mbrs.Ranges()) < 2 {
+ return
+ }
+
+ cnt := 0
+ for len(mbrs) != cnt {
+ cnt = len(mbrs)
+ var prev *MultipartByteRange
+ for _, r := range mbrs.Ranges() {
+ curr := mbrs[r]
+ if prev != nil && r.Start == prev.Range.End+1 {
+
+ newPart := &MultipartByteRange{Range: Range{Start: prev.Range.Start, End: curr.Range.End}}
+ l := newPart.Range.End - newPart.Range.Start + 1
+ body := make([]byte, l)
+
+ copy(body[:len(prev.Content)], prev.Content[:])
+ copy(body[len(prev.Content):], curr.Content[:])
+ newPart.Content = body
+ delete(mbrs, r)
+ delete(mbrs, prev.Range)
+ mbrs[newPart.Range] = newPart
+ curr = newPart
+ }
+ prev = curr
+ }
+ }
+
+}
+
+// ParseMultipartRangeResponseBody returns a MultipartByteRanges from the provided body
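+// It returns the parsed parts keyed by Range, the part Content-Type, the list of
+// parsed Ranges, the full content length indicated by the parts' Content-Range
+// headers (-1 if it could not be determined), and any parsing error encountered.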
+func ParseMultipartRangeResponseBody(body io.Reader, contentTypeHeader string) (MultipartByteRanges, string, Ranges, int64, error) {
+ parts := make(MultipartByteRanges)
+ ranges := make(Ranges, 0)
+ fullContentLength := int64(-1)
+ ct := ""
+ if strings.HasPrefix(contentTypeHeader, headers.ValueMultipartByteRanges) {
+ separator := contentTypeHeader[len(headers.ValueMultipartByteRanges):]
+ if separator != "" {
+ mr := multipart.NewReader(body, separator)
+ for {
+ p, err := mr.NextPart()
+ if err != nil {
+ // EOF triggers the end of the loop, but
+ // it can sometimes come in as "multipart: NextPart: EOF"
+ // so it is more reliable to check for EOF as a suffix
+ if strings.HasSuffix(err.Error(), "EOF") {
+ break
+ }
+ return nil, "", nil, -1, err
+ }
+
+ if _, ok := p.Header[headers.NameContentRange]; ok {
+ r, rcl, err := ParseContentRangeHeader(p.Header.Get(headers.NameContentRange))
+ if ct == "" {
+ ct = p.Header.Get(headers.NameContentType)
+ }
+
+ fullContentLength = rcl
+ if err != nil {
+ return nil, "", nil, -1, err
+ }
+ ranges = append(ranges, r)
+ bdy, err := ioutil.ReadAll(p)
+ if err != nil {
+ return nil, "", nil, -1, err
+ }
+ mpbr := &MultipartByteRange{
+ Range: r,
+ Content: bdy,
+ }
+ parts[r] = mpbr
+ }
+ }
+ }
+ }
+	return parts, ct, ranges, fullContentLength, nil
+}
+
+// ExtractResponseRange returns http headers and body representing the subject MultipartByteRanges map,
+// cropped to the provided ranges
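+// When the subject map is empty, the provided body is treated as the full object
+// and the requested ranges are extracted directly from it.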
+func (mbrs MultipartByteRanges) ExtractResponseRange(ranges Ranges, fullContentLength int64, contentType string, body []byte) (http.Header, []byte) {
+
+ if ranges == nil || len(ranges) == 0 {
+ return nil, body
+ }
+
+ useBody := len(mbrs) == 0
+ if useBody {
+ if body == nil {
+ return nil, nil
+ }
+ fullContentLength = int64(len(body))
+ }
+
+ m := make(MultipartByteRanges)
+
+ for _, r := range ranges {
+ rcl := (r.End - r.Start) + 1
+ mbr := &MultipartByteRange{Range: r, Content: make([]byte, rcl)}
+
+ if useBody {
+ copy(mbr.Content[:], body[r.Start:r.End+1])
+ } else {
+ brs := mbrs.Ranges()
+ if brs != nil {
+ for _, r2 := range brs {
+
+ p := mbrs[r2]
+
+ if r.Start >= p.Range.Start && r.End <= p.Range.End {
+
+ // unsure if we need this depending upon how ranges are filled and compressed
+ // so leaving it present but commented for now.
+ startOffset := r.Start - p.Range.Start
+ endOffset := (r.End - p.Range.Start) + 1
+ // mbr.Content = p.Content[startOffset : int64(len(p.Content))-endOffset]
+
+ // and the shortcut alternative method that seems to work for current use cases
+ mbr.Content = p.Content[startOffset:endOffset]
+ break
+ }
+ }
+ }
+ }
+
+ m[r] = mbr
+
+ }
+ return m.Body(fullContentLength, contentType)
+}
diff --git a/internal/proxy/ranges/byterange/multipart_gen.go b/internal/proxy/ranges/byterange/multipart_gen.go
new file mode 100644
index 000000000..bd6462c68
--- /dev/null
+++ b/internal/proxy/ranges/byterange/multipart_gen.go
@@ -0,0 +1,126 @@
+package byterange
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *MultipartByteRange) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "range":
+ err = z.Range.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ case "content":
+ z.Content, err = dc.ReadBytes(z.Content)
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *MultipartByteRange) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "range"
+ err = en.Append(0x82, 0xa5, 0x72, 0x61, 0x6e, 0x67, 0x65)
+ if err != nil {
+ return
+ }
+ err = z.Range.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ // write "content"
+ err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteBytes(z.Content)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *MultipartByteRange) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "range"
+ o = append(o, 0x82, 0xa5, 0x72, 0x61, 0x6e, 0x67, 0x65)
+ o, err = z.Range.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ // string "content"
+ o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
+ o = msgp.AppendBytes(o, z.Content)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *MultipartByteRange) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "range":
+ bts, err = z.Range.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ case "content":
+ z.Content, bts, err = msgp.ReadBytesBytes(bts, z.Content)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *MultipartByteRange) Msgsize() (s int) {
+ s = 1 + 6 + z.Range.Msgsize() + 8 + msgp.BytesPrefixSize + len(z.Content)
+ return
+}
diff --git a/internal/proxy/ranges/byterange/multipart_gen_test.go b/internal/proxy/ranges/byterange/multipart_gen_test.go
new file mode 100644
index 000000000..5b4c3bf69
--- /dev/null
+++ b/internal/proxy/ranges/byterange/multipart_gen_test.go
@@ -0,0 +1,123 @@
+package byterange
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalMultipartByteRange(t *testing.T) {
+ v := MultipartByteRange{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgMultipartByteRange(b *testing.B) {
+ v := MultipartByteRange{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgMultipartByteRange(b *testing.B) {
+ v := MultipartByteRange{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalMultipartByteRange(b *testing.B) {
+ v := MultipartByteRange{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeMultipartByteRange(t *testing.T) {
+ v := MultipartByteRange{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := MultipartByteRange{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeMultipartByteRange(b *testing.B) {
+ v := MultipartByteRange{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeMultipartByteRange(b *testing.B) {
+ v := MultipartByteRange{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/internal/proxy/ranges/byterange/multipart_test.go b/internal/proxy/ranges/byterange/multipart_test.go
new file mode 100644
index 000000000..f37a6193f
--- /dev/null
+++ b/internal/proxy/ranges/byterange/multipart_test.go
@@ -0,0 +1,294 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package byterange
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+const testSeparator = "TEST-SEPARATOR"
+const testRange1 = "0-49"
+const testRange2 = "100-149"
+const testContentLength = "150"
+const testContentType1 = headers.ValueTextPlain
+const testPart1Body = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx"
+const testPart2Body = `{ "body": "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJ" }`
+
+var content = []byte(fmt.Sprintf(`--%s
+Content-Type: %s
+Content-Range: bytes %s/%s
+
+%s
+--%s
+Content-Type: %s
+Content-Range: bytes %s/%s
+
+%s
+--%s--`, testSeparator, testContentType1, testRange1, testContentLength,
+ testPart1Body, testSeparator, testContentType1, testRange2, testContentLength, testPart2Body, testSeparator))
+
+func TestParseMultipartRangeResponseBody(t *testing.T) {
+
+ reader := ioutil.NopCloser(bytes.NewBuffer(content))
+
+ parts, ct, ranges, cl, err := ParseMultipartRangeResponseBody(reader, headers.ValueMultipartByteRanges+testSeparator)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if cl != 150 {
+ t.Errorf("expected %d, got %d", 150, cl)
+ }
+
+ if parts == nil {
+ t.Errorf("expected 2 parts, got %v", parts)
+ } else if len(parts) != 2 {
+ t.Errorf("expected 2 parts, got %d", len(parts))
+ }
+
+	if ranges == nil {
+		t.Errorf("expected 2 ranges, got %v", ranges)
+	} else if len(ranges) != 2 {
+		t.Errorf("expected 2 ranges got %d", len(ranges))
+	}
+
+ if ct != testContentType1 {
+ t.Errorf("expected %s, got %s", testContentType1, ct)
+ }
+
+ if parts[ranges[0]].Range.String() != testRange1 {
+ t.Errorf("expected %s, got %s", testRange1, parts[ranges[0]].Range.String())
+ }
+
+ if string(parts[ranges[0]].Content) != testPart1Body {
+		t.Errorf("expected %s, got %s", testPart1Body, string(parts[ranges[0]].Content))
+ }
+
+ if parts[ranges[1]].Range.String() != testRange2 {
+ t.Errorf("expected %s, got %s", testRange2, parts[ranges[1]].Range.String())
+ }
+
+ if ct != testContentType1 {
+ t.Errorf("expected %s, got %s", testContentType1, ct)
+ }
+
+ if string(parts[ranges[1]].Content) != testPart2Body {
+		t.Errorf("expected %s, got %s", testPart2Body, string(parts[ranges[1]].Content))
+ }
+}
+
+func testArtifacts() (MultipartByteRanges, MultipartByteRanges) {
+
+ r1 := &MultipartByteRange{
+ Range: Range{Start: 0, End: 6},
+ Content: []byte("Lorem i"),
+ }
+
+ r2 := &MultipartByteRange{
+ Range: Range{Start: 10, End: 20},
+ Content: []byte("m dolor sit"),
+ }
+
+ m1 := MultipartByteRanges{
+ r1.Range: r1,
+ r2.Range: r2,
+ }
+
+ r3 := &MultipartByteRange{
+ Range: Range{Start: 60, End: 65},
+ Content: []byte("ligend"),
+ }
+
+ r4 := &MultipartByteRange{
+ Range: Range{Start: 69, End: 75},
+ Content: []byte("ignifer"),
+ }
+
+ m2 := MultipartByteRanges{
+ r3.Range: r3,
+ r4.Range: r4,
+ }
+
+ return m1, m2
+
+}
+
+func TestMerge(t *testing.T) {
+
+ m1, m2 := testArtifacts()
+ m1.Merge(m2)
+
+ if len(m1) != 4 {
+ t.Errorf("expected %d got %d", 4, len(m1))
+ }
+
+ // coverage for short bail out condition
+ m1.Merge(nil)
+ if len(m1) != 4 {
+ t.Errorf("expected %d got %d", 4, len(m1))
+ }
+
+}
+
+func TestPackableMultipartByteRanges(t *testing.T) {
+ m1, _ := testArtifacts()
+ m2 := m1.PackableMultipartByteRanges()
+ if len(m2) != 2 {
+ t.Errorf("expected %d got %d", 2, len(m2))
+ }
+}
+
+func TestBody(t *testing.T) {
+
+ m1, _ := testArtifacts()
+ // test multiple range
+ h, b := m1.Body(1222, "text/plain")
+ if !strings.Contains(string(b), "m dolor sit") {
+		t.Errorf("expected body to contain %q", "m dolor sit")
+ }
+ if !strings.HasPrefix(h.Get(headers.NameContentType), "multipart/byteranges") {
+ t.Errorf("expected %s, got %s", "multipart/byteranges", h.Get(headers.NameContentType))
+ }
+
+ delete(m1, m1.Ranges()[1])
+
+ h, b = m1.Body(1222, "text/plain")
+ if strings.Contains(string(b), "m dolor sit") {
+		t.Errorf("expected body not to contain %q", "m dolor sit")
+ }
+
+ if !strings.Contains(string(b), "Lorem i") {
+		t.Errorf("expected body to contain %q", "Lorem i")
+ }
+
+ if !strings.HasPrefix(h.Get(headers.NameContentType), "text/plain") {
+ t.Errorf("expected %s, got %s", "text/plain", h.Get(headers.NameContentType))
+ }
+
+ m2 := make(MultipartByteRanges)
+ h, b = m2.Body(1, "test")
+
+ if h != nil {
+ t.Errorf("expected nil header, got %v", h)
+ }
+
+ if len(b) != 0 {
+ t.Errorf("expected %d got %d", 0, len(b))
+ }
+
+}
+
+func TestCompress(t *testing.T) {
+
+ m1, m2 := testArtifacts()
+
+ r1 := &MultipartByteRange{
+ Range: Range{Start: 7, End: 9},
+ Content: []byte("psu"),
+ }
+ m1[r1.Range] = r1
+ m1.Compress()
+
+ if len(m1) != 1 {
+ t.Errorf("expected %d got %d", 1, len(m1))
+ }
+
+ // test short circuit case
+ delete(m2, m2.Ranges()[1])
+ m2.Compress()
+ if len(m2) != 1 {
+ t.Errorf("expected %d got %d", 1, len(m2))
+ }
+
+}
+
+func TestExtractResponseRange(t *testing.T) {
+
+ m1, _ := testArtifacts()
+
+ r := Ranges{Range{Start: 12, End: 15}}
+
+ h, b := m1.ExtractResponseRange(r, 60, "test", nil)
+
+ if v := h.Get(headers.NameContentType); v != "test" {
+ t.Errorf("expected %s got %s", "test", v)
+ }
+
+ if v := h.Get(headers.NameContentRange); v != "bytes 12-15/60" {
+ t.Errorf("expected %s got %s", "bytes 12-15/60", v)
+ }
+
+ const expected = "dolo"
+ if string(b) != expected {
+ t.Errorf("expected %s got %s", expected, string(b))
+ }
+
+ // test empty range
+ h, b = m1.ExtractResponseRange(nil, 60, "test", nil)
+ if h != nil {
+ t.Errorf("expected nil headers, got %v", h)
+ }
+ if b != nil {
+ t.Errorf("expected nil body, got %s", string(b))
+ }
+
+ // test empty map
+ m3 := make(MultipartByteRanges)
+ h, b = m3.ExtractResponseRange(r, 60, "test", nil)
+ if h != nil {
+ t.Errorf("expected nil headers, got %v", h)
+ }
+ if b != nil {
+ t.Errorf("expected nil body, got %s", string(b))
+ }
+
+ // test content length
+ h, b = m1.ExtractResponseRange(r, -1, "test", nil)
+ if v := h.Get(headers.NameContentRange); v != "bytes 12-15/*" {
+ t.Errorf("expected %s got %s", "bytes 12-15/*", v)
+ }
+ if string(b) != expected {
+ t.Errorf("expected %s got %s", expected, string(b))
+ }
+
+ h, b = m1.ExtractResponseRange(r, -1, "test", []byte("test body is large"))
+ if v := h.Get(headers.NameContentRange); v != "bytes 12-15/*" {
+ t.Errorf("expected %s got %s", "bytes 12-15/*", v)
+ }
+ if string(b) != expected {
+ t.Errorf("expected %s got %s", expected, string(b))
+ }
+
+ // test useBody
+ r[0].Start = 13
+ r[0].End = 17
+ h, b = m3.ExtractResponseRange(r, -1, "test", []byte("test body is large"))
+ if v := h.Get(headers.NameContentRange); v != "bytes 13-17/18" {
+ t.Errorf("expected %s got %s", "bytes 13-17/18", v)
+ }
+ if string(b) != "large" {
+ t.Errorf("expected %s got %s", "large", string(b))
+ }
+
+}
diff --git a/internal/proxy/ranges/byterange/range.go b/internal/proxy/ranges/byterange/range.go
new file mode 100644
index 000000000..51f62099a
--- /dev/null
+++ b/internal/proxy/ranges/byterange/range.go
@@ -0,0 +1,310 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package byterange
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+//go:generate msgp
+
+// Range represents the start and end for a byte range object
+type Range struct {
+ Start int64 `msg:"start"`
+ End int64 `msg:"end"`
+}
+
+// Ranges represents a slice of type Range
+// Ranges in the slice must not overlap in coverage, meaning any given byte offset
+// is contained within at most one Range in the slice
+// Good: [ 1-10, 21-30, 35-40 ]; Bad: [ 1-10, 10-20 ]; Bad: [ 1-10, 5-20 ]
+type Ranges []Range
+
+const byteRequestRangePrefix = "bytes="
+const byteResponsRangePrefix = "bytes "
+
+var respRE *regexp.Regexp
+
+func init() {
+	respRE = regexp.MustCompile(`^bytes ([0-9]+)-([0-9]+)\/([0-9]+|\*)$`)
+}
+
+func (br Range) String() string {
+
+ var start string
+ var end string
+ if br.Start >= 0 {
+ start = strconv.FormatInt(br.Start, 10)
+ }
+ if br.End >= 0 {
+ end = strconv.FormatInt(br.End, 10)
+ }
+ return start + "-" + end
+}
+
+// ContentRangeHeader returns a 'Content-Range' header representing the extent of the subject range
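+// e.g., "bytes 0-49/150"; an unknown content length is represented as "*"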
+func (br Range) ContentRangeHeader(contentLength int64) string {
+ var start string
+ var end string
+ cl := "*"
+ if br.Start >= 0 {
+ start = strconv.FormatInt(br.Start, 10)
+ }
+ if br.End >= 0 {
+ end = strconv.FormatInt(br.End, 10)
+ }
+ if contentLength > 0 {
+ cl = strconv.FormatInt(contentLength, 10)
+ }
+ return byteResponsRangePrefix + start + "-" + end + "/" + cl
+}
+
+func (brs Ranges) String() string {
+ if len(brs) == 0 {
+ return ""
+ }
+ sb := strings.Builder{}
+ sb.WriteString(byteRequestRangePrefix)
+ var sep string
+ for _, r := range brs {
+ sb.WriteString(fmt.Sprintf("%s%s", sep, r.String()))
+ sep = ", "
+ }
+ return sb.String()
+}
+
+// CalculateDelta calculates the ranges still needed (the delta) when the subject
+// Ranges are wanted and the provided haves Ranges are already cached
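+// For example, if the client wants bytes 0-100 and the cache holds 20-30, the
+// returned delta is [0-19, 31-100] (given a sufficiently large fullContentLength).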
+func (brs Ranges) CalculateDelta(haves Ranges, fullContentLength int64) Ranges {
+
+ checkpoint := int64(-1)
+ if len(brs) == 0 {
+ return haves
+ }
+ if haves == nil || fullContentLength < 1 || len(haves) == 0 {
+ return brs
+ }
+ if brs.Equal(haves) {
+ return Ranges{}
+ }
+
+ sort.Sort(brs)
+ sort.Sort(haves)
+ need := make(Ranges, 0, len(brs)+len(haves))
+
+ deltaRange := func() Range {
+ return Range{Start: -1, End: -1}
+ }
+ nr := deltaRange()
+
+ for i, want := range brs {
+
+ // adjust any prefix/suffix ranges to known start/ends
+ if want.Start == -1 || want.End == -1 {
+ if want.Start == -1 {
+ want.Start = fullContentLength - want.End
+ }
+ want.End = fullContentLength - 1
+ brs[i] = want
+ }
+ if want.End > fullContentLength {
+ // end is out of bounds, consider a full miss
+ return brs
+ }
+
+ checked := false
+ // now compare to any cached ranges to determine any ranges that are not in cache
+ for _, have := range haves {
+
+ if have.End < checkpoint {
+ continue
+ }
+
+ if have.Start > want.End {
+ if nr.Start > -1 && nr.End == -1 {
+ nr.End = want.End
+ checkpoint = nr.End
+ need = append(need, nr)
+ checked = true
+ nr = deltaRange()
+ }
+ break
+ }
+ if want.Start > have.End {
+ if i < len(haves) {
+ nr.Start = want.Start
+ }
+ continue
+ }
+ if want.Start >= have.Start && want.Start <= have.End &&
+ want.End <= have.End && want.End >= have.Start {
+ checked = true
+ nr = deltaRange()
+ continue
+ }
+ if nr.Start == -1 {
+ // want and have share mutual start and/or ends
+ if want.Start >= have.Start {
+ // they are identical, break and move on
+ if want.End <= have.End {
+ break
+ }
+ nr.Start = have.End + 1
+ continue
+ }
+ nr.Start = want.Start
+ }
+ if want.End <= have.End {
+
+ if nr.Start > -1 && have.Start > 0 {
+ nr.End = have.Start - 1
+ need = append(need, nr)
+ }
+ checked = true
+ nr = deltaRange()
+ continue
+ }
+ if want.Start < have.Start && want.End > have.End {
+ nr.End = have.Start - 1
+ checkpoint = nr.End
+ need = append(need, nr)
+ checked = true
+ nr = deltaRange()
+ nr.Start = have.End + 1
+ }
+ if want.Start >= have.Start && want.Start <= have.End && want.End > have.End {
+ nr.Start = have.End + 1
+ }
+ }
+ if !checked {
+ if nr.Start > -1 {
+ want.Start = nr.Start
+ }
+ need = append(need, want)
+ nr = deltaRange()
+ }
+ }
+
+ if nr.Start != -1 && nr.End == -1 {
+ nr.End = brs[len(brs)-1].End
+ need = append(need, nr)
+ }
+ sort.Sort(need)
+ return need
+}
+
+// ParseContentRangeHeader returns the Range and full content length parsed from
+// the provided input, which must be a properly-formatted HTTP 'Content-Range'
+// Response Header value
+func ParseContentRangeHeader(input string) (Range, int64, error) {
+ parts := respRE.FindAllStringSubmatch(input, -1)
+ if len(parts) == 1 && len(parts[0]) == 4 {
+
+ r := Range{}
+ r.Start, _ = strconv.ParseInt(parts[0][1], 10, 64)
+ r.End, _ = strconv.ParseInt(parts[0][2], 10, 64)
+ if parts[0][3] == "*" {
+ return r, -1, nil
+ }
+ cl, _ := strconv.ParseInt(parts[0][3], 10, 64)
+ return r, cl, nil
+ }
+ return Range{}, -1, errors.New("invalid input format")
+}
+
+// ParseRangeHeader returns a Ranges list from the provided input,
+// which must be a properly-formatted HTTP 'Range' Request Header value
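+// For example, "bytes=0-49, 100-149" yields two Ranges, {0 49} and {100 149};
+// open-ended forms such as "bytes=500-" or "bytes=-500" use -1 for the
+// unspecified bound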
+func ParseRangeHeader(input string) Ranges {
+ if input == "" || !strings.HasPrefix(input, byteRequestRangePrefix) ||
+ input == byteRequestRangePrefix {
+ return nil
+ }
+ input = strings.Replace(input, " ", "", -1)[6:]
+ parts := strings.Split(input, ",")
+ ranges := make(Ranges, len(parts))
+
+ for i, p := range parts {
+
+ j := strings.Index(p, "-")
+ if j < 0 {
+ return nil
+ }
+
+ var start = int64(-1)
+ var end = int64(-1)
+ var err error
+
+ if j > 0 {
+ start, err = strconv.ParseInt(p[0:j], 10, 64)
+ if err != nil {
+ return nil
+ }
+ }
+
+ if j < len(p)-1 {
+ end, err = strconv.ParseInt(p[j+1:], 10, 64)
+ if err != nil {
+ return nil
+ }
+ }
+
+ ranges[i].Start = start
+ ranges[i].End = end
+ }
+
+ sort.Sort(ranges)
+ return ranges
+}
+
+// Equal returns true if the compared byte range slices are equal
+// and assumes that the Ranges are sorted
+func (brs Ranges) Equal(brs2 Ranges) bool {
+ if brs2 == nil {
+ return false
+ }
+ if len(brs) != len(brs2) {
+ return false
+ }
+ for i := range brs {
+ if brs[i] != brs2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// sort.Interface required functions for Ranges
+
+// Len returns the length of a slice of type Ranges
+func (brs Ranges) Len() int {
+ return len(brs)
+}
+
+// Less returns true if element i in the Ranges comes before j
+func (brs Ranges) Less(i, j int) bool {
+	return brs[i].Start < brs[j].Start
+}
+
+// Swap modifies a Ranges by swapping the values in indexes i and j
+func (brs Ranges) Swap(i, j int) {
+ brs[i], brs[j] = brs[j], brs[i]
+}
+
+// Less returns true if element i in the Ranges comes before j
+func (br Range) Less(br2 Range) bool {
+ return br.Start < br2.Start
+}
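
For reference, here is a minimal sketch of how the byterange pieces fit together on a partial cache hit; it mirrors test case 11 from the test file below, and since these are internal packages the sketch assumes it lives somewhere inside the Trickster module:

```go
package main

import (
	"fmt"

	"github.com/Comcast/trickster/internal/proxy/ranges/byterange"
)

func main() {
	// The client requests bytes 5-20 of a 21-byte object.
	want := byterange.ParseRangeHeader("bytes=5-20")

	// The cache already holds bytes 1-9.
	have := byterange.Ranges{byterange.Range{Start: 1, End: 9}}

	// CalculateDelta returns only the ranges still needed from the origin.
	need := want.CalculateDelta(have, 21)
	fmt.Println(need) // bytes=10-20
}
```
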
diff --git a/internal/proxy/ranges/byterange/range_gen.go b/internal/proxy/ranges/byterange/range_gen.go
new file mode 100644
index 000000000..bd7862d79
--- /dev/null
+++ b/internal/proxy/ranges/byterange/range_gen.go
@@ -0,0 +1,272 @@
+package byterange
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *Range) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.Start, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "end":
+ z.End, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z Range) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.Start)
+ if err != nil {
+ return
+ }
+ // write "end"
+ err = en.Append(0xa3, 0x65, 0x6e, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.End)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z Range) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendInt64(o, z.Start)
+ // string "end"
+ o = append(o, 0xa3, 0x65, 0x6e, 0x64)
+ o = msgp.AppendInt64(o, z.End)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Range) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.Start, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "end":
+ z.End, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z Range) Msgsize() (s int) {
+ s = 1 + 6 + msgp.Int64Size + 4 + msgp.Int64Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *Ranges) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0002 uint32
+ zb0002, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zb0002) {
+ (*z) = (*z)[:zb0002]
+ } else {
+ (*z) = make(Ranges, zb0002)
+ }
+ for zb0001 := range *z {
+ var field []byte
+ _ = field
+ var zb0003 uint32
+ zb0003, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ (*z)[zb0001].Start, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "end":
+ (*z)[zb0001].End, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z Ranges) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteArrayHeader(uint32(len(z)))
+ if err != nil {
+ return
+ }
+ for zb0004 := range z {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z[zb0004].Start)
+ if err != nil {
+ return
+ }
+ // write "end"
+ err = en.Append(0xa3, 0x65, 0x6e, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z[zb0004].End)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z Ranges) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendArrayHeader(o, uint32(len(z)))
+ for zb0004 := range z {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendInt64(o, z[zb0004].Start)
+ // string "end"
+ o = append(o, 0xa3, 0x65, 0x6e, 0x64)
+ o = msgp.AppendInt64(o, z[zb0004].End)
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Ranges) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zb0002) {
+ (*z) = (*z)[:zb0002]
+ } else {
+ (*z) = make(Ranges, zb0002)
+ }
+ for zb0001 := range *z {
+ var field []byte
+ _ = field
+ var zb0003 uint32
+ zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ (*z)[zb0001].Start, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "end":
+ (*z)[zb0001].End, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z Ranges) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize + (len(z) * (11 + msgp.Int64Size + msgp.Int64Size))
+ return
+}
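
The generated methods above give Range and Ranges a MessagePack wire format via tinylib/msgp. An illustrative round-trip sketch, exercising only the generated MarshalMsg/UnmarshalMsg methods:

```go
package main

import (
	"fmt"

	"github.com/Comcast/trickster/internal/proxy/ranges/byterange"
)

func main() {
	in := byterange.Ranges{{Start: 0, End: 10}, {Start: 20, End: 29}}

	// MarshalMsg appends the MessagePack encoding of the Ranges to the provided slice.
	b, err := in.MarshalMsg(nil)
	if err != nil {
		panic(err)
	}

	// UnmarshalMsg decodes into out and returns any unconsumed bytes.
	var out byterange.Ranges
	if _, err := out.UnmarshalMsg(b); err != nil {
		panic(err)
	}

	fmt.Println(in.Equal(out)) // true
}
```
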
diff --git a/internal/proxy/ranges/byterange/range_gen_test.go b/internal/proxy/ranges/byterange/range_gen_test.go
new file mode 100644
index 000000000..3848f43d4
--- /dev/null
+++ b/internal/proxy/ranges/byterange/range_gen_test.go
@@ -0,0 +1,236 @@
+package byterange
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+func TestMarshalUnmarshalRange(t *testing.T) {
+ v := Range{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgRange(b *testing.B) {
+ v := Range{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgRange(b *testing.B) {
+ v := Range{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalRange(b *testing.B) {
+ v := Range{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeRange(t *testing.T) {
+ v := Range{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := Range{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeRange(b *testing.B) {
+ v := Range{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeRange(b *testing.B) {
+ v := Range{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalRanges(t *testing.T) {
+ v := Ranges{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgRanges(b *testing.B) {
+ v := Ranges{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgRanges(b *testing.B) {
+ v := Ranges{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalRanges(b *testing.B) {
+ v := Ranges{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodeRanges(t *testing.T) {
+ v := Ranges{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := Ranges{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodeRanges(b *testing.B) {
+ v := Ranges{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodeRanges(b *testing.B) {
+ v := Ranges{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/internal/proxy/ranges/byterange/range_test.go b/internal/proxy/ranges/byterange/range_test.go
new file mode 100644
index 000000000..efa757419
--- /dev/null
+++ b/internal/proxy/ranges/byterange/range_test.go
@@ -0,0 +1,348 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package byterange
+
+import (
+ "sort"
+ "strconv"
+ "testing"
+)
+
+func TestRanges_CalculateDelta(t *testing.T) {
+
+ tests := []struct {
+ want, have, expected Ranges
+ cl int64
+ }{
+ {
+			// case 0 where we need both outer perimeters of the wanted range
+ want: Ranges{Range{Start: 5, End: 10}},
+ have: Ranges{Range{Start: 6, End: 9}},
+ expected: Ranges{Range{Start: 5, End: 5}, Range{Start: 10, End: 10}},
+ cl: 62,
+ },
+ {
+ // case 1 where the needed range is out of known bounds
+ want: Ranges{Range{Start: 100, End: 100}},
+ have: Ranges{Range{Start: 6, End: 9}},
+ expected: Ranges{Range{Start: 100, End: 100}},
+ cl: 62,
+ },
+ {
+ // case 2 where the needed range is identical to have range
+ want: Ranges{Range{Start: 6, End: 9}},
+ have: Ranges{Range{Start: 6, End: 9}},
+ expected: Ranges{},
+ cl: 62,
+ },
+ {
+ // case 3 where we want a suffix range ("bytes=-50")
+ want: Ranges{Range{Start: -1, End: 50}},
+ have: Ranges{Range{Start: 0, End: 30}},
+ expected: Ranges{Range{Start: 31, End: 69}},
+ cl: 70,
+ },
+ {
+ // case 4 where we want a prefix range ("bytes=50-")
+ want: Ranges{Range{Start: 30, End: -1}},
+ have: Ranges{Range{Start: 0, End: 40}},
+ expected: Ranges{Range{Start: 41, End: 69}},
+ cl: 70,
+ },
+ {
+ // case 5 where we have a few absolute ranges #1
+ want: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ have: Ranges{Range{Start: 0, End: 25}},
+ expected: Ranges{Range{Start: 26, End: 29}},
+ cl: 70,
+ },
+ {
+ // case 6 where we have a few absolute ranges #2
+ want: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ have: Ranges{Range{Start: 0, End: 6}, Range{Start: 17, End: 32}},
+ expected: Ranges{Range{Start: 7, End: 10}},
+ cl: 70,
+ },
+ {
+ // case 7 where we have a few absolute ranges #3
+ want: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ have: Ranges{Range{Start: 0, End: 6}, Range{Start: 25, End: 32}},
+ expected: Ranges{Range{Start: 7, End: 10}, Range{Start: 20, End: 24}},
+ cl: 70,
+ },
+ {
+ // case 8 where we have a few absolute ranges #4
+ want: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ have: Ranges{Range{Start: 0, End: 6}, Range{Start: 20, End: 27}},
+ expected: Ranges{Range{Start: 7, End: 10}, Range{Start: 28, End: 29}},
+ cl: 70,
+ },
+ {
+ // case 9 where we have all empty ranges
+ want: Ranges{},
+ have: Ranges{},
+ expected: Ranges{},
+ cl: 1,
+ },
+ {
+ // case 10 where we have no saved ranges
+ want: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ have: nil,
+ expected: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 29}},
+ cl: 1,
+ },
+ {
+ // case 11 partial hit between 2 ranges
+ want: Ranges{Range{Start: 5, End: 20}},
+ have: Ranges{Range{Start: 1, End: 9}},
+ expected: Ranges{Range{Start: 10, End: 20}},
+ cl: 21,
+ },
+ {
+ // case 12 full range miss
+ want: Ranges{Range{Start: 15, End: 20}},
+ have: Ranges{Range{Start: 1, End: 9}},
+ expected: Ranges{Range{Start: 15, End: 20}},
+ cl: 21,
+ },
+ {
+ // case 13 cache hit
+ want: Ranges{Range{Start: 29, End: 29}},
+ have: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 32}},
+ expected: Ranges{},
+ cl: 70,
+ },
+ // case 14 two separate partial hit areas in the same request
+ {
+ want: Ranges{Range{Start: 9, End: 22}, Range{Start: 28, End: 60}},
+ have: Ranges{Range{Start: 0, End: 10}, Range{Start: 20, End: 32}},
+ expected: Ranges{Range{Start: 11, End: 19}, Range{Start: 33, End: 60}},
+ cl: 70,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ res := test.want.CalculateDelta(test.have, test.cl)
+ if !res.Equal(test.expected) {
+ t.Errorf("got : %s\nexpected: %s", res, test.expected)
+ }
+ })
+ }
+}
+
+func TestRangesString(t *testing.T) {
+
+ tests := []struct {
+ out, expected string
+ }{
+ {
+ out: Ranges{}.String(),
+ expected: "",
+ },
+ {
+ out: Ranges{Range{Start: 0, End: 50}}.String(),
+ expected: "bytes=0-50",
+ },
+ {
+ out: Ranges{Range{Start: -1, End: 50}}.String(),
+ expected: "bytes=-50",
+ },
+ {
+ out: Ranges{Range{Start: 50, End: -1}}.String(),
+ expected: "bytes=50-",
+ },
+ {
+ out: Ranges{Range{Start: 0, End: 20}, Range{Start: 50, End: -1}}.String(),
+ expected: "bytes=0-20, 50-",
+ },
+ }
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if test.out != test.expected {
+				t.Errorf("expected: %s\ngot: %s", test.expected, test.out)
+ }
+ })
+ }
+
+}
+
+func TestParseContentRangeHeader(t *testing.T) {
+ er := Range{Start: 0, End: 20}
+ el := int64(100)
+ r, cl, err := ParseContentRangeHeader("bytes 0-20/100")
+ if err != nil {
+ t.Error(err)
+ }
+ if er != r {
+ t.Errorf("expected %s, got %s", er.String(), r.String())
+ }
+ if cl != el {
+ t.Errorf("expected %d, got %d", el, cl)
+ }
+
+	// trickster does not support caching ranges with * content lengths
+ er = Range{Start: 0, End: 20}
+ r, _, err = ParseContentRangeHeader("bytes 0-20/*")
+ if err == nil || err.Error() != "invalid input format" {
+ t.Errorf("expected error: %s", "invalid input format")
+ }
+
+ er = Range{}
+ el = -1
+ r, cl, err = ParseContentRangeHeader("bytes a-20/*")
+ if err == nil || err.Error() != "invalid input format" {
+ t.Errorf("expected error: %s", "invalid input format")
+ }
+ if er != r {
+ t.Errorf("expected %s, got %s", er.String(), r.String())
+ }
+ if cl != el {
+ t.Errorf("expected %d, got %d", el, cl)
+ }
+}
+
+func TestRangesEqual(t *testing.T) {
+
+ want := Ranges{Range{Start: 0, End: 20}}
+ if want.Equal(nil) {
+ t.Errorf("expected %t got %t", false, true)
+ }
+
+}
+
+func TestRangeSort(t *testing.T) {
+ r := Ranges{Range{Start: 10, End: 20}, Range{Start: 0, End: 8}}
+ sort.Sort(r)
+ if r[0].Start != 0 || r[1].End != 20 {
+ t.Errorf("sort failed on %s", r.String())
+ }
+}
+
+func TestRangeLess(t *testing.T) {
+ r1 := Range{Start: 10, End: 20}
+ r2 := Range{Start: 22, End: 30}
+ if !r1.Less(r2) {
+ t.Errorf("expected %t got %t", true, r1.Less(r2))
+ }
+}
+
+func TestContentRangeHeader(t *testing.T) {
+
+ const expected = "bytes 0-20/100"
+
+ r := Range{Start: 0, End: 20}
+ h := r.ContentRangeHeader(100)
+
+ if h != expected {
+ t.Errorf("expected %s got %s", expected, h)
+ }
+
+}
+
+func TestParseRangeHeader_EmptyString(t *testing.T) {
+ r := ParseRangeHeader("")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+}
+
+func TestParseRangeHeader_InvalidRange(t *testing.T) {
+ r := ParseRangeHeader("bytes=abc-def")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("bytes0-100")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("0-100")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("100")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("-")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("bytes=20-30-40-50")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+ r = ParseRangeHeader("bytes=20-blah")
+ if r != nil {
+ t.Errorf("expected empty byte range")
+ }
+}
+
+func TestParseRangeHeader_SingleRange(t *testing.T) {
+ byteRange := "bytes=0-50"
+ res := ParseRangeHeader(byteRange)
+ if res == nil {
+ t.Errorf("expected a non empty byte range, but got an empty range")
+ }
+ if res[0].Start != 0 || res[0].End != 50 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 0, 50, res[0].Start, res[0].End)
+ }
+}
+
+func TestParseRangeHeader_Ends(t *testing.T) {
+ byteRange := "bytes=500-"
+ res := ParseRangeHeader(byteRange)
+ if res == nil {
+ t.Errorf("expected a non empty byte range, but got an empty range")
+ }
+ if res[0].Start != 500 || res[0].End != -1 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 500, -1, res[0].Start, res[0].End)
+ }
+
+ byteRange = "bytes=10-20, 500-"
+ res = ParseRangeHeader(byteRange)
+ if res == nil {
+ t.Errorf("expected a non empty byte range, but got an empty range")
+ }
+ if res[0].Start != 10 || res[0].End != 20 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 10, 20, res[0].Start, res[0].End)
+ }
+ if res[1].Start != 500 || res[1].End != -1 {
+		t.Errorf("expected start %d end %d, got start %d end %d", 500, -1, res[1].Start, res[1].End)
+ }
+
+ byteRange = "bytes=-500"
+ res = ParseRangeHeader(byteRange)
+ if res == nil {
+ t.Errorf("expected a non empty byte range, but got an empty range")
+ }
+ if res[0].Start != -1 || res[0].End != 500 {
+		t.Errorf("expected start %d end %d, got start %d end %d", -1, 500, res[0].Start, res[0].End)
+ }
+
+}
+
+func TestParseRangeHeader_MultiRange(t *testing.T) {
+ byteRange := "bytes=0-50, 100-150"
+ res := ParseRangeHeader(byteRange)
+ if res == nil {
+ t.Errorf("expected a non empty byte range, but got an empty range")
+ }
+ if res[0].Start != 0 || res[0].End != 50 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 0, 50, res[0].Start, res[0].End)
+ }
+ if res[1].Start != 100 || res[1].End != 150 {
+ t.Errorf("expected start %d end %d, got start %d end %d", 100, 150, res[1].Start, res[1].End)
+ }
+}
diff --git a/internal/proxy/request/resources.go b/internal/proxy/request/resources.go
new file mode 100644
index 000000000..9e2b9407b
--- /dev/null
+++ b/internal/proxy/request/resources.go
@@ -0,0 +1,81 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package request
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/timeseries"
+)
+
+// Resources is a collection of resources a Trickster request would need to fulfill the client request
+// This is stored in the client request's context for use by request handlers.
+type Resources struct {
+ OriginConfig *config.OriginConfig
+ PathConfig *config.PathConfig
+ CacheConfig *config.CachingConfig
+ NoLock bool
+ CacheClient cache.Cache
+ OriginClient origins.Client
+ AlternateCacheTTL time.Duration
+ TimeRangeQuery *timeseries.TimeRangeQuery
+}
+
+// Clone returns an exact copy of the subject Resources collection
+func (r Resources) Clone() *Resources {
+ return &Resources{
+ OriginConfig: r.OriginConfig,
+ PathConfig: r.PathConfig,
+ CacheConfig: r.CacheConfig,
+ NoLock: r.NoLock,
+ CacheClient: r.CacheClient,
+ OriginClient: r.OriginClient,
+ AlternateCacheTTL: r.AlternateCacheTTL,
+ TimeRangeQuery: r.TimeRangeQuery,
+ }
+}
+
+// NewResources returns a new Resources collection based on the provided inputs
+func NewResources(oc *config.OriginConfig, pc *config.PathConfig, cc *config.CachingConfig, c cache.Cache, client origins.Client) *Resources {
+ return &Resources{
+ OriginConfig: oc,
+ PathConfig: pc,
+ CacheConfig: cc,
+ CacheClient: c,
+ OriginClient: client,
+ }
+}
+
+// GetResources will return a casted Resources object from the HTTP Request's context
+func GetResources(r *http.Request) *Resources {
+ v := context.Resources(r.Context())
+ rsc, ok := v.(*Resources)
+ if ok {
+ return rsc
+ }
+ return nil
+}
+
+// SetResources will save the Resources collection to the HTTP Request's context
+func SetResources(r *http.Request, rsc *Resources) *http.Request {
+ if rsc == nil {
+ return r
+ }
+ return r.WithContext(context.WithResources(r.Context(), rsc))
+}
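
A minimal sketch of the intended flow, using nil configs for brevity: middleware attaches a Resources collection to the request context with SetResources, and a downstream handler retrieves it with GetResources:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Comcast/trickster/internal/proxy/request"
)

func main() {
	// Build a request and attach an (empty) Resources collection to its context.
	r, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil)
	rsc := request.NewResources(nil, nil, nil, nil, nil)
	r = request.SetResources(r, rsc)

	// Later, a handler pulls the same collection back out of the context.
	if got := request.GetResources(r); got != nil {
		fmt.Println("resources found in request context")
	}
}
```
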
diff --git a/internal/proxy/request/resources_test.go b/internal/proxy/request/resources_test.go
new file mode 100644
index 000000000..60fd5b376
--- /dev/null
+++ b/internal/proxy/request/resources_test.go
@@ -0,0 +1,59 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package request
+
+import (
+ "context"
+ "net/http"
+ "testing"
+ "time"
+
+ tc "github.com/Comcast/trickster/internal/proxy/context"
+)
+
+func TestNewAndCloneResources(t *testing.T) {
+ r := NewResources(nil, nil, nil, nil, nil)
+ r.AlternateCacheTTL = time.Duration(1) * time.Second
+ r2 := r.Clone()
+ if r2.AlternateCacheTTL != r.AlternateCacheTTL {
+ t.Errorf("expected %s got %s", r.AlternateCacheTTL.String(), r2.AlternateCacheTTL.String())
+ }
+}
+
+func TestGetAndSetResources(t *testing.T) {
+ r := NewResources(nil, nil, nil, nil, nil)
+ r.AlternateCacheTTL = time.Duration(1) * time.Second
+ req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ ctx := context.Background()
+ // test nil short circuit bail out
+ req = SetResources(req.WithContext(ctx), nil)
+ req = SetResources(req.WithContext(ctx), r)
+ r2 := GetResources(req)
+ if r2.AlternateCacheTTL != r.AlternateCacheTTL {
+ t.Errorf("expected %s got %s", r.AlternateCacheTTL.String(), r2.AlternateCacheTTL.String())
+ }
+
+ req, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
+ ctx = context.Background()
+ req = req.WithContext(ctx)
+
+ // set something other than a resource into the context to verify a get returns nil
+ req = req.WithContext(tc.WithResources(req.Context(), req))
+
+ r3 := GetResources(req)
+ if r3 != nil {
+ t.Errorf("expected nil result, got %v", r3)
+ }
+
+}
diff --git a/internal/proxy/timeconv/timeconv.go b/internal/proxy/timeconv/timeconv.go
new file mode 100644
index 000000000..63fee26f3
--- /dev/null
+++ b/internal/proxy/timeconv/timeconv.go
@@ -0,0 +1,70 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeconv
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/errors"
+)
+
+// ParseDuration returns a duration from a string. Slightly improved over the built-in time.ParseDuration, since it supports units larger than an hour.
+func ParseDuration(input string) (time.Duration, error) {
+ for i := range input {
+ if input[i] > 47 && input[i] < 58 {
+ continue
+ }
+ if input[i] == 46 {
+ break
+ }
+ if i > 0 {
+ units, ok := UnitMap[input[i:]]
+ if !ok {
+ return errors.ParseDuration(input)
+ }
+ v, err := strconv.ParseInt(input[0:i], 10, 64)
+ if err != nil {
+ return errors.ParseDuration(input)
+ }
+ v = v * units
+ return time.Duration(v), nil
+ }
+ }
+ return errors.ParseDuration(input)
+}
+
+// ParseDurationParts returns a time.Duration from a value and unit
+func ParseDurationParts(value int64, units string) (time.Duration, error) {
+ if _, ok := UnitMap[units]; !ok {
+ return errors.ParseDuration(fmt.Sprintf("%d%s", value, units))
+ }
+ return time.Duration(value * UnitMap[units]), nil
+}
+
+// UnitMap provides a map of common time unit indicators to nanoseconds of duration per unit
+var UnitMap = map[string]int64{
+ "ns": int64(time.Nanosecond),
+ "us": int64(time.Microsecond),
+ "µs": int64(time.Microsecond), // U+00B5 = micro symbol
+ "μs": int64(time.Microsecond), // U+03BC = Greek letter mu
+ "ms": int64(time.Millisecond),
+ "s": int64(time.Second),
+ "m": int64(time.Minute),
+ "h": int64(time.Hour),
+ "d": int64(24 * time.Hour),
+ "w": int64(24 * 7 * time.Hour),
+ "y": int64(24 * 365 * time.Hour),
+}
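
A quick illustrative sketch of the difference from the standard library: time.ParseDuration rejects day-scale units, while this package resolves them through UnitMap:

```go
package main

import (
	"fmt"

	"github.com/Comcast/trickster/internal/proxy/timeconv"
)

func main() {
	// "1d" is rejected by the standard library's time.ParseDuration,
	// but resolves to 24 hours here via the package's UnitMap.
	d, err := timeconv.ParseDuration("1d")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 24h0m0s
}
```
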
diff --git a/internal/proxy/timeconv/timeconv_test.go b/internal/proxy/timeconv/timeconv_test.go
new file mode 100644
index 000000000..5fe1b8f0b
--- /dev/null
+++ b/internal/proxy/timeconv/timeconv_test.go
@@ -0,0 +1,64 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeconv
+
+import (
+ "testing"
+ "time"
+)
+
+func TestParseDuration(t *testing.T) {
+ expected := time.Duration(1) * time.Hour
+ d, err := ParseDuration("1h")
+ if err != nil {
+ t.Error(err)
+ }
+ if d != expected {
+ t.Errorf("expected %d got %d", expected, d)
+ }
+}
+
+func TestParseDurationDecimalFailed(t *testing.T) {
+ val := "1.2341"
+ _, err := ParseDuration(val)
+ if err == nil {
+ t.Errorf("expected 'unable to parse duration: %s' error", val)
+ }
+}
+
+func TestParseDurationFailed(t *testing.T) {
+ val := "1x"
+ _, err := ParseDuration(val)
+ if err == nil {
+ t.Errorf("expected 'unable to parse duration: %s' error", val)
+ }
+}
+
+func TestParseDurationParts(t *testing.T) {
+ expected := time.Duration(1) * time.Hour
+ d, err := ParseDurationParts(1, "h")
+ if err != nil {
+ t.Error(err)
+ }
+ if d != expected {
+ t.Errorf("expected %d got %d", expected, d)
+ }
+}
+
+func TestParseDurationPartsFailed(t *testing.T) {
+ _, err := ParseDurationParts(1, "x")
+ if err == nil {
+ t.Errorf("expected 'unable to parse duration 1x' error")
+ }
+}
diff --git a/internal/proxy/urls/url.go b/internal/proxy/urls/url.go
new file mode 100644
index 000000000..7617a5579
--- /dev/null
+++ b/internal/proxy/urls/url.go
@@ -0,0 +1,38 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package urls
+
+import "net/url"
+
+// Clone returns a deep copy of a *url.URL
+func Clone(u *url.URL) *url.URL {
+ u2 := &url.URL{
+ Scheme: u.Scheme,
+ Host: u.Host,
+ Path: u.Path,
+ RawQuery: u.RawQuery,
+ Fragment: u.Fragment,
+ }
+ if u.User != nil {
+ var user *url.Userinfo
+ if p, ok := u.User.Password(); ok {
+ user = url.UserPassword(u.User.Username(), p)
+ } else {
+ user = url.User(u.User.Username())
+ }
+ u2.User = user
+ }
+
+ return u2
+}
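
Because Clone copies the Userinfo as well as the other URL fields, the copy can be mutated without touching the original; a small illustrative sketch:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/Comcast/trickster/internal/proxy/urls"
)

func main() {
	u1, _ := url.Parse("http://user:pass@127.0.0.1:8080/path?param1=param2")
	u2 := urls.Clone(u1)

	// Mutating the clone does not affect the original.
	u2.Path = "/other"
	fmt.Println(u1.Path, u2.Path) // /path /other
}
```
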
diff --git a/internal/proxy/urls/url_test.go b/internal/proxy/urls/url_test.go
new file mode 100644
index 000000000..da7915dd0
--- /dev/null
+++ b/internal/proxy/urls/url_test.go
@@ -0,0 +1,35 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package urls
+
+import (
+ "net/url"
+ "testing"
+)
+
+func TestClone(t *testing.T) {
+
+ u1, _ := url.Parse("http://user:pass@127.0.0.1:8080/path?param1=param2")
+ u2 := Clone(u1)
+ if u2.Hostname() != "127.0.0.1" {
+ t.Errorf("expected %s got %s", "127.0.0.1", u2.Hostname())
+ }
+
+ u1, _ = url.Parse("http://user@127.0.0.1:8080/path?param1=param2")
+ u2 = Clone(u1)
+ if u2.Hostname() != "127.0.0.1" {
+ t.Errorf("expected %s got %s", "127.0.0.1", u2.Hostname())
+ }
+
+}
diff --git a/internal/routing/registration/registration.go b/internal/routing/registration/registration.go
new file mode 100644
index 000000000..b75dc07f0
--- /dev/null
+++ b/internal/routing/registration/registration.go
@@ -0,0 +1,264 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package registration
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "strings"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/methods"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/proxy/origins/clickhouse"
+ "github.com/Comcast/trickster/internal/proxy/origins/influxdb"
+ "github.com/Comcast/trickster/internal/proxy/origins/irondb"
+ "github.com/Comcast/trickster/internal/proxy/origins/prometheus"
+ "github.com/Comcast/trickster/internal/proxy/origins/reverseproxycache"
+ "github.com/Comcast/trickster/internal/routing"
+ "github.com/Comcast/trickster/internal/util/log"
+ "github.com/Comcast/trickster/internal/util/middleware"
+)
+
+// ProxyClients maintains a list of proxy clients configured for use by Trickster
+var ProxyClients = make(map[string]origins.Client)
+
+// RegisterProxyRoutes iterates the Trickster Configuration and registers the routes for the configured origins
+func RegisterProxyRoutes() error {
+
+ defaultOrigin := ""
+ var ndo *config.OriginConfig // points to the origin config named "default"
+ var cdo *config.OriginConfig // points to the origin config with IsDefault set to true
+
+ // This iteration will ensure default origins are handled properly
+ for k, o := range config.Origins {
+
+ if !config.IsValidOriginType(o.OriginType) {
+ return fmt.Errorf(`unknown origin type in origin config. originName: %s, originType: %s`, k, o.OriginType)
+ }
+
+ // Ensure only one default origin exists
+ if o.IsDefault {
+ if cdo != nil {
+ return fmt.Errorf("only one origin can be marked as default. Found both %s and %s", defaultOrigin, k)
+ }
+ log.Debug("default origin identified", log.Pairs{"name": k})
+ defaultOrigin = k
+ cdo = o
+ continue
+ }
+
+ // handle origin named "default" last as it needs special handling based on a full pass over the range
+ if k == "default" {
+ ndo = o
+ continue
+ }
+
+ err := registerOriginRoutes(k, o)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ndo != nil {
+ if cdo == nil {
+ ndo.IsDefault = true
+ cdo = ndo
+ defaultOrigin = "default"
+ } else {
+ err := registerOriginRoutes("default", ndo)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if cdo != nil {
+ return registerOriginRoutes(defaultOrigin, cdo)
+ }
+
+ return nil
+}
+
+func registerOriginRoutes(k string, o *config.OriginConfig) error {
+
+ var client origins.Client
+ var c cache.Cache
+ var err error
+
+ c, err = registration.GetCache(o.CacheName)
+ if err != nil {
+ return err
+ }
+
+ log.Info("registering route paths", log.Pairs{"originName": k, "originType": o.OriginType, "upstreamHost": o.Host})
+
+ switch strings.ToLower(o.OriginType) {
+ case "prometheus", "":
+ client, err = prometheus.NewClient(k, o, c)
+ case "influxdb":
+ client, err = influxdb.NewClient(k, o, c)
+ case "irondb":
+ client, err = irondb.NewClient(k, o, c)
+ case "clickhouse":
+ client, err = clickhouse.NewClient(k, o, c)
+ case "rpc", "reverseproxycache":
+ client, err = reverseproxycache.NewClient(k, o, c)
+ }
+ if err != nil {
+ return err
+ }
+ if client != nil {
+ o.HTTPClient = client.HTTPClient()
+ ProxyClients[k] = client
+ defaultPaths := client.DefaultPathConfigs(o)
+ registerPathRoutes(client.Handlers(), client, o, c, defaultPaths)
+ }
+ return nil
+}
+
+// registerPathRoutes will take the provided default paths map,
+// merge it with any path data in the provided OriginConfig, and then register
+// the path routes to the appropriate handler from the provided handlers map
+func registerPathRoutes(handlers map[string]http.Handler, client origins.Client, o *config.OriginConfig, c cache.Cache,
+ paths map[string]*config.PathConfig) {
+
+ decorate := func(p *config.PathConfig) http.Handler {
+ // Add Origin, Cache, and Path Configs to the HTTP Request's context
+ p.Handler = middleware.WithResourcesContext(client, o, c, p, p.Handler)
+ if p.NoMetrics {
+ return p.Handler
+ }
+ return middleware.Decorate(o.Name, o.OriginType, p.Path, p.Handler)
+ }
+
+ pathsWithVerbs := make(map[string]*config.PathConfig)
+ for _, p := range paths {
+ if len(p.Methods) == 0 {
+ p.Methods = methods.CacheableHTTPMethods()
+ }
+ pathsWithVerbs[p.Path+"-"+strings.Join(p.Methods, "-")] = p
+ }
+
+ for k, p := range o.Paths {
+ p.OriginConfig = o
+ if p2, ok := pathsWithVerbs[k]; ok {
+ p2.Merge(p)
+ continue
+ }
+ p3 := config.NewPathConfig()
+ p3.Merge(p)
+ pathsWithVerbs[k] = p3
+ }
+
+ if h, ok := handlers["health"]; ok &&
+ o.HealthCheckUpstreamPath != "" && o.HealthCheckVerb != "" {
+ hp := "/trickster/health/" + o.Name
+ log.Debug("registering health handler path", log.Pairs{"path": hp, "originName": o.Name, "upstreamPath": o.HealthCheckUpstreamPath, "upstreamVerb": o.HealthCheckVerb})
+ routing.Router.PathPrefix(hp).Handler(middleware.WithResourcesContext(client, o, nil, nil, h)).Methods(methods.CacheableHTTPMethods()...)
+ }
+
+ plist := make([]string, 0, len(pathsWithVerbs))
+ deletes := make([]string, 0, len(pathsWithVerbs))
+ for k, p := range pathsWithVerbs {
+ if h, ok := handlers[p.HandlerName]; ok && h != nil {
+ p.Handler = h
+ plist = append(plist, k)
+ } else {
+ log.Info("invalid handler name for path", log.Pairs{"path": p.Path, "handlerName": p.HandlerName})
+ deletes = append(deletes, p.Path)
+ }
+ }
+ for _, p := range deletes {
+ delete(pathsWithVerbs, p)
+ }
+
+ sort.Sort(ByLen(plist))
+ for i := len(plist)/2 - 1; i >= 0; i-- {
+ opp := len(plist) - 1 - i
+ plist[i], plist[opp] = plist[opp], plist[i]
+ }
+
+ for _, v := range plist {
+ p, ok := pathsWithVerbs[v]
+ if !ok {
+ continue
+ }
+ log.Debug("registering origin handler path",
+ log.Pairs{"originName": o.Name, "path": v, "handlerName": p.HandlerName,
+ "originHost": o.Host, "handledPath": "/" + o.Name + p.Path, "matchType": p.MatchType})
+ if p.Handler != nil && len(p.Methods) > 0 {
+
+ if p.Methods[0] == "*" {
+ p.Methods = methods.AllHTTPMethods()
+ }
+
+ switch p.MatchType {
+ case config.PathMatchTypePrefix:
+ // Case where we path match by prefix
+ // Host Header Routing
+ routing.Router.PathPrefix(p.Path).Handler(decorate(p)).Methods(p.Methods...).Host(o.Name)
+ // Path Routing
+ routing.Router.PathPrefix("/" + o.Name + p.Path).Handler(decorate(p)).Methods(p.Methods...)
+ default:
+ // default to exact match
+ // Host Header Routing
+ routing.Router.Handle(p.Path, decorate(p)).Methods(p.Methods...).Host(o.Name)
+ // Path Routing
+ routing.Router.Handle("/"+o.Name+p.Path, decorate(p)).Methods(p.Methods...)
+ }
+ }
+ }
+
+ if o.IsDefault {
+ log.Info("registering default origin handler paths", log.Pairs{"originName": o.Name})
+ for _, v := range plist {
+ p, ok := pathsWithVerbs[v]
+ if !ok {
+ continue
+ }
+ if p.Handler != nil && len(p.Methods) > 0 {
+ log.Debug("registering default origin handler paths", log.Pairs{"originName": o.Name, "path": p.Path, "handlerName": p.HandlerName, "matchType": p.MatchType})
+ switch p.MatchType {
+ case config.PathMatchTypePrefix:
+ // Case where we path match by prefix
+ routing.Router.PathPrefix(p.Path).Handler(decorate(p)).Methods(p.Methods...)
+ default:
+ // default to exact match
+ routing.Router.Handle(p.Path, decorate(p)).Methods(p.Methods...)
+ }
+ routing.Router.Handle(p.Path, decorate(p)).Methods(p.Methods...)
+ }
+ }
+ }
+ o.Paths = pathsWithVerbs
+}
+
+// ByLen allows sorting of a string slice by string length
+type ByLen []string
+
+func (a ByLen) Len() int {
+ return len(a)
+}
+
+func (a ByLen) Less(i, j int) bool {
+ return len(a[i]) < len(a[j])
+}
+
+func (a ByLen) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
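
The registration loop above sorts the path keys with ByLen and then reverses the slice in place, so the longest (most specific) paths are handed to the router before shorter catch-all paths. A standalone sketch of just that ordering step, using hypothetical example paths and a local mirror of the ByLen type:

```go
package main

import (
	"fmt"
	"sort"
)

// ByLen mirrors the sort.Interface implementation used by the registration package.
type ByLen []string

func (a ByLen) Len() int           { return len(a) }
func (a ByLen) Less(i, j int) bool { return len(a[i]) < len(a[j]) }
func (a ByLen) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

func main() {
	paths := []string{"/", "/api/v1/query_range", "/api/v1/query"}

	// Sort ascending by length, then reverse in place so the longest,
	// most specific paths are registered with the router first.
	sort.Sort(ByLen(paths))
	for i := len(paths)/2 - 1; i >= 0; i-- {
		opp := len(paths) - 1 - i
		paths[i], paths[opp] = paths[opp], paths[i]
	}
	fmt.Println(paths) // [/api/v1/query_range /api/v1/query /]
}
```
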
diff --git a/internal/routing/registration/registration_test.go b/internal/routing/registration/registration_test.go
new file mode 100644
index 000000000..0847d24aa
--- /dev/null
+++ b/internal/routing/registration/registration_test.go
@@ -0,0 +1,239 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package registration
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+func init() {
+ metrics.Init()
+}
+
+func TestRegisterProxyRoutes(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-log-level", "debug", "-origin-url", "http://1", "-origin-type", "prometheus"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ RegisterProxyRoutes()
+
+ if len(ProxyClients) == 0 {
+ t.Errorf("expected %d got %d", 1, 0)
+ }
+
+ config.Origins["default"] = config.NewOriginConfig()
+
+ // Test Too Many Defaults
+ o1 := config.Origins["default"]
+ o2 := config.NewOriginConfig()
+
+ o1.IsDefault = true
+ o2.IsDefault = true
+
+ o1.OriginType = "rpc"
+ o2.OriginType = "rpc"
+
+ config.Origins["2"] = o2
+
+ err = RegisterProxyRoutes()
+ if err == nil {
+ t.Errorf("Expected error for too many default origins.%s", "")
+ }
+
+ o1.IsDefault = false
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+ o2.IsDefault = false
+ o2.CacheName = "invalid"
+ err = RegisterProxyRoutes()
+ if err == nil {
+ t.Errorf("Expected error for invalid cache name%s", "")
+ }
+
+ o2.CacheName = "default"
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+	// test the condition where no origin has IsDefault set to true,
+ // and no origins are named default
+
+ o1.IsDefault = false
+ o2.IsDefault = false
+ config.Origins["1"] = o1
+ delete(config.Origins, "default")
+
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestRegisterProxyRoutesInflux(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-log-level", "debug", "-origin-url", "http://1", "-origin-type", "influxdb"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(ProxyClients) == 0 {
+ t.Errorf("expected %d got %d", 1, 0)
+ }
+
+}
+
+func TestRegisterProxyRoutesClickHouse(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-log-level", "debug", "-origin-url", "http://1", "-origin-type", "clickhouse"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(ProxyClients) == 0 {
+ t.Errorf("expected %d got %d", 1, 0)
+ }
+
+}
+
+func TestRegisterProxyRoutesIRONdb(t *testing.T) {
+
+ err := config.Load("trickster", "test", []string{"-origin-url", "http://example.com", "-origin-type", "irondb", "-log-level", "debug"})
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(ProxyClients) == 0 {
+ t.Errorf("expected %d got %d", 1, 0)
+ }
+}
+
+func TestRegisterProxyRoutesMultipleDefaults(t *testing.T) {
+ expected1 := "only one origin can be marked as default. Found both test and test2"
+ expected2 := "only one origin can be marked as default. Found both test2 and test"
+
+ a := []string{"-config", "../../../testdata/test.too_many_defaults.conf"}
+ err := config.Load("trickster", "test", a)
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err == nil {
+ t.Errorf("expected error `%s` got nothing", expected1)
+ } else if err.Error() != expected1 && err.Error() != expected2 {
+ t.Errorf("expected error `%s` got `%s`", expected1, err.Error())
+ }
+}
+
+func TestRegisterProxyRoutesInvalidCert(t *testing.T) {
+ expected := "tls: failed to find any PEM data in certificate input"
+ a := []string{"-config", "../../../testdata/test.bad_tls_cert.conf"}
+ err := config.Load("trickster", "test", a)
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err == nil {
+ t.Errorf("expected error: %s", expected)
+ }
+ if err != nil && err.Error() != expected {
+ t.Errorf("expected error: %s, got: %s", expected, err.Error())
+ }
+}
+
+func TestRegisterProxyRoutesBadCacheName(t *testing.T) {
+ expected := "invalid cache name [test2] provided in origin config [test]"
+ a := []string{"-config", "../../../testdata/test.bad_cache_name.conf"}
+ err := config.Load("trickster", "test", a)
+ if err == nil {
+ t.Errorf("expected error `%s` got nothing", expected)
+ } else if err.Error() != expected {
+ t.Errorf("expected error `%s` got `%s`", expected, err.Error())
+ }
+}
+
+func TestRegisterProxyRoutesBadOriginType(t *testing.T) {
+ expected := "unknown origin type in origin config. originName: test, originType: foo"
+ a := []string{"-config", "../../../testdata/test.unknown_origin_type.conf"}
+ err := config.Load("trickster", "test", a)
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err == nil {
+ t.Errorf("expected error `%s` got nothing", expected)
+ } else if err.Error() != expected {
+ t.Errorf("expected error `%s` got `%s`", expected, err.Error())
+ }
+}
+
+func TestRegisterMultipleOrigins(t *testing.T) {
+ a := []string{"-config", "../../../testdata/test.multiple_origins.conf"}
+ err := config.Load("trickster", "test", a)
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestRegisterMultipleOriginsPlusDefault(t *testing.T) {
+ a := []string{"-config", "../../../testdata/test.multiple_origins_plus_default.conf"}
+ err := config.Load("trickster", "test", a)
+ if err != nil {
+ t.Errorf("Could not load configuration: %s", err.Error())
+ }
+ registration.LoadCachesFromConfig()
+ err = RegisterProxyRoutes()
+ if err != nil {
+ t.Error(err)
+ }
+ if !config.Origins["default"].IsDefault {
+ t.Errorf("expected origin %s.IsDefault to be true", "default")
+ }
+}
diff --git a/internal/routing/routing.go b/internal/routing/routing.go
new file mode 100644
index 000000000..429b27a25
--- /dev/null
+++ b/internal/routing/routing.go
@@ -0,0 +1,24 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package routing
+
+import (
+ "github.com/gorilla/mux"
+)
+
+// Router is the HTTP Routing Object
+var Router = mux.NewRouter()
+
+// TLSRouter is the HTTPS Routing Object
+var TLSRouter = mux.NewRouter()
diff --git a/internal/runtime/runtime.go b/internal/runtime/runtime.go
new file mode 100644
index 000000000..ca43026d2
--- /dev/null
+++ b/internal/runtime/runtime.go
@@ -0,0 +1,20 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package runtime
+
+// ApplicationName is the name of the Application
+var ApplicationName string
+
+// ApplicationVersion holds the version of the Application
+var ApplicationVersion string
diff --git a/internal/timeseries/extent.go b/internal/timeseries/extent.go
new file mode 100644
index 000000000..74ababbb0
--- /dev/null
+++ b/internal/timeseries/extent.go
@@ -0,0 +1,51 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import (
+ "fmt"
+ "time"
+)
+
+// Extent describes the start and end times for a given range of data
+type Extent struct {
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+ LastUsed time.Time `json:"-"`
+}
+
+// Includes returns true if the Extent includes the provided Time
+func (e *Extent) Includes(t time.Time) bool {
+ return !t.Before(e.Start) && !t.After(e.End)
+}
+
+// StartsAt returns true if t is equal to the Extent's start time
+func (e *Extent) StartsAt(t time.Time) bool {
+ return t.Equal(e.Start)
+}
+
+// EndsAt returns true if t is equal to the Extent's end time
+func (e *Extent) EndsAt(t time.Time) bool {
+ return t.Equal(e.End)
+}
+
+// After returns true if the range of the Extent is completely after the provided time
+func (e *Extent) After(t time.Time) bool {
+ return t.Before(e.Start)
+}
+
+// String returns the Extent as a string in the format startEpochSec-endEpochSec
+func (e Extent) String() string {
+ return fmt.Sprintf("%d-%d", e.Start.Unix(), e.End.Unix())
+}
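
A short sketch of the Extent predicates in action; the times are arbitrary Unix seconds chosen purely for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Comcast/trickster/internal/timeseries"
)

func main() {
	start := time.Unix(100, 0)
	e := timeseries.Extent{Start: start, End: start.Add(60 * time.Second)}

	fmt.Println(e.Includes(start.Add(30 * time.Second))) // true: the time falls inside the range
	fmt.Println(e.After(start.Add(90 * time.Second)))    // false: the extent ends before this time
	fmt.Println(e.After(start.Add(-10 * time.Second)))   // true: the whole extent is after this time
}
```
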
diff --git a/internal/timeseries/extent_list.go b/internal/timeseries/extent_list.go
new file mode 100644
index 000000000..0670f39c4
--- /dev/null
+++ b/internal/timeseries/extent_list.go
@@ -0,0 +1,265 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+// ExtentList is a type of []Extent used for sorting the slice
+type ExtentList []Extent
+
+// String returns a string representation of the extentlist
+// in the format startEpochSec1-endEpochSec1;startEpochSec2-endEpochSec2
+func (el ExtentList) String() string {
+ if len(el) == 0 {
+ return ""
+ }
+ lines := make([]string, 0, len(el))
+ for _, e := range el {
+ lines = append(lines, fmt.Sprintf("%d-%d", e.Start.Unix(), e.End.Unix()))
+ }
+ return strings.Join(lines, ";")
+}
+
+// InsideOf returns true if the ExtentList falls entirely within the boundaries of the provided Extent
+func (el ExtentList) InsideOf(e Extent) bool {
+ x := len(el)
+ if x == 0 {
+ return false
+ }
+
+ return ((!el[0].Start.Before(e.Start)) &&
+ (!el[0].Start.After(e.End)) &&
+ (!el[x-1].End.Before(e.Start)) &&
+ (!el[x-1].End.After(e.End)))
+
+}
+
+// OutsideOf returns true if the provided Extent falls entirely outside the boundaries of the ExtentList
+func (el ExtentList) OutsideOf(e Extent) bool {
+ x := len(el)
+ if x == 0 {
+ return true
+ }
+ return e.After(el[x-1].End) || el[0].After(e.End)
+}
+
+// Crop reduces the ExtentList to the boundaries of the provided Extent, trimming any Extents that fall outside of it
+func (el ExtentList) Crop(e Extent) ExtentList {
+ var startIndex = -1
+ var endIndex = -1
+ for i, f := range el {
+ if startIndex == -1 {
+ if f.Includes(e.Start) {
+ if !f.StartsAt(e.Start) {
+ el[i].Start = e.Start
+ }
+ startIndex = i
+ } else if f.After(e.Start) && !f.After(e.End) {
+ startIndex = i
+ } else if f.After(e.Start) && f.After(e.End) {
+ return make(ExtentList, 0)
+ }
+ }
+ if endIndex == -1 {
+ if f.Includes(e.End) {
+ if !f.EndsAt(e.End) {
+ el[i].End = e.End
+ }
+ endIndex = i
+ }
+ }
+ }
+ if startIndex != -1 {
+ if endIndex == -1 {
+ endIndex = len(el) - 1
+ }
+ endIndex++
+ if endIndex >= startIndex {
+ return el[startIndex:endIndex]
+ }
+ }
+ return make(ExtentList, 0)
+}
+
+// Compress sorts an ExtentList and merges time-adjacent Extents so that the total extent of
+// data is accurately represented in as few Extents as possible
+func (el ExtentList) Compress(step time.Duration) ExtentList {
+ exc := ExtentList(el).Clone()
+ if len(el) == 0 {
+ return exc
+ }
+ l := len(el)
+ compressed := make(ExtentList, 0, l)
+ sort.Sort(exc)
+ e := Extent{}
+ extr := Extent{}
+ for i := range exc {
+ e.LastUsed = exc[i].LastUsed
+ if e.Start.IsZero() && !exc[i].Start.IsZero() {
+ e.Start = exc[i].Start
+ if extr.Start.IsZero() {
+ extr.Start = e.Start
+ }
+ }
+ if exc[i].End.Before(extr.End) {
+ continue
+ }
+ if i+1 < l && ((exc[i].End.Add(step).Equal(exc[i+1].Start) ||
+ exc[i].End.Equal(exc[i+1].Start)) && exc[i].LastUsed.Equal(exc[i+1].LastUsed) ||
+ exc[i].End.Equal(exc[i+1].End) && exc[i].Start.Equal(exc[i+1].Start)) {
+ continue
+ }
+ e.End = exc[i].End
+ if e.End.After(extr.End) {
+ extr.End = e.End
+ }
+ compressed = append(compressed, e)
+ e = Extent{}
+ }
+ return compressed
+}
+
+// Len returns the length of a slice of type ExtentList
+func (el ExtentList) Len() int {
+ return len(el)
+}
+
+// Less returns true if element i in the ExtentList comes before j
+func (el ExtentList) Less(i, j int) bool {
+ return el[i].Start.Before(el[j].Start)
+}
+
+// Swap modifies an ExtentList by swapping the values in indexes i and j
+func (el ExtentList) Swap(i, j int) {
+ el[i], el[j] = el[j], el[i]
+}
+
+// Clone returns a true copy of the ExtentList
+func (el ExtentList) Clone() ExtentList {
+ c := make(ExtentList, len(el))
+ for i := range el {
+ c[i].Start = el[i].Start
+ c[i].End = el[i].End
+ c[i].LastUsed = el[i].LastUsed
+ }
+ return c
+}
+
+// ExtentListLRU is a type of []Extent used for sorting the slice by LRU
+type ExtentListLRU []Extent
+
+// Len returns the length of a slice of type ExtentListLRU
+func (el ExtentListLRU) Len() int {
+ return len(el)
+}
+
+// Less returns true if element i in the ExtentListLRU comes before j
+func (el ExtentListLRU) Less(i, j int) bool {
+ return el[i].LastUsed.Before(el[j].LastUsed)
+}
+
+// Swap modifies an ExtentListLRU by swapping the values in indexes i and j
+func (el ExtentListLRU) Swap(i, j int) {
+ el[i], el[j] = el[j], el[i]
+}
+
+// Clone returns a true copy of the ExtentListLRU
+func (el ExtentListLRU) Clone() ExtentListLRU {
+ c := make(ExtentListLRU, len(el))
+ for i := range el {
+ c[i].Start = el[i].Start
+ c[i].End = el[i].End
+ c[i].LastUsed = el[i].LastUsed
+ }
+ return c
+}
+
+func (el ExtentListLRU) String() string {
+ if len(el) == 0 {
+ return ""
+ }
+ lines := make([]string, 0, len(el))
+ for _, e := range el {
+ lines = append(lines, fmt.Sprintf("%d-%d:%d", e.Start.Unix(), e.End.Unix(), e.LastUsed.Unix()))
+ }
+ return strings.Join(lines, ";")
+}
+
+// UpdateLastUsed updates the ExtentListLRU's LastUsed field for the provided extent.
+// The step is required in order to properly split extents.
+func (el ExtentListLRU) UpdateLastUsed(lur Extent, step time.Duration) ExtentListLRU {
+
+ if el == nil {
+ return nil
+ }
+
+ if len(el) == 0 {
+ return ExtentListLRU{}
+ }
+
+ now := time.Now().Truncate(time.Second)
+ el2 := make(ExtentList, 0, len(el))
+
+ for _, x := range el {
+
+ // This case captures when extent x is fully covered by the Last Used Range
+ // (lur.Start <= x.Start and lur.End >= x.End), so we mark its LastUsed
+ // and move on without splitting.
+ if !lur.Start.After(x.Start) && !lur.End.Before(x.End) {
+ x.LastUsed = now
+ el2 = append(el2, x)
+ continue
+ }
+
+ // The LastUsed extent is before or after this entire extent
+ // so we don't do anything
+ if x.Start.After(lur.End) || x.End.Before(lur.Start) {
+ el2 = append(el2, x)
+ continue
+ }
+
+ // The Last Used Range starts in this extent, but not on the starting edge
+ // So we'll break it up into two pieces on that start point
+ if lur.Start.After(x.Start) && !lur.Start.After(x.End) {
+ // v will serve as the left portion of x that we broke off
+ // it is outside of the Last Used Range so LU is untouched
+ v := Extent{Start: x.Start, End: lur.Start.Add(-step), LastUsed: x.LastUsed}
+ x.Start = lur.Start
+ el2 = append(el2, v)
+
+ // The right portion may be fully enclosed by the LUR, if so
+ // go ahead and mark the usage time, append to our new ExtentList and move on
+ if !lur.End.Before(x.End) {
+ x.LastUsed = now
+ el2 = append(el2, x)
+ continue
+ }
+ }
+
+ // If we got here, the LUR covers a left portion of this extent, break it up and append
+ if lur.End.Before(x.End) && !lur.End.Before(x.Start) {
+ y := Extent{Start: lur.End.Add(step), End: x.End, LastUsed: x.LastUsed}
+ x.End = lur.End
+ x.LastUsed = now
+ el2 = append(el2, x, y)
+ continue
+ }
+ }
+ return ExtentListLRU(el2.Compress(step))
+}
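
A sketch of the splitting behavior described above (illustration only, not part of the diff; it mirrors the first UpdateLastUsed test case in the test file that follows):

package timeseries

import "time"

func exampleUpdateLastUsed() ExtentListLRU {
	lru := ExtentListLRU{
		{Start: time.Unix(100, 0), End: time.Unix(1300, 0), LastUsed: time.Unix(1300, 0)},
	}
	// Touching 200-600 splits the single cached extent into three pieces; only
	// the touched middle piece has its LastUsed stamped with time.Now():
	//   100-100:1300 ; 200-600:<now> ; 700-1300:1300
	return lru.UpdateLastUsed(Extent{Start: time.Unix(200, 0), End: time.Unix(600, 0)}, 100*time.Second)
}
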
diff --git a/internal/timeseries/extent_list_test.go b/internal/timeseries/extent_list_test.go
new file mode 100644
index 000000000..41e0a2f3f
--- /dev/null
+++ b/internal/timeseries/extent_list_test.go
@@ -0,0 +1,602 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "testing"
+ "time"
+)
+
+var t98 = time.Unix(98, 0)
+var t99 = time.Unix(99, 0)
+var t100 = time.Unix(100, 0)
+var t101 = time.Unix(101, 0)
+var t200 = time.Unix(200, 0)
+var t201 = time.Unix(201, 0)
+var t300 = time.Unix(300, 0)
+var t600 = time.Unix(600, 0)
+var t900 = time.Unix(900, 0)
+var t1000 = time.Unix(1000, 0)
+var t1100 = time.Unix(1100, 0)
+var t1200 = time.Unix(1200, 0)
+var t1300 = time.Unix(1300, 0)
+var t1400 = time.Unix(1400, 0)
+
+func TestUpdateLastUsed(t *testing.T) {
+
+ now := time.Now().Truncate(time.Second).Unix()
+
+ tests := []struct {
+ el ExtentListLRU
+ lu Extent
+ step time.Duration
+ expected string
+ }{
+ { // Run 0 - split 1 into 3
+ el: ExtentListLRU{Extent{Start: t100, End: t1300, LastUsed: t1300}},
+ lu: Extent{Start: t200, End: t600},
+ step: time.Duration(100) * time.Second,
+ expected: fmt.Sprintf("100-100:1300;200-600:%d;700-1300:1300", now),
+ },
+
+ {
+ el: ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t1100, End: t1300, LastUsed: t900},
+ Extent{Start: t1400, End: t1400, LastUsed: t1400},
+ },
+ lu: Extent{Start: t1100, End: t1400},
+ step: time.Duration(100) * time.Second,
+ expected: fmt.Sprintf("100-200:200;600-900:900;1100-1400:%d", now),
+ },
+
+ {
+ el: ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t1100, End: t1300, LastUsed: t900},
+ Extent{Start: t1400, End: t1400, LastUsed: t1400},
+ },
+ lu: Extent{Start: t1200, End: t1400},
+ step: time.Duration(100) * time.Second,
+ expected: fmt.Sprintf("100-200:200;600-900:900;1100-1100:900;1200-1400:%d", now),
+ },
+
+ {
+ el: ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t1100, End: t1300, LastUsed: t900},
+ Extent{Start: t1400, End: t1400, LastUsed: t1400},
+ },
+ lu: Extent{Start: t600, End: t900},
+ step: time.Duration(100) * time.Second,
+ expected: fmt.Sprintf("100-200:200;600-900:%d;1100-1300:900;1400-1400:1400", now),
+ },
+
+ {
+ el: ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t300, End: t900, LastUsed: t900},
+ Extent{Start: t1000, End: t1300, LastUsed: t900},
+ Extent{Start: t1400, End: t1400, LastUsed: t1400},
+ },
+ lu: Extent{Start: t200, End: t1300},
+ step: time.Duration(100) * time.Second,
+ expected: fmt.Sprintf("100-100:200;200-1300:%d;1400-1400:1400", now),
+ },
+
+ {
+ el: nil,
+ lu: Extent{Start: t200, End: t1300},
+ step: time.Duration(100) * time.Second,
+ expected: "",
+ },
+
+ {
+ el: ExtentListLRU{},
+ lu: Extent{Start: t200, End: t1300},
+ step: time.Duration(100) * time.Second,
+ expected: "",
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ el := test.el.UpdateLastUsed(test.lu, test.step)
+ if el.String() != test.expected {
+ t.Errorf("got %s expected %s", el.String(), test.expected)
+ }
+ })
+ }
+
+}
+
+func TestInsideOf(t *testing.T) {
+
+ el := ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ }
+
+ if el.InsideOf(Extent{Start: t100, End: t100}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if el.InsideOf(Extent{Start: time.Unix(0, 0), End: t100}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if el.InsideOf(Extent{Start: time.Unix(0, 0), End: time.Unix(0, 0)}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if el.InsideOf(Extent{Start: t201, End: t201}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if el.InsideOf(Extent{Start: t1400, End: t1400}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ // test empty
+ el = ExtentList{}
+ if el.InsideOf(Extent{Start: t100, End: t100}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+}
+
+func TestOutsideOf(t *testing.T) {
+
+ el := ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ }
+
+ if el.OutsideOf(Extent{Start: t100, End: t100}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if el.OutsideOf(Extent{Start: time.Unix(0, 0), End: t100}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if !el.OutsideOf(Extent{Start: time.Unix(0, 0), End: time.Unix(0, 0)}) {
+ t.Errorf("expected true got %t", false)
+ }
+
+ if el.OutsideOf(Extent{Start: t201, End: t201}) {
+ t.Errorf("expected false got %t", true)
+ }
+
+ if !el.OutsideOf(Extent{Start: t1400, End: t1400}) {
+ t.Errorf("expected true got %t", false)
+ }
+
+ // test empty
+ el = ExtentList{}
+ if !el.OutsideOf(Extent{Start: t100, End: t100}) {
+ t.Errorf("expected true got %t", false)
+ }
+}
+
+func TestString(t *testing.T) {
+
+ tests := []struct {
+ el ExtentList
+ expected string
+ }{
+ {
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ "100-200;600-900;1100-1300",
+ },
+
+ {
+ ExtentList{},
+ "",
+ },
+
+ {
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ },
+ "100-200",
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if test.el.String() != test.expected {
+ t.Errorf("got %s expected %s", test.el.String(), test.expected)
+ }
+ })
+ }
+
+}
+
+func TestCrop(t *testing.T) {
+
+ el := ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ }
+
+ tests := []struct {
+ cropRange Extent
+ seed, expected ExtentList
+ }{
+
+ { // Run 0
+ Extent{Start: t98, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 1
+ Extent{Start: t100, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 2
+ Extent{Start: t101, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t101, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 3
+ Extent{Start: t200, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t200, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 4
+ Extent{Start: t201, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 5
+ Extent{Start: t99, End: t1200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1200},
+ },
+ },
+
+ { // Run 6
+ Extent{Start: t100, End: t1200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1200},
+ },
+ },
+
+ { // Run 7
+ Extent{Start: t101, End: t1200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t101, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1200},
+ },
+ },
+
+ { // Run 8
+ Extent{Start: t200, End: t1200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t200, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1200},
+ },
+ },
+
+ { // Run 9
+ Extent{Start: t201, End: t1200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1200},
+ },
+ },
+
+ { // Run 10
+ Extent{Start: t98, End: t98},
+ el.Clone(),
+ ExtentList{},
+ },
+
+ { // Run 11
+ Extent{Start: t98, End: t99},
+ el.Clone(),
+ ExtentList{},
+ },
+
+ { // Run 12
+ Extent{Start: t98, End: t100},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t100},
+ },
+ },
+
+ { // Run 13
+ Extent{Start: t98, End: t101},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t101},
+ },
+ },
+
+ { // Run 14
+ Extent{Start: t98, End: t200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ },
+ },
+
+ { // Run 15
+ Extent{Start: t100, End: t200},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ },
+ },
+
+ { // Run 16
+ Extent{Start: t100, End: t101},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t101},
+ },
+ },
+
+ { // Run 17
+ Extent{Start: t1000, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 18
+ Extent{Start: t1100, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 19
+ Extent{Start: t1200, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1200, End: t1300},
+ },
+ },
+
+ { // Run 20
+ Extent{Start: t1300, End: t1300},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1300, End: t1300},
+ },
+ },
+
+ { // Run 21
+ Extent{Start: t1300, End: t1400},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1300, End: t1300},
+ },
+ },
+
+ { // Run 22
+ Extent{Start: t1200, End: t1400},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1200, End: t1300},
+ },
+ },
+
+ { // Run 23
+ Extent{Start: t1000, End: t1400},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 24
+ Extent{Start: t900, End: t1400},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t900, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 25
+ Extent{Start: t98, End: t1400},
+ el.Clone(),
+ ExtentList{
+ Extent{Start: t100, End: t200},
+ Extent{Start: t600, End: t900},
+ Extent{Start: t1100, End: t1300},
+ },
+ },
+
+ { // Run 26
+ Extent{Start: t98, End: t1400},
+ ExtentList{},
+ ExtentList{},
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ result := test.seed.Clone().Crop(test.cropRange)
+ if !reflect.DeepEqual(test.expected, result) {
+ t.Errorf("mismatch in Crop: expected=%s got=%s", test.expected, result)
+ }
+ })
+ }
+
+}
+
+func TestExtentListLRUSort(t *testing.T) {
+ el := ExtentListLRU{
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t1100, End: t1300, LastUsed: t1100},
+ }
+ el2 := ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t1100, End: t1300, LastUsed: t1100},
+ }
+ sort.Sort(el)
+ if !reflect.DeepEqual(el, el2) {
+ t.Errorf("mismatch in sort: expected=%s got=%s", el2, el)
+ }
+
+}
+
+func TestExtentListLRUCopy(t *testing.T) {
+ el := ExtentListLRU{
+ Extent{Start: t100, End: t200, LastUsed: t200},
+ Extent{Start: t600, End: t900, LastUsed: t900},
+ Extent{Start: t1100, End: t1300, LastUsed: t1100},
+ }
+
+ el2 := el.Clone()
+
+ if !reflect.DeepEqual(el, el2) {
+ t.Errorf("mismatch in sort: expected=%s got=%s", el2, el)
+ }
+
+}
+
+func TestCompress(t *testing.T) {
+
+ tests := []struct {
+ uncompressed, compressed ExtentList
+ }{
+ {
+ ExtentList{},
+ ExtentList{},
+ },
+
+ {
+ ExtentList{
+ Extent{Start: time.Unix(30, 0), End: time.Unix(30, 0)},
+ Extent{Start: time.Unix(90, 0), End: time.Unix(120, 0)},
+ Extent{Start: time.Unix(120, 0), End: time.Unix(180, 0)},
+ Extent{Start: time.Unix(180, 0), End: time.Unix(210, 0)},
+ },
+ ExtentList{
+ Extent{Start: time.Unix(30, 0), End: time.Unix(30, 0)},
+ Extent{Start: time.Unix(90, 0), End: time.Unix(210, 0)},
+ },
+ },
+
+ {
+ ExtentList{
+ Extent{Start: time.Unix(0, 0), End: time.Unix(30, 0)},
+ },
+ ExtentList{
+ Extent{Start: time.Unix(0, 0), End: time.Unix(30, 0)},
+ },
+ },
+
+ {
+ ExtentList{
+ Extent{Start: time.Unix(0, 0), End: time.Unix(30, 0)},
+ Extent{Start: time.Unix(90, 0), End: time.Unix(120, 0)},
+ Extent{Start: time.Unix(120, 0), End: time.Unix(180, 0)},
+ Extent{Start: time.Unix(270, 0), End: time.Unix(360, 0)},
+ Extent{Start: time.Unix(180, 0), End: time.Unix(210, 0)},
+ Extent{Start: time.Unix(420, 0), End: time.Unix(480, 0)},
+ },
+ ExtentList{
+ Extent{Start: time.Unix(0, 0), End: time.Unix(30, 0)},
+ Extent{Start: time.Unix(90, 0), End: time.Unix(210, 0)},
+ Extent{Start: time.Unix(270, 0), End: time.Unix(360, 0)},
+ Extent{Start: time.Unix(420, 0), End: time.Unix(480, 0)},
+ },
+ },
+
+ {
+ ExtentList{
+ Extent{Start: time.Unix(90, 0), End: time.Unix(120, 0)},
+ Extent{Start: time.Unix(90, 0), End: time.Unix(120, 0)},
+ Extent{Start: time.Unix(180, 0), End: time.Unix(180, 0)},
+ Extent{Start: time.Unix(180, 0), End: time.Unix(180, 0)},
+ },
+ ExtentList{
+ Extent{Start: time.Unix(90, 0), End: time.Unix(120, 0)},
+ Extent{Start: time.Unix(180, 0), End: time.Unix(180, 0)},
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ result := ExtentList(test.uncompressed).Compress(time.Duration(30) * time.Second)
+
+ if !reflect.DeepEqual(result, test.compressed) {
+ t.Errorf("mismatch in Compress: expected=%s got=%s", test.compressed, result)
+ }
+ })
+ }
+}
diff --git a/internal/timeseries/timerangequery.go b/internal/timeseries/timerangequery.go
new file mode 100644
index 000000000..74770fbc9
--- /dev/null
+++ b/internal/timeseries/timerangequery.go
@@ -0,0 +1,117 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Comcast/trickster/internal/proxy/urls"
+)
+
+// TimeRangeQuery represents a timeseries database query parsed from an inbound HTTP request
+type TimeRangeQuery struct {
+ // Statement is the timeseries database query (with tokenized timeranges where present) requested by the user
+ Statement string
+ // Extent provides the start and end times for the request from a timeseries database
+ Extent Extent
+ // Step indicates the amount of time between each datapoint in a TimeRangeQuery's resulting timeseries
+ Step time.Duration
+ // IsOffset is true if the query uses a relative offset modifier
+ IsOffset bool
+ // TimestampFieldName indicates the database field name for the timestamp field
+ TimestampFieldName string
+ // FastForwardDisable indicates whether fast forward data should be excluded from the Time Range Query result
+ FastForwardDisable bool
+ // TemplateURL is used by some Origin Types for templatization of url parameters containing timestamps
+ TemplateURL *url.URL
+}
+
+// Clone returns an exact copy of a TimeRangeQuery
+func (trq *TimeRangeQuery) Clone() *TimeRangeQuery {
+ t := &TimeRangeQuery{
+ Statement: trq.Statement,
+ Step: trq.Step,
+ Extent: Extent{Start: trq.Extent.Start, End: trq.Extent.End},
+ IsOffset: trq.IsOffset,
+ TimestampFieldName: trq.TimestampFieldName,
+ FastForwardDisable: trq.FastForwardDisable,
+ }
+
+ if trq.TemplateURL != nil {
+ t.TemplateURL = urls.Clone(trq.TemplateURL)
+ }
+
+ return t
+}
+
+// NormalizeExtent adjusts the Start and End of a TimeRangeQuery's Extent to align against normalized boundaries.
+func (trq *TimeRangeQuery) NormalizeExtent() {
+ if trq.Step.Seconds() > 0 {
+ if !trq.IsOffset && trq.Extent.End.After(time.Now()) {
+ trq.Extent.End = time.Now()
+ }
+ trq.Extent.Start = trq.Extent.Start.Truncate(trq.Step)
+ trq.Extent.End = trq.Extent.End.Truncate(trq.Step)
+ }
+}
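
A brief sketch of the boundary alignment (illustration only, as an example test in the timeseries package): with a 10-second step, a request for 1-103 snaps to 0-100, and the End is not clamped because it is already in the past:

package timeseries

import (
	"fmt"
	"time"
)

func ExampleTimeRangeQuery_NormalizeExtent() {
	trq := &TimeRangeQuery{
		Statement: "up",
		Extent:    Extent{Start: time.Unix(1, 0), End: time.Unix(103, 0)},
		Step:      10 * time.Second,
	}
	trq.NormalizeExtent()
	fmt.Println(trq.Extent.Start.Unix(), trq.Extent.End.Unix())
	// Output: 0 100
}
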
+
+// CalculateDeltas provides a list of extents that are not in a cached timeseries, when provided a list of extents that are cached.
+func (trq *TimeRangeQuery) CalculateDeltas(have ExtentList) ExtentList {
+ if len(have) == 0 {
+ return ExtentList{trq.Extent}
+ }
+ misCap := trq.Extent.End.Sub(trq.Extent.Start) / trq.Step
+ if misCap < 0 {
+ misCap = 0
+ }
+ misses := make([]time.Time, 0, misCap)
+ for i := trq.Extent.Start; !trq.Extent.End.Before(i); i = i.Add(trq.Step) {
+ found := false
+ for j := range have {
+ if j == 0 && i.Before(have[j].Start) {
+ // our earliest datapoint in cache is after the first point the user wants
+ break
+ }
+ if i.Equal(have[j].Start) || i.Equal(have[j].End) || (i.After(have[j].Start) && have[j].End.After(i)) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ misses = append(misses, i)
+ }
+ }
+ // Find the fill and gap ranges
+ ins := ExtentList{}
+ var inStart = time.Time{}
+ l := len(misses)
+ for i := range misses {
+ if inStart.IsZero() {
+ inStart = misses[i]
+ }
+ if i+1 == l || !misses[i+1].Equal(misses[i].Add(trq.Step)) {
+ ins = append(ins, Extent{Start: inStart, End: misses[i]})
+ inStart = time.Time{}
+ }
+ }
+ return ins
+}
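
A delta-calculation sketch (illustration only, as an example test in the timeseries package; it mirrors one of the CalculateDeltas test cases below). With 1-101 requested at a 1-second step and only 50-100 cached, the uncached ranges on either side come back as deltas:

package timeseries

import (
	"fmt"
	"time"
)

func ExampleTimeRangeQuery_CalculateDeltas() {
	trq := &TimeRangeQuery{
		Statement: "up",
		Extent:    Extent{Start: time.Unix(1, 0), End: time.Unix(101, 0)},
		Step:      time.Second,
	}
	cached := ExtentList{{Start: time.Unix(50, 0), End: time.Unix(100, 0)}}
	fmt.Println(trq.CalculateDeltas(cached))
	// Output: 1-49;101-101
}
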
+
+func (trq *TimeRangeQuery) String() string {
+ return fmt.Sprintf(`{ "statement": "%s", "step": "%s", "extent": "%s" }`,
+ strings.Replace(trq.Statement, `"`, `\"`, -1), trq.Step.String(), trq.Extent.String())
+}
diff --git a/internal/timeseries/timerangequery_test.go b/internal/timeseries/timerangequery_test.go
new file mode 100644
index 000000000..3d14c4f42
--- /dev/null
+++ b/internal/timeseries/timerangequery_test.go
@@ -0,0 +1,148 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import (
+ "net/url"
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+)
+
+func TestCalculateDeltas(t *testing.T) {
+
+ // test when start is after end
+ trq := TimeRangeQuery{Statement: "up", Extent: Extent{Start: time.Unix(20, 0), End: time.Unix(10, 0)}, Step: time.Duration(10) * time.Second}
+ trq.CalculateDeltas(ExtentList{Extent{}})
+
+ tests := []struct {
+ have []Extent
+ expected []Extent
+ start, end, stepSecs int64
+ }{
+ {
+ []Extent{},
+ []Extent{{Start: time.Unix(1, 0), End: time.Unix(100, 0)}},
+ 1, 100, 1,
+ },
+ {
+ []Extent{{Start: time.Unix(50, 0), End: time.Unix(100, 0)}},
+ []Extent{{Start: time.Unix(1, 0), End: time.Unix(49, 0)}},
+ 1, 100, 1,
+ },
+ {
+ []Extent{{Start: time.Unix(50, 0), End: time.Unix(100, 0)}},
+ []Extent{{Start: time.Unix(1, 0), End: time.Unix(49, 0)}, {Start: time.Unix(101, 0), End: time.Unix(101, 0)}},
+ 1, 101, 1,
+ },
+ {
+ []Extent{{Start: time.Unix(1, 0), End: time.Unix(100, 0)}},
+ []Extent{{Start: time.Unix(101, 0), End: time.Unix(101, 0)}},
+ 1, 101, 1,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ trq := TimeRangeQuery{Statement: "up", Extent: Extent{Start: time.Unix(test.start, 0), End: time.Unix(test.end, 0)}, Step: time.Duration(test.stepSecs) * time.Second}
+ trq.NormalizeExtent()
+ d := trq.CalculateDeltas(test.have)
+
+ if len(d) != len(test.expected) {
+ t.Errorf("expected %v got %v", test.expected, d)
+ return
+ }
+
+ for i := range d {
+ if d[i].Start != test.expected[i].Start {
+ t.Errorf("expected %d got %d", test.expected[i].Start.Unix(), d[i].Start.Unix())
+ }
+ if d[i].End != test.expected[i].End {
+ t.Errorf("expected %d got %d", test.expected[i].End.Unix(), d[i].End.Unix())
+ }
+ }
+ })
+ }
+}
+
+func TestNormalizeExtent(t *testing.T) {
+
+ tmrw := time.Now().Add(time.Duration(24) * time.Hour).Unix()
+ expected := (time.Now().Unix() / 10) * 10
+
+ tests := []struct {
+ start, end, stepSecs, now int64
+ rangeStart, rangeEnd int64
+ err bool
+ }{
+ // Basic test
+ {
+ 1, 100, 1, 1,
+ 1, 100,
+ false,
+ },
+ // Ensure that it aligns to the step interval
+ {
+ 1, 103, 10, 1,
+ 0, 100,
+ false,
+ },
+ // Ensure that it brings in future times
+ {
+ 1, tmrw, 10, 1,
+ 0, expected,
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+
+ trq := TimeRangeQuery{Statement: "up", Extent: Extent{Start: time.Unix(test.start, 0), End: time.Unix(test.end, 0)}, Step: time.Duration(test.stepSecs) * time.Second}
+
+ trq.NormalizeExtent()
+
+ if trq.Extent.Start.Unix() != test.rangeStart {
+ t.Errorf("Mismatch in rangeStart: expected=%d actual=%d", test.rangeStart, trq.Extent.Start.Unix())
+ }
+ if trq.Extent.End.Unix() != test.rangeEnd {
+ t.Errorf("Mismatch in rangeStart: expected=%d actual=%d", test.rangeEnd, trq.Extent.End.Unix())
+ }
+ })
+ }
+}
+
+func TestClone(t *testing.T) {
+ u, _ := url.Parse("http://127.0.0.1/")
+ trq := &TimeRangeQuery{Statement: "1234", Extent: Extent{Start: time.Unix(5, 0), End: time.Unix(10, 0)}, Step: time.Duration(5) * time.Second, TemplateURL: u}
+ c := trq.Clone()
+ if !reflect.DeepEqual(trq, c) {
+ t.Errorf("expected %s got %s", trq.String(), c.String())
+ }
+}
+
+func TestStringTRQ(t *testing.T) {
+
+ const expected = `{ "statement": "1234", "step": "5s", "extent": "5-10" }`
+
+ trq := &TimeRangeQuery{Statement: "1234", Extent: Extent{Start: time.Unix(5, 0), End: time.Unix(10, 0)}, Step: time.Duration(5) * time.Second}
+ s := trq.String()
+
+ if s != expected {
+ t.Errorf("%s", s)
+ }
+
+}
diff --git a/internal/timeseries/timeseries.go b/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..fbd35a37e
--- /dev/null
+++ b/internal/timeseries/timeseries.go
@@ -0,0 +1,48 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package timeseries
+
+import "time"
+
+// Timeseries represents a Response Object from a Timeseries Database
+type Timeseries interface {
+ // SetExtents sets the Extents of the Timeseries
+ SetExtents(ExtentList)
+ // Extents should return the list of time Extents having data present in the Timeseries
+ Extents() ExtentList
+ // TimestampCount should return the number of unique timestamps across the timeseries
+ TimestampCount() int
+ // Step should return the Step Interval of the Timeseries
+ Step() time.Duration
+ // SetStep should update the Step Interval of the Timeseries
+ SetStep(time.Duration)
+ // Merge should merge the Timeseries collection into the source Timeseries
+ Merge(bool, ...Timeseries)
+ // Sort should de-duplicate and sort all series by Timestamp
+ Sort()
+ // Clone should return an exact duplicate of the source Timeseries
+ Clone() Timeseries
+ // CropToRange should reduce time range of the Timeseries to the provided Extent
+ CropToRange(Extent)
+ // CropToSize should reduce time range of the Timeseries to the provided element size using
+ // a least-recently-used methodology, while limiting the upper extent to the provided time,
+ // in order to support backfill tolerance
+ CropToSize(int, time.Time, Extent)
+ // SeriesCount returns the number of individual Series in the Timeseries object
+ SeriesCount() int
+ // ValueCount returns the count of all values across all Series in the Timeseries object
+ ValueCount() int
+ // Size returns the approximate memory byte size of the timeseries object
+ Size() int
+}
diff --git a/internal/util/compress/gzip/gzip.go b/internal/util/compress/gzip/gzip.go
new file mode 100644
index 000000000..e2ac6c9d4
--- /dev/null
+++ b/internal/util/compress/gzip/gzip.go
@@ -0,0 +1,31 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package gzip
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+)
+
+// Inflate returns the inflated version of a gzip-deflated byte slice
+func Inflate(in []byte) ([]byte, error) {
+ gr, err := gzip.NewReader(bytes.NewBuffer(in))
+ if err != nil {
+ return []byte{}, err
+ }
+
+ out, err := ioutil.ReadAll(gr)
+ return out, err
+}
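
A round-trip sketch for Inflate (illustration only, as an example test in this gzip package): the payload is deflated with the standard library's compress/gzip, then inflated back. Error handling is elided for brevity:

package gzip

import (
	"bytes"
	stdgzip "compress/gzip"
	"fmt"
)

func ExampleInflate() {
	// Deflate a short payload with the standard library writer.
	var buf bytes.Buffer
	zw := stdgzip.NewWriter(&buf)
	zw.Write([]byte("hello, trickster"))
	zw.Close()

	// Inflate it back to the original bytes.
	out, err := Inflate(buf.Bytes())
	fmt.Println(string(out), err)
	// Output: hello, trickster <nil>
}
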
diff --git a/internal/util/compress/gzip/gzip_test.go b/internal/util/compress/gzip/gzip_test.go
new file mode 100644
index 000000000..73015e682
--- /dev/null
+++ b/internal/util/compress/gzip/gzip_test.go
@@ -0,0 +1,40 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package gzip
+
+import (
+ "io/ioutil"
+ "testing"
+)
+
+func TestInflate(t *testing.T) {
+ const expected = "this is the inflated text string"
+ c, err := ioutil.ReadFile("../../../../testdata/gzip_test.txt.gz")
+ if err != nil {
+ t.Error(err)
+ }
+ u, err := Inflate(c)
+ if err != nil {
+ t.Error(err)
+ }
+ if string(u) != expected {
+ t.Errorf(`got "%s" expected "%s"`, string(u), expected)
+ }
+
+ _, err = Inflate(nil)
+ if err == nil {
+ t.Errorf("expected error: EOF")
+ }
+
+}
diff --git a/internal/util/log/log.go b/internal/util/log/log.go
new file mode 100644
index 000000000..828577d6d
--- /dev/null
+++ b/internal/util/log/log.go
@@ -0,0 +1,286 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package log
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+ "github.com/go-stack/stack"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+// Logger is the handle to the common TricksterLogger
+var Logger *TricksterLogger
+
+func mapToArray(event string, detail Pairs) []interface{} {
+ a := make([]interface{}, (len(detail)*2)+2)
+ var i int
+
+ // Ensure the log level is the first Pair in the output order (after prefixes)
+ if level, ok := detail["level"]; ok {
+ a[0] = "level"
+ a[1] = level
+ delete(detail, "level")
+ i += 2
+ }
+
+ // Ensure the event description is the second Pair in the output order (after prefixes)
+ a[i] = "event"
+ a[i+1] = event
+ i += 2
+
+ for k, v := range detail {
+ a[i] = k
+ a[i+1] = v
+ i += 2
+ }
+ return a
+}
+
+var onceMutex *sync.Mutex
+var onceRanEntries map[string]bool
+
+func init() {
+ Logger = ConsoleLogger("info")
+ onceRanEntries = make(map[string]bool)
+ onceMutex = &sync.Mutex{}
+}
+
+// ConsoleLogger returns a TricksterLogger object that prints log events to the Console
+func ConsoleLogger(logLevel string) *TricksterLogger {
+ l := &TricksterLogger{}
+
+ wr := os.Stdout
+
+ logger := log.NewLogfmtLogger(log.NewSyncWriter(wr))
+ logger = log.With(logger,
+ "time", log.DefaultTimestampUTC,
+ "app", "trickster",
+ "caller", log.Valuer(func() interface{} {
+ return pkgCaller{stack.Caller(6)}
+ }),
+ )
+
+ l.level = strings.ToLower(logLevel)
+
+ // wrap logger depending on log level
+ switch l.level {
+ case "debug":
+ logger = level.NewFilter(logger, level.AllowDebug())
+ case "info":
+ logger = level.NewFilter(logger, level.AllowInfo())
+ case "warn":
+ logger = level.NewFilter(logger, level.AllowWarn())
+ case "error":
+ logger = level.NewFilter(logger, level.AllowError())
+ case "trace":
+ logger = level.NewFilter(logger, level.AllowDebug())
+ case "none":
+ logger = level.NewFilter(logger, level.AllowNone())
+ default:
+ logger = level.NewFilter(logger, level.AllowInfo())
+ }
+
+ l.logger = logger
+
+ return l
+}
+
+// Init sets the package-level Logger based on the application's logging configuration.
+// When a log file is configured, the file name is suffixed with the instance ID so that
+// multiple instances do not write to the same file.
+func Init() {
+ l := &TricksterLogger{}
+
+ var wr io.Writer
+
+ if config.Logging.LogFile == "" {
+ wr = os.Stdout
+ } else {
+ logFile := config.Logging.LogFile
+ if config.Main.InstanceID > 0 {
+ logFile = strings.Replace(logFile, ".log", "."+strconv.Itoa(config.Main.InstanceID)+".log", 1)
+ }
+
+ wr = &lumberjack.Logger{
+ Filename: logFile,
+ MaxSize: 256, // megabytes
+ MaxBackups: 80, // 256 megs @ 80 backups is 20GB of Logs
+ MaxAge: 7, // days
+ Compress: true, // Compress Rolled Backups
+ }
+ }
+
+ logger := log.NewLogfmtLogger(log.NewSyncWriter(wr))
+ logger = log.With(logger,
+ "time", log.DefaultTimestampUTC,
+ "app", "trickster",
+ "caller", log.Valuer(func() interface{} {
+ return pkgCaller{stack.Caller(6)}
+ }),
+ )
+
+ l.level = strings.ToLower(config.Logging.LogLevel)
+
+ // wrap logger depending on log level
+ switch l.level {
+ case "debug":
+ logger = level.NewFilter(logger, level.AllowDebug())
+ case "info":
+ logger = level.NewFilter(logger, level.AllowInfo())
+ case "warn":
+ logger = level.NewFilter(logger, level.AllowWarn())
+ case "error":
+ logger = level.NewFilter(logger, level.AllowError())
+ case "trace":
+ logger = level.NewFilter(logger, level.AllowDebug())
+ default:
+ logger = level.NewFilter(logger, level.AllowInfo())
+ }
+
+ l.logger = logger
+ if c, ok := wr.(io.Closer); ok && c != nil {
+ l.closer = c
+ }
+
+ Logger = l
+
+}
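
A minimal wiring sketch (illustration only; these are internal packages, so it would only compile from within the Trickster module). It mirrors the setup used by the tests in the companion test file:

package main

import (
	"github.com/Comcast/trickster/internal/config"
	"github.com/Comcast/trickster/internal/util/log"
)

func main() {
	// Populate the logging config, initialize the package-level Logger,
	// emit an event, and close any open file handles on the way out.
	config.Config = config.NewConfig()
	config.Main = &config.MainConfig{InstanceID: 0}
	config.Logging = &config.LoggingConfig{LogLevel: "info"}
	log.Init()
	log.Info("example event", log.Pairs{"key": "value"})
	log.Logger.Close()
}
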
+
+// Pairs represents a key=value pair that helps to describe a log event
+type Pairs map[string]interface{}
+
+// TricksterLogger is a container for the underlying log provider
+type TricksterLogger struct {
+ logger log.Logger
+ closer io.Closer
+ level string
+}
+
+// Info sends an "INFO" event to the TricksterLogger
+func Info(event string, detail Pairs) {
+ level.Info(Logger.logger).Log(mapToArray(event, detail)...)
+}
+
+// InfoOnce sends an "INFO" event to the TricksterLogger only once per key.
+// Returns true if this invocation was the first, and thus sent to the TricksterLogger
+func InfoOnce(key string, event string, detail Pairs) bool {
+ onceMutex.Lock()
+ defer onceMutex.Unlock()
+ key = "info." + key
+ if _, ok := onceRanEntries[key]; !ok {
+ onceRanEntries[key] = true
+ Info(event, detail)
+ return true
+ }
+ return false
+}
+
+// Warn sends a "WARN" event to the TricksterLogger
+func Warn(event string, detail Pairs) {
+ level.Warn(Logger.logger).Log(mapToArray(event, detail)...)
+}
+
+// WarnOnce sends a "WARN" event to the TricksterLogger only once per key.
+// Returns true if this invocation was the first, and thus sent to the TricksterLogger
+func WarnOnce(key string, event string, detail Pairs) bool {
+ onceMutex.Lock()
+ defer onceMutex.Unlock()
+ key = "warn." + key
+ if _, ok := onceRanEntries[key]; !ok {
+ onceRanEntries[key] = true
+ Warn(event, detail)
+ return true
+ }
+ return false
+}
+
+// HasWarnedOnce returns true if a warning for the key has already been sent to the TricksterLogger
+func HasWarnedOnce(key string) bool {
+ onceMutex.Lock()
+ defer onceMutex.Unlock()
+ key = "warn." + key
+ _, ok := onceRanEntries[key]
+ return ok
+}
+
+// Error sends an "ERROR" event to the TricksterLogger
+func Error(event string, detail Pairs) {
+ level.Error(Logger.logger).Log(mapToArray(event, detail)...)
+}
+
+// ErrorOnce sends an "ERROR" event to the TricksterLogger only once per key
+// Returns true if this invocation was the first, and thus sent to the TricksterLogger
+func ErrorOnce(key string, event string, detail Pairs) bool {
+ onceMutex.Lock()
+ defer onceMutex.Unlock()
+ key = "error." + key
+ if _, ok := onceRanEntries[key]; !ok {
+ onceRanEntries[key] = true
+ Error(event, detail)
+ return true
+ }
+ return false
+}
+
+// Debug sends a "DEBUG" event to the TricksterLogger
+func Debug(event string, detail Pairs) {
+ level.Debug(Logger.logger).Log(mapToArray(event, detail)...)
+}
+
+// Trace sends a "TRACE" event to the TricksterLogger
+func Trace(event string, detail Pairs) {
+ // go-kit/log/level does not support Trace, so implemented separately here
+ if Logger.level == "trace" {
+ detail["level"] = "trace"
+ Logger.logger.Log(mapToArray(event, detail)...)
+ }
+}
+
+// Fatal sends a "FATAL" event to the TricksterLogger and exits the program with the provided exit code
+func Fatal(code int, event string, detail Pairs) {
+ // go-kit/log/level does not support Fatal, so implemented separately here
+ detail["level"] = "fatal"
+ Logger.logger.Log(mapToArray(event, detail)...)
+ if code >= 0 {
+ os.Exit(code)
+ }
+}
+
+// Close closes any opened file handles that were used for logging.
+func (l TricksterLogger) Close() {
+ if l.closer != nil {
+ l.closer.Close()
+ }
+}
+
+// pkgCaller wraps a stack.Call to make the default string output include the
+// package path.
+type pkgCaller struct {
+ c stack.Call
+}
+
+// String returns a path from the call stack that is relative to the root of the project
+func (pc pkgCaller) String() string {
+ return strings.TrimPrefix(fmt.Sprintf("%+v", pc.c), "github.com/Comcast/trickster/internal/")
+}
diff --git a/internal/util/log/log_test.go b/internal/util/log/log_test.go
new file mode 100644
index 000000000..5d442c26f
--- /dev/null
+++ b/internal/util/log/log_test.go
@@ -0,0 +1,248 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package log
+
+import (
+ "os"
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func TestConsoleLogger(t *testing.T) {
+
+ testCases := []string{
+ "debug",
+ "info",
+ "warn",
+ "error",
+ "trace",
+ "none",
+ }
+ // it should create a logger for each level
+ for _, tc := range testCases {
+ t.Run(tc, func(t *testing.T) {
+ l := ConsoleLogger(tc)
+ if l.level != tc {
+ t.Errorf("mismatch in log level: expected=%s actual=%s", tc, l.level)
+ }
+ })
+ }
+}
+
+func TestInit(t *testing.T) {
+
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogLevel: "info"}
+ Init()
+ if Logger.level != "info" {
+ t.Errorf("expected %s got %s", "info", Logger.level)
+ }
+}
+
+func TestNewLogger_LogFile(t *testing.T) {
+ fileName := "out.log"
+ instanceFileName := "out.1.log"
+ // it should create a logger that outputs to the instance-suffixed log file ("out.1.log")
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 1}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "info"}
+ Init()
+ Info("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(instanceFileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(instanceFileName)
+}
+
+func TestNewLoggerDebug_LogFile(t *testing.T) {
+ fileName := "out.debug.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "debug"}
+ Init()
+ Debug("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerWarn_LogFile(t *testing.T) {
+ fileName := "out.warn.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "warn"}
+ Init()
+ Warn("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerWarnOnce_LogFile(t *testing.T) {
+ fileName := "out.warnonce.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "x"}
+ Init()
+
+ key := "warnonce-test-key"
+
+ if HasWarnedOnce(key) {
+ t.Errorf("expected %t got %t", false, true)
+ }
+
+ ok := WarnOnce(key, "test entry", Pairs{"testKey": "testVal"})
+ if !ok {
+ t.Errorf("expected %t got %t", true, ok)
+ }
+
+ if !HasWarnedOnce(key) {
+ t.Errorf("expected %t got %t", true, false)
+ }
+
+ ok = WarnOnce(key, "test entry", Pairs{"testKey": "testVal"})
+ if ok {
+ t.Errorf("expected %t got %t", false, ok)
+ }
+
+ if !HasWarnedOnce(key) {
+ t.Errorf("expected %t got %t", true, false)
+ }
+
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerError_LogFile(t *testing.T) {
+ fileName := "out.error.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "error"}
+ Init()
+ Error("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerErrorOnce_LogFile(t *testing.T) {
+ fileName := "out.erroronce.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "x"}
+ Init()
+
+ ok := ErrorOnce("erroroonce-test-key", "test entry", Pairs{"testKey": "testVal"})
+ if !ok {
+ t.Errorf("expected %t got %t", true, ok)
+ }
+
+ ok = ErrorOnce("erroroonce-test-key", "test entry", Pairs{"testKey": "testVal"})
+ if ok {
+ t.Errorf("expected %t got %t", false, ok)
+ }
+
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerTrace_LogFile(t *testing.T) {
+ fileName := "out.trace.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "trace"}
+ Init()
+ Trace("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerDefault_LogFile(t *testing.T) {
+ fileName := "out.info.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "x"}
+ Init()
+ Info("test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerInfoOnce_LogFile(t *testing.T) {
+ fileName := "out.infoonce.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "info"}
+ Init()
+ ok := InfoOnce("infoonce-test-key", "test entry", Pairs{"testKey": "testVal"})
+ if !ok {
+ t.Errorf("expected %t got %t", true, ok)
+ }
+
+ ok = InfoOnce("infoonce-test-key", "test entry", Pairs{"testKey": "testVal"})
+ if ok {
+ t.Errorf("expected %t got %t", false, ok)
+ }
+
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+
+ Logger.Close()
+ os.Remove(fileName)
+}
+
+func TestNewLoggerFatal_LogFile(t *testing.T) {
+ fileName := "out.fatal.log"
+ // it should create a logger that outputs to the log file named above
+ config.Config = config.NewConfig()
+ config.Main = &config.MainConfig{InstanceID: 0}
+ config.Logging = &config.LoggingConfig{LogFile: fileName, LogLevel: "debug"}
+ Init()
+ Fatal(-1, "test entry", Pairs{"testKey": "testVal"})
+ if _, err := os.Stat(fileName); err != nil {
+ t.Errorf(err.Error())
+ }
+ Logger.Close()
+ os.Remove(fileName)
+}
diff --git a/internal/util/md5/checksum.go b/internal/util/md5/checksum.go
new file mode 100644
index 000000000..7e5d2c15e
--- /dev/null
+++ b/internal/util/md5/checksum.go
@@ -0,0 +1,24 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package md5
+
+import (
+ "crypto/md5"
+ "fmt"
+)
+
+// Checksum returns the calculated hex string version of the md5 checksum for the input string
+func Checksum(input string) string {
+ return fmt.Sprintf("%x", md5.Sum([]byte(input)))
+}
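
A one-line usage sketch (illustration only, as an example test in this md5 package); the expected digest matches the test below:

package md5

import "fmt"

func ExampleChecksum() {
	fmt.Println(Checksum("test"))
	// Output: 098f6bcd4621d373cade4e832627b4f6
}
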
diff --git a/internal/util/md5/checksum_test.go b/internal/util/md5/checksum_test.go
new file mode 100644
index 000000000..5faad8522
--- /dev/null
+++ b/internal/util/md5/checksum_test.go
@@ -0,0 +1,34 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package md5
+
+import "testing"
+
+func TestChecksum(t *testing.T) {
+
+ input := "test"
+ expected := "098f6bcd4621d373cade4e832627b4f6"
+ result := Checksum(input)
+ if expected != result {
+ t.Errorf("unexpected checksum for '%s', wanted %s got %s", input, expected, result)
+ }
+
+ input = ""
+ expected = "d41d8cd98f00b204e9800998ecf8427e"
+ result = Checksum(input)
+ if expected != result {
+ t.Errorf("unexpected checksum for '%s', wanted %s got %s", input, expected, result)
+ }
+
+}
diff --git a/internal/util/metrics/metrics.go b/internal/util/metrics/metrics.go
new file mode 100644
index 000000000..5fe8c4225
--- /dev/null
+++ b/internal/util/metrics/metrics.go
@@ -0,0 +1,326 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package metrics
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/util/log"
+)
+
+const (
+ metricNamespace = "trickster"
+ cacheSubsystem = "cache"
+ proxySubsystem = "proxy"
+ frontendSubsystem = "frontend"
+)
+
+// Default histogram buckets used by trickster
+var (
+ defaultBuckets = []float64{0.05, 0.1, 0.5, 1, 5, 10, 20}
+)
+
+// FrontendRequestStatus is a Counter of front end requests that have been processed with their status
+var FrontendRequestStatus *prometheus.CounterVec
+
+// FrontendRequestDuration is a histogram that tracks the time it takes to process a request
+var FrontendRequestDuration *prometheus.HistogramVec
+
+// FrontendRequestWrittenBytes is a Counter of bytes written for front end requests
+var FrontendRequestWrittenBytes *prometheus.CounterVec
+
+// ProxyRequestStatus is a Counter of downstream client requests handled by Trickster
+var ProxyRequestStatus *prometheus.CounterVec
+
+// ProxyRequestElements is a Counter of data points in the timeseries returned to the requesting client
+var ProxyRequestElements *prometheus.CounterVec
+
+// ProxyRequestDuration is a Histogram of time required in seconds to proxy a given Prometheus query
+var ProxyRequestDuration *prometheus.HistogramVec
+
+// CacheObjectOperations is a Counter of operations (in # of objects) performed on a Trickster cache
+var CacheObjectOperations *prometheus.CounterVec
+
+// CacheByteOperations is a Counter of operations (in # of bytes) performed on a Trickster cache
+var CacheByteOperations *prometheus.CounterVec
+
+// CacheEvents is a Counter of events performed on a Trickster cache
+var CacheEvents *prometheus.CounterVec
+
+// CacheObjects is a Gauge representing the number of objects in a Trickster cache
+var CacheObjects *prometheus.GaugeVec
+
+// CacheBytes is a Gauge representing the number of bytes in a Trickster cache
+var CacheBytes *prometheus.GaugeVec
+
+// CacheMaxObjects is a Gauge representing the Trickster cache's Max Object Threshold for triggering an eviction exercise
+var CacheMaxObjects *prometheus.GaugeVec
+
+// CacheMaxBytes is a Gauge representing the Trickster cache's Max Byte Threshold for triggering an eviction exercise
+var CacheMaxBytes *prometheus.GaugeVec
+
+// ProxyMaxConnections is a Gauge representing the max number of active concurrent connections in the server
+var ProxyMaxConnections prometheus.Gauge
+
+// ProxyActiveConnections is a Gauge representing the number of active connections in the server
+var ProxyActiveConnections prometheus.Gauge
+
+// ProxyConnectionRequested is a counter representing the total number of connections requested by clients to the Proxy
+var ProxyConnectionRequested prometheus.Counter
+
+// ProxyConnectionAccepted is a counter representing the total number of connections accepted by the Proxy
+var ProxyConnectionAccepted prometheus.Counter
+
+// ProxyConnectionClosed is a counter representing the total number of connections closed by the Proxy
+var ProxyConnectionClosed prometheus.Counter
+
+// ProxyConnectionFailed is a counter representing the total number of connections failed to connect for whatever reason
+var ProxyConnectionFailed prometheus.Counter
+
+var o sync.Once
+
+// Init initializes the instrumented metrics and starts the listener endpoint
+func Init() {
+ o.Do(initialize)
+}
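
A usage sketch (illustration only; the package is internal to the Trickster module, and the label values here are hypothetical). Init registers the collectors once, after which the exported vectors can be used with the standard Prometheus client calls:

package main

import "github.com/Comcast/trickster/internal/util/metrics"

func main() {
	metrics.Init()
	// Record one proxied request; the label order follows the CounterVec
	// definition below (origin_name, origin_type, method, cache_status,
	// http_status, path). These particular values are invented for the example.
	metrics.ProxyRequestStatus.WithLabelValues(
		"default", "prometheus", "GET", "hit", "200", "/api/v1/query_range").Inc()
}
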
+
+func initialize() {
+
+ FrontendRequestStatus = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: frontendSubsystem,
+ Name: "requests_total",
+ Help: "Count of front end requests handled by Trickster",
+ },
+ []string{"origin_name", "origin_type", "method", "path", "http_status"},
+ )
+
+ FrontendRequestDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Subsystem: frontendSubsystem,
+ Name: "requests_duration_seconds",
+ Help: "Histogram of front end request durations handled by Trickster",
+ Buckets: defaultBuckets,
+ },
+ []string{"origin_name", "origin_type", "method", "path", "http_status"},
+ )
+
+ FrontendRequestWrittenBytes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: frontendSubsystem,
+ Name: "written_bytes_total",
+ Help: "Count of bytes written in front end requests handled by Trickster",
+ },
+ []string{"origin_name", "origin_type", "method", "path", "http_status"})
+
+ ProxyRequestStatus = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "requests_total",
+ Help: "Count of downstream client requests handled by Trickster",
+ },
+ []string{"origin_name", "origin_type", "method", "cache_status", "http_status", "path"},
+ )
+
+ ProxyRequestElements = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "points_total",
+ Help: "Count of data points in the timeseries returned to the requesting client.",
+ },
+ []string{"origin_name", "origin_type", "cache_status", "path"},
+ )
+
+ ProxyRequestDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "request_duration_seconds",
+ Help: "Time required in seconds to proxy a given Prometheus query.",
+ Buckets: defaultBuckets,
+ },
+ []string{"origin_name", "origin_type", "method", "status", "http_status", "path"},
+ )
+
+ ProxyMaxConnections = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "max_connections",
+ Help: "Trickster max number of active connections.",
+ },
+ )
+
+ ProxyActiveConnections = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "active_connections",
+ Help: "Trickster number of active connections.",
+ },
+ )
+
+ ProxyConnectionRequested = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "requested_connections_total",
+ Help: "Trickster total number of connections requested by clients.",
+ },
+ )
+ ProxyConnectionAccepted = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "accepted_connections_total",
+ Help: "Trickster total number of accepted connections.",
+ },
+ )
+
+ ProxyConnectionClosed = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "closed_connections_total",
+ Help: "Trickster total number of closed connections.",
+ },
+ )
+
+ ProxyConnectionFailed = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: proxySubsystem,
+ Name: "failed_connections_total",
+ Help: "Trickster total number of failed connections.",
+ },
+ )
+
+ CacheObjectOperations = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "operation_objects_total",
+ Help: "Count (in # of objects) of operations performed on a Trickster cache.",
+ },
+ []string{"cache_name", "cache_type", "operation", "status"},
+ )
+
+ CacheByteOperations = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "operation_bytes_total",
+ Help: "Count (in bytes) of operations performed on a Trickster cache.",
+ },
+ []string{"cache_name", "cache_type", "operation", "status"},
+ )
+
+ CacheEvents = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "events_total",
+ Help: "Count of events performed on a Trickster cache.",
+ },
+ []string{"cache_name", "cache_type", "event", "reason"},
+ )
+
+ CacheObjects = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "usage_objects",
+ Help: "Number of objects in a Trickster cache.",
+ },
+ []string{"cache_name", "cache_type"},
+ )
+
+ CacheBytes = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "usage_bytes",
+ Help: "Number of bytes in a Trickster cache.",
+ },
+ []string{"cache_name", "cache_type"},
+ )
+
+ CacheMaxObjects = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "max_usage_objects",
+ Help: "Trickster cache's Max Object Threshold for triggering an eviction exercise.",
+ },
+ []string{"cache_name", "cache_type"},
+ )
+
+ CacheMaxBytes = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Subsystem: cacheSubsystem,
+ Name: "max_usage_bytes",
+ Help: "Trickster cache's Max Byte Threshold for triggering an eviction exercise.",
+ },
+ []string{"cache_name", "cache_type"},
+ )
+
+ // Register Metrics
+ prometheus.MustRegister(FrontendRequestStatus)
+ prometheus.MustRegister(FrontendRequestDuration)
+ prometheus.MustRegister(FrontendRequestWrittenBytes)
+ prometheus.MustRegister(ProxyRequestStatus)
+ prometheus.MustRegister(ProxyRequestElements)
+ prometheus.MustRegister(ProxyRequestDuration)
+ prometheus.MustRegister(ProxyMaxConnections)
+ prometheus.MustRegister(ProxyActiveConnections)
+ prometheus.MustRegister(ProxyConnectionRequested)
+ prometheus.MustRegister(ProxyConnectionAccepted)
+ prometheus.MustRegister(ProxyConnectionClosed)
+ prometheus.MustRegister(ProxyConnectionFailed)
+ prometheus.MustRegister(CacheObjectOperations)
+ prometheus.MustRegister(CacheByteOperations)
+ prometheus.MustRegister(CacheEvents)
+ prometheus.MustRegister(CacheObjects)
+ prometheus.MustRegister(CacheBytes)
+ prometheus.MustRegister(CacheMaxObjects)
+ prometheus.MustRegister(CacheMaxBytes)
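+
+	// These collectors are incremented by the proxy and cache layers; an
+	// illustrative (not actual) call site would look like:
+	//   ProxyRequestStatus.WithLabelValues("default", "prometheus", "GET", "hit", "200", "query_range").Inc()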
+
+ // Turn up the Metrics HTTP Server
+ if config.Metrics != nil && config.Metrics.ListenPort > 0 {
+ go func() {
+
+ log.Info("metrics http endpoint starting", log.Pairs{"address": config.Metrics.ListenAddress, "port": fmt.Sprintf("%d", config.Metrics.ListenPort)})
+
+ http.Handle("/metrics", promhttp.Handler())
+ if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.Metrics.ListenAddress, config.Metrics.ListenPort), nil); err != nil {
+ log.Error("unable to start metrics http server", log.Pairs{"detail": err.Error()})
+ os.Exit(1)
+ }
+ }()
+ }
+
+}
diff --git a/internal/util/middleware/config_context.go b/internal/util/middleware/config_context.go
new file mode 100644
index 000000000..4008d5f8e
--- /dev/null
+++ b/internal/util/middleware/config_context.go
@@ -0,0 +1,32 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/Comcast/trickster/internal/cache"
+ "github.com/Comcast/trickster/internal/config"
+ "github.com/Comcast/trickster/internal/proxy/context"
+ "github.com/Comcast/trickster/internal/proxy/origins"
+ "github.com/Comcast/trickster/internal/proxy/request"
+)
+
+// WithResourcesContext attaches a request Resources object (origin, path, cache and client
+// references) to the request context before invoking the next handler
+func WithResourcesContext(client origins.Client, oc *config.OriginConfig, c cache.Cache, p *config.PathConfig, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ resources := request.NewResources(oc, p, c.Configuration(), c, client)
+ next.ServeHTTP(w, r.WithContext(context.WithResources(r.Context(), resources)))
+ })
+}
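+
+// A hypothetical wiring sketch (handler and variable names are illustrative,
+// not part of this change): an origin registers its route handlers wrapped in
+// this middleware so downstream code can read the Resources back out of the
+// request context:
+//
+//	handler := WithResourcesContext(client, oc, cacheClient, pathConfig,
+//		http.HandlerFunc(queryRangeHandler))
+//	router.Handle("/api/v1/query_range", handler)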
diff --git a/internal/util/middleware/metrics.go b/internal/util/middleware/metrics.go
new file mode 100644
index 000000000..3672a0d17
--- /dev/null
+++ b/internal/util/middleware/metrics.go
@@ -0,0 +1,72 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package middleware
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/Comcast/trickster/internal/util/metrics"
+)
+
+// Decorate wraps an http.Handler so that the response status class, duration
+// and bytes written of each front-end request are observed and recorded in
+// the frontend request metrics
+func Decorate(originName, originType, path string, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ observer := &responseObserver{
+ w,
+ "unknown",
+ 0,
+ }
+
+ n := time.Now()
+ next.ServeHTTP(observer, r)
+
+ metrics.FrontendRequestDuration.WithLabelValues(originName, originType, r.Method, path, observer.status).Observe(time.Since(n).Seconds())
+ metrics.FrontendRequestStatus.WithLabelValues(originName, originType, r.Method, path, observer.status).Inc()
+ metrics.FrontendRequestWrittenBytes.WithLabelValues(originName, originType, r.Method, path, observer.status).Add(float64(observer.bytesWritten))
+ })
+}
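+
+// A minimal usage sketch (origin and path values are illustrative): wrapping a
+// handler with Decorate causes every request served through it to be observed
+// in the frontend request metrics registered in internal/util/metrics:
+//
+//	wrapped := Decorate("default", "prometheus", "/api/v1/query_range", queryRangeHandler)
+//	router.Handle("/api/v1/query_range", wrapped)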
+
+type responseObserver struct {
+ http.ResponseWriter
+
+ status string
+ bytesWritten float64
+}
+
+func (w *responseObserver) WriteHeader(statusCode int) {
+ w.ResponseWriter.WriteHeader(statusCode)
+ switch {
+	case statusCode >= 100 && statusCode < 200:
+		w.status = "1xx"
+	case statusCode >= 200 && statusCode < 300:
+		w.status = "2xx"
+	case statusCode >= 300 && statusCode < 400:
+		w.status = "3xx"
+	case statusCode >= 400 && statusCode < 500:
+		w.status = "4xx"
+	case statusCode >= 500 && statusCode < 600:
+		w.status = "5xx"
+ }
+}
+
+func (w *responseObserver) Write(b []byte) (int, error) {
+ bytesWritten, err := w.ResponseWriter.Write(b)
+
+ w.bytesWritten += float64(bytesWritten)
+
+ return bytesWritten, err
+}
diff --git a/internal/util/regexp/matching/matching.go b/internal/util/regexp/matching/matching.go
new file mode 100644
index 000000000..91feb0bae
--- /dev/null
+++ b/internal/util/regexp/matching/matching.go
@@ -0,0 +1,89 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+// Package matching provides patterns for processing regexp.Regexp matches
+package matching
+
+import "regexp"
+
+// GetNamedMatches returns a map of named submatch values for the provided Regexp and input string,
+// filtered to the provided names when the filter is non-empty. If a name matches more than once, the last match wins
+func GetNamedMatches(re *regexp.Regexp, input string, filter []string) map[string]string {
+
+ found := make(map[string]string)
+ if input == "" || re == nil {
+ return found
+ }
+
+ matches := re.FindStringSubmatch(input)
+ if len(matches) == 0 {
+ return found
+ }
+
+ have := re.SubexpNames()
+ if len(have) == 0 {
+ return found
+ }
+
+ have = have[1:]
+ matches = matches[1:]
+ useFilter := len(filter) > 0
+
+ // Go through the matches
+ for i, n := range matches {
+ if useFilter {
+ for _, name := range filter {
+ if name != "" && n != "" && have[i] == name {
+ found[name] = n
+ }
+ }
+ } else {
+ if have[i] != "" && n != "" {
+ found[have[i]] = n
+ }
+ }
+ }
+ return found
+}
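+
+// Example (illustrative): for the expression `(?P<bucket>[a-z]+)/(?P<object>[0-9]+)`
+// and the input "logs/42", GetNamedMatches(re, "logs/42", nil) yields
+// map[string]string{"bucket": "logs", "object": "42"}; passing []string{"object"}
+// as the filter yields only {"object": "42"}.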
+
+// GetNamedMatch returns the value of the named submatch for the provided Regexp and input string.
+// If the name matches more than once, the first match wins
+func GetNamedMatch(filter string, re *regexp.Regexp, input string) (string, bool) {
+
+ if input == "" || filter == "" || re == nil {
+ return "", false
+ }
+
+ matches := re.FindStringSubmatch(input)
+ if len(matches) == 0 {
+ return "", false
+ }
+
+ have := re.SubexpNames()
+ if len(have) == 0 {
+ return "", false
+ }
+
+ have = have[1:]
+ matches = matches[1:]
+
+ // Go through the matches
+ for i, n := range matches {
+ if filter != "" && have[i] == filter {
+ return n, true
+ }
+ }
+
+ return "", false
+
+}
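+
+// Example (illustrative): with the same expression as above,
+// GetNamedMatch("bucket", re, "logs/42") returns ("logs", true), while an
+// unmatched name such as "owner" returns ("", false).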
diff --git a/internal/util/regexp/matching/matching_test.go b/internal/util/regexp/matching/matching_test.go
new file mode 100644
index 000000000..a2c194313
--- /dev/null
+++ b/internal/util/regexp/matching/matching_test.go
@@ -0,0 +1,85 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package matching
+
+import (
+ "regexp"
+ "testing"
+)
+
+var testRegexp *regexp.Regexp
+
+func init() {
+	testRegexp = regexp.MustCompile(`(?P<value>test)`)
+}
+
+func TestGetNamedMatches(t *testing.T) {
+
+ m := GetNamedMatches(testRegexp, "i love tests!", nil)
+ if len(m) != 1 {
+ t.Errorf("expected %d got %d", 1, len(m))
+ }
+
+ m = GetNamedMatches(testRegexp, "", nil)
+ if len(m) != 0 {
+ t.Errorf("expected %d got %d", 0, len(m))
+ }
+
+ m = GetNamedMatches(testRegexp, "a", nil)
+ if len(m) != 0 {
+ t.Errorf("expected %d got %d", 0, len(m))
+ }
+
+ m = GetNamedMatches(testRegexp, "i love tests!", []string{"value"})
+ if len(m) != 1 {
+ t.Errorf("expected %d got %d", 1, len(m))
+ }
+
+}
+
+func TestGetNamedMatch(t *testing.T) {
+
+ s, b := GetNamedMatch("", testRegexp, "i love tests!")
+ if b {
+ t.Errorf("expected %t got %t", false, b)
+ }
+ if s != "" {
+ t.Errorf("expected %s got %s", "", s)
+ }
+
+ s, b = GetNamedMatch("value", testRegexp, "i love tests!")
+ if !b {
+ t.Errorf("expected %t got %t", true, b)
+ }
+ if s != "test" {
+ t.Errorf("expected %s got %s", "test", s)
+ }
+
+ s, b = GetNamedMatch("a", testRegexp, "i love tests!")
+ if b {
+ t.Errorf("expected %t got %t", false, b)
+ }
+ if s != "" {
+ t.Errorf("expected %s got %s", "", s)
+ }
+
+ s, b = GetNamedMatch("value", testRegexp, "a")
+ if b {
+ t.Errorf("expected %t got %t", false, b)
+ }
+ if s != "" {
+ t.Errorf("expected %s got %s", "", s)
+ }
+
+}
diff --git a/internal/util/strings/strings.go b/internal/util/strings/strings.go
new file mode 100644
index 000000000..4a93f6fb1
--- /dev/null
+++ b/internal/util/strings/strings.go
@@ -0,0 +1,33 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package strings
+
+// IndexOfString returns the index of the first occurrence of val in the given slice, or -1 if not present
+func IndexOfString(arr []string, val string) int {
+ for i, v := range arr {
+ if v == val {
+ return i
+ }
+ }
+ return -1
+}
+
+// CloneMap returns an exact copy of a map of string keys and values
+func CloneMap(in map[string]string) map[string]string {
+ out := make(map[string]string)
+ for k, v := range in {
+ out[k] = v
+ }
+ return out
+}
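+
+// Examples (illustrative):
+//   IndexOfString([]string{"a", "b"}, "b") returns 1; IndexOfString([]string{"a"}, "c") returns -1.
+//   CloneMap(map[string]string{"k": "v"}) returns a new map that can be mutated
+//   without affecting the original.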
diff --git a/internal/util/strings/strings_test.go b/internal/util/strings/strings_test.go
new file mode 100644
index 000000000..beb6b6d12
--- /dev/null
+++ b/internal/util/strings/strings_test.go
@@ -0,0 +1,50 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package strings
+
+import "testing"
+
+func TestIndexOfString(t *testing.T) {
+
+ arr := []string{"string0", "string1", "string2"}
+
+ i := IndexOfString(arr, "string0")
+ if i != 0 {
+ t.Errorf(`expected 0. got %d`, i)
+ }
+
+ i = IndexOfString(arr, "string3")
+ if i != -1 {
+ t.Errorf(`expected -1. got %d`, i)
+ }
+
+}
+
+func TestCloneMap(t *testing.T) {
+
+ const expected = "pass"
+
+ m := map[string]string{"test": expected}
+ m2 := CloneMap(m)
+
+ v, ok := m2["test"]
+ if !ok {
+ t.Errorf("expected true got %t", ok)
+ }
+
+ if v != expected {
+ t.Errorf("expected %s got %s", expected, v)
+ }
+
+}
diff --git a/internal/util/testing/testing.go b/internal/util/testing/testing.go
new file mode 100644
index 000000000..630042774
--- /dev/null
+++ b/internal/util/testing/testing.go
@@ -0,0 +1,145 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package testing
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "time"
+
+ cr "github.com/Comcast/trickster/internal/cache/registration"
+ "github.com/Comcast/trickster/internal/config"
+ tc "github.com/Comcast/trickster/internal/proxy/context"
+ th "github.com/Comcast/trickster/internal/proxy/headers"
+ "github.com/Comcast/trickster/internal/proxy/request"
+ "github.com/Comcast/trickster/internal/util/metrics"
+ "github.com/Comcast/trickster/pkg/promsim"
+ "github.com/Comcast/trickster/pkg/rangesim"
+)
+
+// NewTestServer returns a new httptest.Server that responds with the provided code, body and headers
+func NewTestServer(responseCode int, responseBody string, headers map[string]string) *httptest.Server {
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ th.UpdateHeaders(w.Header(), headers)
+ w.WriteHeader(responseCode)
+ fmt.Fprint(w, responseBody)
+ }
+ s := httptest.NewServer(http.HandlerFunc(handler))
+ return s
+}
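+
+// Typical use in a test (values are illustrative):
+//	ts := NewTestServer(200, `{"status":"ok"}`, map[string]string{"Cache-Control": "max-age=60"})
+//	defer ts.Close()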
+
+// NewTestWebClient returns a new *http.Client configured with reasonable defaults
+func NewTestWebClient() *http.Client {
+ return &http.Client{
+ Timeout: 30 * time.Second,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{KeepAlive: 300 * time.Second}).Dial,
+ MaxIdleConns: 20,
+ MaxIdleConnsPerHost: 20,
+ },
+ }
+}
+
+// NewTestInstance starts a simulated origin server and loads a Trickster test configuration
+// pointed at it, returning the origin server, a response recorder, a context-enriched request
+// and an HTTP client for use in handler tests, along with any setup error
+func NewTestInstance(
+ configFile string,
+ DefaultPathConfigs func(*config.OriginConfig) map[string]*config.PathConfig,
+ respCode int, respBody string, respHeaders map[string]string,
+ originType, urlPath, logLevel string,
+) (*httptest.Server, *httptest.ResponseRecorder, *http.Request, *http.Client, error) {
+
+ metrics.Init()
+
+ isBasicTestServer := false
+
+ var ts *httptest.Server
+ if originType == "promsim" {
+ ts = promsim.NewTestServer()
+ originType = "prometheus"
+ } else if originType == "rangesim" {
+ ts = rangesim.NewTestServer()
+ originType = "rpc"
+ } else {
+ isBasicTestServer = true
+ ts = NewTestServer(respCode, respBody, respHeaders)
+ }
+
+ args := []string{"-origin-url", ts.URL, "-origin-type", originType, "-log-level", logLevel}
+ if configFile != "" {
+ args = append(args, []string{"-config", configFile}...)
+ }
+
+ err := config.Load("trickster", "test", args)
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("Could not load configuration: %s", err.Error())
+ }
+
+ cr.LoadCachesFromConfig()
+ cache, err := cr.GetCache("default")
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ if !strings.HasPrefix(urlPath, "/") {
+ urlPath = "/" + urlPath
+ }
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", ts.URL+urlPath, nil)
+
+ oc := config.Origins["default"]
+ p := NewTestPathConfig(oc, DefaultPathConfigs, urlPath)
+
+ if !isBasicTestServer && respHeaders != nil {
+ p.ResponseHeaders = respHeaders
+ }
+
+ rsc := request.NewResources(oc, p, cache.Configuration(), cache, nil)
+ r = r.WithContext(tc.WithResources(r.Context(), rsc))
+
+ c := NewTestWebClient()
+
+ return ts, w, r, c, nil
+}
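+
+// A hypothetical call from an origin handler test (arguments are illustrative):
+//
+//	ts, w, r, c, err := NewTestInstance("", client.DefaultPathConfigs, 200, "{}",
+//		nil, "promsim", "/api/v1/query_range", "debug")
+//	if err != nil {
+//		t.Error(err)
+//	}
+//	defer ts.Close()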
+
+// NewTestPathConfig builds the origin's path configs (via DefaultPathConfigs when provided)
+// and returns the config matching urlPath, falling back to the origin's root path config
+func NewTestPathConfig(
+ oc *config.OriginConfig,
+ DefaultPathConfigs func(*config.OriginConfig) map[string]*config.PathConfig,
+ urlPath string,
+) *config.PathConfig {
+ var paths map[string]*config.PathConfig
+ if DefaultPathConfigs != nil {
+ paths = DefaultPathConfigs(oc)
+ }
+
+ oc.Paths = paths
+
+ var p *config.PathConfig
+ if len(paths) > 0 {
+ if p2, ok := paths[urlPath]; ok {
+ p = p2
+ } else {
+ p = paths["/"]
+ }
+ }
+
+ return p
+}
diff --git a/internal/util/testing/testing_test.go b/internal/util/testing/testing_test.go
new file mode 100644
index 000000000..d0a8cf015
--- /dev/null
+++ b/internal/util/testing/testing_test.go
@@ -0,0 +1,84 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package testing
+
+import (
+ "testing"
+
+ "github.com/Comcast/trickster/internal/config"
+)
+
+func TestNewTestServer(t *testing.T) {
+ s := NewTestServer(200, "OK", map[string]string{"Expires": "-1"})
+ if s == nil {
+ t.Errorf("Expected server pointer, got %v", s)
+ }
+
+}
+
+func TestNewTestWebClient(t *testing.T) {
+ s := NewTestWebClient()
+ if s == nil {
+ t.Errorf("Expected webclient pointer, got %v", s)
+ }
+}
+
+func TestNewTestInstance(t *testing.T) {
+ s, w, r, c, err := NewTestInstance("", nil, 200, "", nil, "test", "test", "debug")
+
+ if s == nil {
+ t.Errorf("Expected server pointer, got %v", "nil")
+ }
+
+	if r == nil {
+		t.Errorf("Expected request pointer, got %v", "nil")
+	}
+
+	if c == nil {
+		t.Errorf("Expected client pointer, got %v", "nil")
+	}
+
+	if w == nil {
+		t.Errorf("Expected recorder pointer, got %v", "nil")
+	}
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ // cover promsim conditional and path generation
+
+ f := func(*config.OriginConfig) map[string]*config.PathConfig {
+ return map[string]*config.PathConfig{
+ "path1": {},
+ "path2": {},
+ }
+ }
+
+ s, _, _, _, err = NewTestInstance("", f, 200, "", nil, "promsim", "test", "debug")
+ if s == nil {
+ t.Errorf("Expected server pointer, got %v", "nil")
+ }
+ if err != nil {
+ t.Error(err)
+ }
+
+ // cover config file provided
+
+ _, _, _, _, err = NewTestInstance("../../../testdata/test.full.conf", f, 200, "", nil, "promsim", "test", "debug")
+ if err == nil {
+ t.Errorf("Expected error, got %v", "nil")
+ }
+
+}
diff --git a/internal/util/tracing/tracing.go b/internal/util/tracing/tracing.go
new file mode 100644
index 000000000..4e0443aef
--- /dev/null
+++ b/internal/util/tracing/tracing.go
@@ -0,0 +1,14 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package tracing
diff --git a/internal/util/util.go b/internal/util/util.go
new file mode 100644
index 000000000..9f681a4c8
--- /dev/null
+++ b/internal/util/util.go
@@ -0,0 +1,14 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package util
diff --git a/logging.go b/logging.go
deleted file mode 100644
index d10f89f26..000000000
--- a/logging.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
- "github.com/go-stack/stack"
- "gopkg.in/natefinch/lumberjack.v2"
-)
-
-// newLogger returns a Logger for the provided logging configuration. The
-// returned Logger will write to files distinguished from other Loggers by the
-// instance string.
-func newLogger(cfg LoggingConfig, instance string) log.Logger {
- var wr io.Writer
-
- if cfg.LogFile == "" {
- wr = os.Stdout
- } else {
- logFile := cfg.LogFile
- if instance != "" {
- logFile = strings.Replace(logFile, ".log", "."+instance+".log", 1)
- }
-
- wr = &lumberjack.Logger{
- Filename: logFile,
- MaxSize: 256, // megabytes
- MaxBackups: 80, // 256 megs @ 80 backups is 20GB of Logs
- MaxAge: 7, // days
- Compress: true, // Compress Rolled Backups
- }
- }
-
- logger := log.NewLogfmtLogger(log.NewSyncWriter(wr))
- logger = log.With(logger,
- "time", log.DefaultTimestampUTC,
- "app", "trickster",
- "caller", log.Valuer(func() interface{} {
- return pkgCaller{stack.Caller(5)}
- }),
- )
-
- // wrap logger depending on log level
- switch strings.ToLower(cfg.LogLevel) {
- case "debug":
- logger = level.NewFilter(logger, level.AllowDebug())
- case "info":
- logger = level.NewFilter(logger, level.AllowInfo())
- case "warn":
- logger = level.NewFilter(logger, level.AllowWarn())
- case "error":
- logger = level.NewFilter(logger, level.AllowError())
- default:
- logger = level.NewFilter(logger, level.AllowInfo())
- }
-
- return logger
-}
-
-// pkgCaller wraps a stack.Call to make the default string output include the
-// package path.
-type pkgCaller struct {
- c stack.Call
-}
-
-func (pc pkgCaller) String() string {
- caller := fmt.Sprintf("%+v", pc.c)
- caller = strings.TrimPrefix(caller, "github.com/Comcast/trickster/")
- return caller
-}
diff --git a/main.go b/main.go
deleted file mode 100644
index e25ba77bb..000000000
--- a/main.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "net/http"
- _ "net/http/pprof"
- "os"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
- "github.com/gorilla/handlers"
- "github.com/gorilla/mux"
-)
-
-const (
- applicationName = "trickster"
- applicationVersion = "0.1.10"
-
- // Log fields
- lfEvent = "event"
- lfDetail = "detail"
- lfCacheKey = "cacheKey"
-
- // Prometheus API method names
- mnQueryRange = "query_range"
- mnQuery = "query"
- mnLabels = "label/__name__/values"
- mnHealth = "health"
-
- // Prometheus URL endpoints
- prometheusAPIv1Path = "/api/v1/"
-)
-
-func main() {
- t := &TricksterHandler{}
- t.ResponseChannels = make(map[string]chan *ClientRequestContext)
-
- t.Config = NewConfig()
- if err := loadConfiguration(t.Config, os.Args[1:]); err != nil {
- // using fmt.Println because logger can't be instantiated without the config loaded
- // to know the log path, and the config load just failed, so we just abort.
- fmt.Println("Could not load trickster configuration: ", err.Error())
- os.Exit(1)
- }
-
- if t.Config.Main.InstanceID > 0 {
- t.Logger = newLogger(t.Config.Logging, fmt.Sprint(t.Config.Main.InstanceID))
- } else {
- t.Logger = newLogger(t.Config.Logging, "")
- }
-
- level.Info(t.Logger).Log("event", "application startup", "version", applicationVersion)
-
- if t.Config.Profiler.Enabled {
- go exposeProfilerEndpoint(t.Config, t.Logger)
- }
-
- t.Metrics = NewApplicationMetrics()
- t.Metrics.ListenAndServe(t.Config, t.Logger)
-
- t.Cacher = getCache(t)
- if err := t.Cacher.Connect(); err != nil {
- level.Error(t.Logger).Log("event", "Unable to connect to Cache", "detail", err.Error())
- os.Exit(1)
- }
- defer t.Cacher.Close()
-
- router := mux.NewRouter()
-
- // Health Check Paths
- router.HandleFunc("/ping", t.pingHandler).Methods("GET")
- router.HandleFunc("/{originMoniker}/"+mnHealth, t.promHealthCheckHandler).Methods("GET")
- router.HandleFunc("/"+mnHealth, t.promHealthCheckHandler).Methods("GET")
-
- // Path-based multi-origin support - no support for full proxy of the prometheus UI, only querying
- router.HandleFunc("/{originMoniker}"+prometheusAPIv1Path+mnQueryRange, t.promQueryRangeHandler).Methods("GET", "POST")
- router.HandleFunc("/{originMoniker}"+prometheusAPIv1Path+mnQuery, t.promQueryHandler).Methods("GET", "POST")
- router.PathPrefix("/{originMoniker}" + prometheusAPIv1Path).HandlerFunc(t.promFullProxyHandler).Methods("GET")
-
- router.HandleFunc(prometheusAPIv1Path+mnQueryRange, t.promQueryRangeHandler).Methods("GET", "POST")
- router.HandleFunc(prometheusAPIv1Path+mnQuery, t.promQueryHandler).Methods("GET", "POST")
- router.PathPrefix(prometheusAPIv1Path).HandlerFunc(t.promFullProxyHandler).Methods("GET")
-
- // Catch All for Single-Origin proxy
- router.PathPrefix("/").HandlerFunc(t.promFullProxyHandler).Methods("GET")
-
- level.Info(t.Logger).Log("event", "proxy http endpoint starting", "address", t.Config.ProxyServer.ListenAddress, "port", t.Config.ProxyServer.ListenPort)
-
- // Start the Server
- if t.Config.TLS.Enabled {
- err := http.ListenAndServeTLS(fmt.Sprintf("%s:%d", t.Config.ProxyServer.ListenAddress, t.Config.ProxyServer.ListenPort), t.Config.TLS.FullChainCertPath, t.Config.TLS.PrivateKeyPath, handlers.CompressHandler(router))
- level.Error(t.Logger).Log("event", "exiting", "err", err)
- } else {
- err := http.ListenAndServe(fmt.Sprintf("%s:%d", t.Config.ProxyServer.ListenAddress, t.Config.ProxyServer.ListenPort), handlers.CompressHandler(router))
- level.Error(t.Logger).Log("event", "exiting", "err", err)
- }
-}
-
-func exposeProfilerEndpoint(c *Config, l log.Logger) {
- level.Info(l).Log("event", "profiler http endpoint starting", "port", c.Profiler.ListenPort)
- err := http.ListenAndServe(fmt.Sprintf(":%d", c.Profiler.ListenPort), nil)
- if err != nil {
- level.Error(l).Log("event", "error starting profiler http server", "detail", err.Error())
- }
-}
diff --git a/memory.go b/memory.go
deleted file mode 100644
index 006cd3101..000000000
--- a/memory.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/go-kit/kit/log/level"
-)
-
-// MemoryCache defines a a Memory Cache client that conforms to the Cache interface
-type MemoryCache struct {
- T *TricksterHandler
- client sync.Map
-}
-
-// CacheObject represents a Cached object as stored in the Memory Cache
-type CacheObject struct {
- Key string
- Value string
- Expiration int64
-}
-
-// Connect initializes the MemoryCache
-func (c *MemoryCache) Connect() error {
- level.Info(c.T.Logger).Log("event", "memorycache setup")
- c.client = sync.Map{}
- go c.Reap()
- return nil
-}
-
-// Store places an object in the cache using the specified key and ttl
-func (c *MemoryCache) Store(cacheKey string, data string, ttl int64) error {
- level.Debug(c.T.Logger).Log("event", "memorycache cache store", "key", cacheKey)
- c.client.Store(cacheKey, CacheObject{Key: cacheKey, Value: data, Expiration: time.Now().Unix() + ttl})
- return nil
-}
-
-// Retrieve looks for an object in cache and returns it (or an error if not found)
-func (c *MemoryCache) Retrieve(cacheKey string) (string, error) {
- record, ok := c.client.Load(cacheKey)
- if ok {
- level.Debug(c.T.Logger).Log("event", "memorycache cache retrieve", "key", cacheKey)
- return record.(CacheObject).Value, nil
- }
- return "", fmt.Errorf("Value for key [%s] not in cache", cacheKey)
-}
-
-// Reap continually iterates through the cache to find expired elements and removes them
-func (c *MemoryCache) Reap() {
- for {
- c.ReapOnce()
- time.Sleep(time.Duration(c.T.Config.Caching.ReapSleepMS) * time.Millisecond)
- }
-}
-
-// ReapOnce makes a single iteration through the cache to to find and remove expired elements
-func (c *MemoryCache) ReapOnce() {
- now := time.Now().Unix()
-
- c.client.Range(func(k, value interface{}) bool {
- if value.(CacheObject).Expiration < now {
- key := k.(string)
- level.Debug(c.T.Logger).Log("event", "memorycache cache reap", "key", key)
-
- c.T.ChannelCreateMtx.Lock()
- c.client.Delete(k)
-
- // Close out the channel if it exists
- if _, ok := c.T.ResponseChannels[key]; ok {
- close(c.T.ResponseChannels[key])
- delete(c.T.ResponseChannels, key)
- }
-
- c.T.ChannelCreateMtx.Unlock()
- }
- return true
- })
-}
-
-// Close is not used for MemoryCache, and is here to fully prototype the Cache Interface
-func (c *MemoryCache) Close() error {
- return nil
-}
diff --git a/memory_test.go b/memory_test.go
deleted file mode 100644
index 763694b3a..000000000
--- a/memory_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "testing"
-
- "github.com/go-kit/kit/log"
-)
-
-func setupMemoryCache() MemoryCache {
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1000}}
- tr := TricksterHandler{
- Logger: log.NewNopLogger(),
- ResponseChannels: make(map[string]chan *ClientRequestContext),
- Config: &cfg,
- }
- return MemoryCache{T: &tr}
-}
-
-func TestMemoryCache_Connect(t *testing.T) {
- mc := setupMemoryCache()
-
- // it should connect
- err := mc.Connect()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestMemoryCache_Store(t *testing.T) {
- mc := setupMemoryCache()
-
- err := mc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // it should store a value
- err = mc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestMemoryCache_Retrieve(t *testing.T) {
- mc := setupMemoryCache()
-
- err := mc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- err = mc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-
- // it should retrieve a value
- var data string
- data, err = mc.Retrieve("cacheKey")
- if err != nil {
- t.Error(err)
- }
- if data != "data" {
- t.Errorf("wanted \"%s\". got \"%s\"", "data", data)
- }
-}
-
-func TestMemoryCache_ReapOnce(t *testing.T) {
- mc := setupMemoryCache()
-
- err := mc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // fake an expired entry
- mc.Store("cacheKey", "data", -1000)
-
- // fake a response channel to reap
- ch := make(chan *ClientRequestContext, 100)
- mc.T.ResponseChannels["cacheKey"] = ch
-
- // it should remove empty response channel
- mc.ReapOnce()
-
- if mc.T.ResponseChannels["cacheKey"] != nil {
- t.Errorf("expected response channel to be removed")
- }
-}
-
-func TestMemoryCache_Close(t *testing.T) {
- mc := setupMemoryCache()
- mc.Close()
-}
diff --git a/metrics.go b/metrics.go
deleted file mode 100644
index 390ed8601..000000000
--- a/metrics.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "net/http"
- _ "net/http/pprof"
- "os"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-// ApplicationMetrics enumerates the metrics collected and reported by the trickster application.
-type ApplicationMetrics struct {
- CacheRequestStatus *prometheus.CounterVec
- CacheRequestElements *prometheus.CounterVec
- ProxyRequestDuration *prometheus.HistogramVec
-}
-
-// Unregister removes registered metrics from the Prometheus metrics instrumentation.
-func (metrics ApplicationMetrics) Unregister() {
- prometheus.Unregister(metrics.CacheRequestStatus)
- prometheus.Unregister(metrics.CacheRequestElements)
- prometheus.Unregister(metrics.ProxyRequestDuration)
-}
-
-// ListenAndServe Starts the HTTP Server for Prometheus Scraping
-func (metrics ApplicationMetrics) ListenAndServe(config *Config, logger log.Logger) {
- // Turn up the Metrics HTTP Server
- if config.Metrics.ListenPort > 0 {
- go func() {
-
- level.Info(logger).Log("event", "metrics http endpoint starting", "address", config.Metrics.ListenAddress, "port", fmt.Sprintf("%d", config.Metrics.ListenPort))
-
- http.Handle("/metrics", promhttp.Handler())
- if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.Metrics.ListenAddress, config.Metrics.ListenPort), nil); err != nil {
- level.Error(logger).Log("event", "unable to start metrics http server", "detail", err.Error())
- os.Exit(1)
- }
- }()
- }
-}
-
-// NewApplicationMetrics returns a ApplicationMetrics object and instantiates an HTTP server for polling them.
-func NewApplicationMetrics() *ApplicationMetrics {
- metrics := ApplicationMetrics{
- CacheRequestStatus: prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "trickster_requests_total",
- Help: "Count of the total number of requests Trickster has handled",
- },
- []string{"origin", "origin_type", "method", "status", "http_status"},
- ),
- CacheRequestElements: prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "trickster_points_total",
- Help: "Count of data points returned in a Prometheus query_range Request",
- },
- []string{"origin", "origin_type", "status"},
- ),
- ProxyRequestDuration: prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Name: "trickster_proxy_duration_seconds",
- Help: "Time required in seconds to proxy a given Prometheus query.",
- Buckets: []float64{0.05, 0.1, 0.5, 1, 5, 10, 20},
- },
- []string{"origin", "origin_type", "method", "status", "http_status"},
- ),
- }
-
- prometheus.MustRegister(metrics.CacheRequestStatus)
- prometheus.MustRegister(metrics.CacheRequestElements)
- prometheus.MustRegister(metrics.ProxyRequestDuration)
-
- return &metrics
-}
diff --git a/model.go b/model.go
deleted file mode 100644
index 4cb667d21..000000000
--- a/model.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "net/http"
- "net/url"
- "sync"
-
- "github.com/prometheus/common/model"
-)
-
-// PrometheusVectorEnvelope represents a Vector response object from the Prometheus HTTP API
-type PrometheusVectorEnvelope struct {
- Status string `json:"status"`
- Data PrometheusVectorData `json:"data"`
-}
-
-// PrometheusVectorData represents the Data body of a Vector response object from the Prometheus HTTP API
-type PrometheusVectorData struct {
- ResultType string `json:"resultType"`
- Result model.Vector `json:"result"`
-}
-
-// PrometheusMatrixEnvelope represents a Matrix response object from the Prometheus HTTP API
-type PrometheusMatrixEnvelope struct {
- Status string `json:"status"`
- Data PrometheusMatrixData `json:"data"`
-}
-
-// PrometheusMatrixData represents the Data body of a Matrix response object from the Prometheus HTTP API
-type PrometheusMatrixData struct {
- ResultType string `json:"resultType"`
- Result model.Matrix `json:"result"`
-}
-
-// ClientRequestContext contains the objects needed to fulfull a client request
-type ClientRequestContext struct {
- Request *http.Request
- Writer http.ResponseWriter
- CacheKey string
- CacheLookupResult string
- Matrix PrometheusMatrixEnvelope
- Origin PrometheusOriginConfig
- RequestParams url.Values
- RequestExtents MatrixExtents
- OriginUpperExtents MatrixExtents
- OriginLowerExtents MatrixExtents
- StepParam string
- StepMS int64
- Time int64
- WaitGroup sync.WaitGroup
-}
-
-// MatrixExtents describes the start and end epoch times (in ms) for a given range of data
-type MatrixExtents struct {
- Start int64
- End int64
-}
diff --git a/model_test.go b/model_test.go
deleted file mode 100644
index 0b2b318cd..000000000
--- a/model_test.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package main
-
-import (
- "reflect"
- "strconv"
- "testing"
-
- "github.com/prometheus/common/model"
-)
-
-func TestPrometheusMatrixEnvelope_CropToRange(t *testing.T) {
- tests := []struct {
- before, after PrometheusMatrixEnvelope
- start, end int64
- }{
- // Case where we trim nothing
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1544004600, 1.5},
- },
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1544004600, 1.5},
- },
- },
- },
- },
- },
- start: 0,
- end: 1644004600,
- },
- // Case where we trim everything (all data is too late)
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{1544004600, 1.5},
- },
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{},
- },
- },
- start: 0,
- end: 10,
- },
- // Case where we trim everything (all data is too early)
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{100, 1.5},
- },
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{},
- },
- },
- start: 10000,
- end: 20000,
- },
- // Case where we trim some off the beginning
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{99, 1.5},
- model.SamplePair{199, 1.5},
- model.SamplePair{299, 1.5},
- },
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{299, 1.5},
- },
- },
- },
- },
- },
- start: 200,
- end: 300,
- },
- // Case where we trim some off the end
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{99, 1.5},
- model.SamplePair{199, 1.5},
- model.SamplePair{299, 1.5},
- },
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{
- model.SamplePair{199, 1.5},
- },
- },
- },
- },
- },
- start: 100,
- end: 200,
- },
-
- // Case where we aren't given any datapoints
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{},
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{},
- },
- },
- start: 200,
- end: 300,
- },
-
- // Case where we have more series then points
- {
- before: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{
- &model.SampleStream{
- Metric: model.Metric{"__name__": "a"},
- Values: []model.SamplePair{model.SamplePair{99, 1.5}},
- },
- &model.SampleStream{
- Metric: model.Metric{"__name__": "b"},
- Values: []model.SamplePair{model.SamplePair{99, 1.5}},
- },
- },
- },
- },
- after: PrometheusMatrixEnvelope{
- Data: PrometheusMatrixData{
- ResultType: "matrix",
- Result: model.Matrix{},
- },
- },
- start: 200,
- end: 300,
- },
- }
-
- for i, test := range tests {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- test.before.cropToRange(test.start, test.end)
- if !reflect.DeepEqual(test.before, test.after) {
- t.Fatalf("mismatch\nexpected=%v\nactual=%v", test.after, test.before)
- }
- })
- }
-}
diff --git a/pkg/locks/locks.go b/pkg/locks/locks.go
new file mode 100644
index 000000000..514bee285
--- /dev/null
+++ b/pkg/locks/locks.go
@@ -0,0 +1,73 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package locks
+
+import (
+ "sync"
+)
+
+var locks = make(map[string]*namedLock)
+var mapLock = sync.Mutex{}
+
+type namedLock struct {
+ name string
+ mtx *sync.Mutex
+ queueSize int
+}
+
+func newNamedLock(name string) *namedLock {
+ return &namedLock{
+ name: name,
+ mtx: &sync.Mutex{},
+ }
+}
+
+// Acquire returns a named lock, and blocks until it is acquired
+func Acquire(lockName string) *sync.Mutex {
+
+ var nl *namedLock
+ var ok bool
+
+ if lockName == "" {
+ return nil
+ }
+
+ mapLock.Lock()
+ if nl, ok = locks[lockName]; !ok {
+ nl = newNamedLock(lockName)
+ locks[lockName] = nl
+ }
+ nl.queueSize++
+ mapLock.Unlock()
+ nl.mtx.Lock()
+ return nl.mtx
+}
+
+// Release unlocks and releases a named lock
+func Release(lockName string) {
+
+ if lockName == "" {
+ return
+ }
+
+ mapLock.Lock()
+ if nl, ok := locks[lockName]; ok {
+ nl.queueSize--
+ if nl.queueSize == 0 {
+ delete(locks, lockName)
+ }
+ nl.mtx.Unlock()
+ }
+ mapLock.Unlock()
+}
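+
+// A minimal usage sketch (the lock name is illustrative): callers serialize
+// work on a shared resource by acquiring and releasing the same name.
+//
+//	locks.Acquire("origin:default:" + cacheKey)
+//	defer locks.Release("origin:default:" + cacheKey)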
diff --git a/pkg/locks/locks_test.go b/pkg/locks/locks_test.go
new file mode 100644
index 000000000..196648f09
--- /dev/null
+++ b/pkg/locks/locks_test.go
@@ -0,0 +1,55 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package locks
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestLocks(t *testing.T) {
+
+ var testVal = 0
+
+ Acquire("test")
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ Acquire("test")
+ testVal += 10
+ Release("test")
+ wg.Done()
+ }()
+ testVal++
+ if testVal != 1 {
+ t.Errorf("expected 1 got %d", testVal)
+ }
+ time.Sleep(time.Second * 1)
+ Release("test")
+ wg.Wait()
+
+ if testVal != 11 {
+ t.Errorf("expected 11 got %d", testVal)
+ }
+
+ // Cover Empty String Cases
+ mtx := Acquire("")
+ if mtx != nil {
+ t.Errorf("expected nil got %v", mtx)
+ }
+ // Shouldn't matter but covers the code
+ Release("")
+
+}
diff --git a/pkg/promsim/http_server.go b/pkg/promsim/http_server.go
new file mode 100644
index 000000000..ee84d8bb0
--- /dev/null
+++ b/pkg/promsim/http_server.go
@@ -0,0 +1,184 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package promsim
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "time"
+)
+
+// NewTestServer launches a Test Prometheus Server (for unit testing)
+func NewTestServer() *httptest.Server {
+ return httptest.NewServer(MuxWithRoutes())
+}
+
+// MuxWithRoutes returns a ServeMux that includes the PromSim handlers already registered
+func MuxWithRoutes() *http.ServeMux {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/api/v1/query_range", queryRangeHandler)
+ mux.HandleFunc("/api/v1/query", queryHandler)
+ return mux
+}
+
+func queryRangeHandler(w http.ResponseWriter, r *http.Request) {
+
+ params := r.URL.Query()
+ q := params.Get("query")
+ s := params.Get("start")
+ e := params.Get("end")
+ p := params.Get("step")
+
+ var err error
+
+ if q != "" && s != "" && e != "" && p != "" {
+ var i int64
+ var start, end time.Time
+ var step time.Duration
+
+ start, err = parseTime(s)
+ if err != nil {
+ writeError(http.StatusBadRequest, []byte("unable to parse start time parameter"), w)
+ return
+ }
+
+ end, err = parseTime(e)
+ if err != nil {
+ writeError(http.StatusBadRequest, []byte("unable to parse end time parameter"), w)
+ return
+ }
+
+ i, err = parseDuration(p)
+ if err != nil {
+ writeError(http.StatusBadRequest, []byte(fmt.Sprintf("unable to parse step parameter: %s", p)), w)
+ return
+ }
+ step = time.Duration(i) * time.Second
+
+ json, code, _ := GetTimeSeriesData(q, start, end, step)
+
+ if code == http.StatusOK {
+ w.Header().Set("Content-Type", "application/json")
+ }
+
+ w.WriteHeader(code)
+
+ if code == http.StatusOK {
+ w.Write([]byte(json))
+ } else {
+ w.Write([]byte{})
+ }
+
+ return
+ }
+ writeError(http.StatusBadRequest, []byte("missing required parameter"), w)
+}
+
+func queryHandler(w http.ResponseWriter, r *http.Request) {
+
+ w.Header().Set("Content-Type", "application/json")
+
+ params := r.URL.Query()
+ q := params.Get("query")
+ t := params.Get("time")
+
+ if q != "" {
+ tm := time.Now()
+ if t != "" {
+ var err error
+ tm, err = parseTime(t)
+ if err != nil {
+ writeError(http.StatusBadRequest, []byte("unable to parse time parameter"), w)
+ return
+ }
+ }
+
+ json, code, _ := GetInstantData(q, tm)
+ w.WriteHeader(code)
+ w.Write([]byte(json))
+ return
+ }
+ writeError(http.StatusBadRequest, []byte("missing required parameter 'query'"), w)
+}
+
+func writeError(code int, body []byte, w http.ResponseWriter) {
+ w.WriteHeader(code)
+ w.Write(body)
+}
+
+// parseTime converts a query time URL parameter to time.Time.
+// Copied from https://github.com/prometheus/prometheus/blob/master/web/api/v1/api.go
+func parseTime(s string) (time.Time, error) {
+ if t, err := strconv.ParseFloat(s, 64); err == nil {
+ s, ns := math.Modf(t)
+ ns = math.Round(ns*1000) / 1000
+ return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
+ }
+ if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
+ return t, nil
+ }
+ return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
+}
+
+func parseDuration(input string) (int64, error) {
+
+ v, err := strconv.ParseInt(input, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ for i := range input {
+ if input[i] > 47 && input[i] < 58 {
+ continue
+ }
+ if input[i] == 46 {
+ break
+ }
+ if i > 0 {
+ units, ok := UnitMap[input[i:]]
+ if !ok {
+ return 0, durationError(input)
+ }
+ v, err := strconv.ParseInt(input[0:i], 10, 64)
+ if err != nil {
+ return 0, durationError(input)
+ }
+ v = v * units
+ return int64(time.Duration(v).Seconds()), nil
+ }
+ }
+ return 0, durationError(input)
+}
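+
+// For example (illustrative): parseDuration("15") and parseDuration("15s") both
+// yield 15, parseDuration("2m") yields 120, and unparseable input such as
+// "1.5h" or "abc" returns a duration error.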
+
+func durationError(input string) error {
+ return fmt.Errorf("cannot parse %q to a valid duration", input)
+}
+
+// UnitMap provides a map of common time unit indicators to nanoseconds of duration per unit
+var UnitMap = map[string]int64{
+ "ns": int64(time.Nanosecond),
+ "us": int64(time.Microsecond),
+ "µs": int64(time.Microsecond), // U+00B5 = micro symbol
+ "μs": int64(time.Microsecond), // U+03BC = Greek letter mu
+ "ms": int64(time.Millisecond),
+ "s": int64(time.Second),
+ "m": int64(time.Minute),
+ "h": int64(time.Hour),
+ "d": int64(24 * time.Hour),
+ "w": int64(24 * 7 * time.Hour),
+ "y": int64(24 * 365 * time.Hour),
+}
diff --git a/pkg/promsim/http_server_test.go b/pkg/promsim/http_server_test.go
new file mode 100644
index 000000000..e76301aa6
--- /dev/null
+++ b/pkg/promsim/http_server_test.go
@@ -0,0 +1,297 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package promsim
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestNewTestServer(t *testing.T) {
+ if NewTestServer() == nil {
+ t.Errorf("failed to get test server object")
+ }
+}
+
+func TestQueryRangeHandler(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query_range?query=up&start=0&end=30&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ const expected = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"series_id":"0"},"values":[[0,"29"],[15,"81"],[30,"23"]]}]}}`
+
+ if string(bodyBytes) != expected {
+ t.Errorf("expected %s got %s", expected, bodyBytes)
+ }
+
+ // Test with a duration that includes a unit of measurement
+ w = httptest.NewRecorder()
+ r = httptest.NewRequest("GET", "http://0/query_range?query=up&start=0&end=30&step=15s", nil)
+ queryRangeHandler(w, r)
+
+ resp = w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d", resp.StatusCode)
+ }
+
+ bodyBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(bodyBytes) != expected {
+ t.Errorf("expected %s got %s", expected, bodyBytes)
+ }
+
+}
+
+func TestQueryRangeHandlerFloatTime(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query_range?query=up&start=0.000&end=30.456&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ const expected = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"series_id":"0"},"values":[[0,"29"],[15,"81"],[30,"23"]]}]}}`
+
+ if string(bodyBytes) != expected {
+ t.Errorf("expected %s got %s", expected, bodyBytes)
+ }
+}
+
+func TestQueryRangeHandlerMissingParam(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query_range?q=up&start=0&end=30&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp := w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+}
+
+func TestQueryRangeHandlerInvalidParam(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query_range?query=up&start=foo&end=30&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp := w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+
+ w = httptest.NewRecorder()
+ r = httptest.NewRequest("GET", "http://0/query_range?query=up&start=0&end=foo&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp = w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+
+ w = httptest.NewRecorder()
+ r = httptest.NewRequest("GET", "http://0/query_range?query=up&start=0&end=30&step=foo", nil)
+ queryRangeHandler(w, r)
+
+ resp = w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+
+ w = httptest.NewRecorder()
+ r = httptest.NewRequest("GET", "http://0/query_range?query=up{status_code=400}&start=0&end=30&step=15", nil)
+ queryRangeHandler(w, r)
+
+ resp = w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+}
+
+func TestQueryHandler(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query?query=up&time=0", nil)
+ queryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ const expected = `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"series_id":"0"},"value":[0,"29"]}]}}`
+
+ if string(bodyBytes) != expected {
+ t.Errorf("expected %s got %s", expected, bodyBytes)
+ }
+}
+
+func TestQueryHandlerFloatTime(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query?query=up&time=30.456", nil)
+ queryHandler(w, r)
+
+ resp := w.Result()
+
+ // it should return 200 OK
+ if resp.StatusCode != 200 {
+ t.Errorf("expected 200 got %d", resp.StatusCode)
+ }
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ const expected = `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"series_id":"0"},"value":[30,"23"]}]}}`
+
+ if string(bodyBytes) != expected {
+ t.Errorf("expected %s got %s", expected, bodyBytes)
+ }
+}
+
+func TestQueryHandlerMissingParam(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query?q=up", nil)
+ queryHandler(w, r)
+ resp := w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+}
+
+func TestQueryHandlerInvalidParam(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "http://0/query?query=up&time=foo", nil)
+ queryHandler(w, r)
+ resp := w.Result()
+
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("expected %d got %d", http.StatusBadRequest, resp.StatusCode)
+ }
+}
+
+func TestParseTime(t *testing.T) {
+
+ const time1 = "2006-01-02T15:04:05.999999999Z"
+ _, err := parseTime(time1)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestParseDuration(t *testing.T) {
+
+ // Test inferred seconds
+ d, err := parseDuration("15")
+ if err != nil {
+ t.Error(err)
+ }
+ if d != 15 {
+ t.Errorf("expected %d got %d", 15, d)
+ }
+
+ // Test unit of h
+ d, err = parseDuration("1h")
+ if err != nil {
+ t.Error(err)
+ }
+ if d != 3600 {
+ t.Errorf("expected %d got %d", 3600, d)
+ }
+
+ // Test invalid unit
+ d, err = parseDuration("1x")
+ if err == nil {
+ t.Errorf("expected parseDuration error for input [%s] got [%d]", "1x", d)
+ }
+
+ // Test decimal
+ d, err = parseDuration("1.3")
+ if err == nil {
+ t.Errorf("expected parseDuration error for input [%s] got [%d]", "1.3", d)
+ }
+
+ // Test empty
+ d, err = parseDuration("")
+ if err == nil {
+ t.Errorf("expected parseDuration error for input [%s] got [%d]", "", d)
+ }
+
+ // Test Invalid
+ d, err = parseDuration("1s1t")
+ if err == nil {
+	t.Errorf("expected parseDuration error for input [%s] got [%d]", "1s1t", d)
+ }
+
+ // Test Valid Units but No Value
+ d, err = parseDuration("s")
+ if err == nil {
+	t.Errorf("expected parseDuration error for input [%s] got [%d]", "s", d)
+ }
+
+ // Test Negative
+ _, err = parseDuration("-1")
+ if err != nil {
+ t.Error(err)
+ }
+
+}
diff --git a/pkg/promsim/promsim.go b/pkg/promsim/promsim.go
new file mode 100644
index 000000000..f801decfc
--- /dev/null
+++ b/pkg/promsim/promsim.go
@@ -0,0 +1,278 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+// Package promsim is a rudimentary Prometheus HTTP APIv1 output simulator,
+// intended for use with unit testing that would normally require a running Prometheus server.
+// PromSim outputs repeatable, Prometheus-formatted data, synthetically generated from query and timestamp.
+// It does not validate queries and does not produce output that accurately depicts the data shapes expected of the query,
+// so the results will probably look rather ugly on an actual graph.
+// PromSim currently supports only matrix responses to a query_range request.
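+// An illustrative query that sets modifiers might look like: up{series_count=2,latency_ms=10,max_value=50}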
+package promsim
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ lpRepeatableRandom = "repeatable_random"
+ lpUsageCurve = "usage_curve"
+ secondsPerDay = 86400
+)
+
+const (
+ mdSeriesCount = "series_count"
+ mdLatency = "latency_ms"
+ mdRangeLatency = "range_latency_ms"
+ mdMaxVal = "max_value"
+ mdMinVal = "min_value"
+ mdSeriesID = "series_id"
+ mdStatusCode = "status_code"
+ mdInvalidBody = "invalid_response_body"
+ mdLinePattern = "line_pattern"
+)
+
+// Modifiers represents a collection of modifiers for the simulator's behavior provided by the user
+type Modifiers struct {
+ // SeriesCount defines how many series to return
+ SeriesCount int
+ // Latency introduces a static delay in responding to each request
+ Latency time.Duration
+ // RangeLatency introduces an additional delay as a multiple of the number of timestamps in the series
+ RangeLatency time.Duration
+ // MaxValue limits the maximum value of any data in the query result
+ MaxValue int
+ // MinValue limits the minimum value of any data in the query result
+ MinValue int
+ // StatusCode indicates the desired return status code, to simulate errors
+ StatusCode int
+ // InvalidResponseBody when > 0 causes the server to respond with a payload that cannot be unmarshaled
+	// useful for causing and testing unmarshaling failure cases
+ InvalidResponseBody int
+ // LinePattern indicates the pattern/shape of the resulting timeseries
+ LinePattern string
+
+ rawString string
+ seriesID int
+ seedFunc func(d *Modifiers, seriesIndex int, querySeed int64, t time.Time) int
+}
+
+// Result is a simplified version of a Prometheus timeseries response
+type Result struct {
+ Metric string
+ Values string
+}
+
+// GetInstantData returns a simulated Vector Envelope with repeatable results
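+// A zero time value is replaced with time.Now().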
+func GetInstantData(query string, t time.Time) (string, int, error) {
+
+ if t.IsZero() {
+ t = time.Now()
+ }
+
+ d := getModifiers(query)
+ if d.Latency > 0 {
+ time.Sleep(d.Latency)
+ }
+
+ if d.InvalidResponseBody > 0 {
+ return "foo", d.StatusCode, nil
+ }
+
+ status := "success"
+ series := make([]string, 0, d.SeriesCount)
+ queryVal := getQueryVal(query)
+
+ for i := 0; d.SeriesCount > i; i++ {
+ d1 := &Modifiers{rawString: d.rawString}
+ d1.addLabel(fmt.Sprintf(`"%s":"%d"`, mdSeriesID, i))
+ series = append(series, fmt.Sprintf(`{"metric":{%s},"value":[%d,"%d"]}`, d1.rawString, t.Unix(), d.seedFunc(d, i, queryVal, t)))
+ }
+ return fmt.Sprintf(`{"status":"%s","data":{"resultType":"vector","result":[`, status) + strings.Join(series, ",") + "]}}", d.StatusCode, nil
+}
+
+// GetTimeSeriesData returns a simulated Matrix Envelope with repeatable results
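+// For example (illustrative), GetTimeSeriesData(`up{series_count=2}`, time.Unix(0, 0), time.Unix(60, 0), 30*time.Second)
+// returns a two-series matrix with values at timestamps 0, 30 and 60.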
+func GetTimeSeriesData(query string, start time.Time, end time.Time, step time.Duration) (string, int, error) {
+
+ d := getModifiers(query)
+
+ if d.Latency > 0 {
+ time.Sleep(d.Latency)
+ }
+
+ if d.InvalidResponseBody > 0 {
+ return "foo", d.StatusCode, nil
+ }
+
+ status := "success"
+ seriesLen := int(end.Sub(start) / step)
+ start = end.Add(time.Duration(-seriesLen) * step)
+ queryVal := getQueryVal(query)
+
+ var b strings.Builder
+ b.Grow(d.SeriesCount * seriesLen * 18)
+ sep1 := ","
+ fmt.Fprintf(&b, `{"status":"%s","data":{"resultType":"matrix","result":[`, status)
+ for i := 0; d.SeriesCount > i; i++ {
+ sep2 := ","
+ if i == d.SeriesCount-1 {
+ sep1 = ""
+ }
+ d1 := &Modifiers{rawString: d.rawString}
+ d1.addLabel(fmt.Sprintf(`"%s":"%d"`, mdSeriesID, i))
+		fmt.Fprintf(&b, `{"metric":{%s},"values":[`, d1.rawString)
+ for j := 0; j <= seriesLen; j++ {
+ if j == seriesLen {
+ sep2 = ""
+ }
+ t := start.Add(time.Duration(j) * step)
+ fmt.Fprintf(&b, `[%d,"%d"]%s`, t.Unix(), d.seedFunc(d, i, queryVal, t), sep2)
+ }
+		fmt.Fprintf(&b, `]}%s`, sep1)
+ }
+ b.WriteString("]}}")
+
+ return b.String(), d.StatusCode, nil
+}
+
+func getModifiers(query string) *Modifiers {
+
+ var err error
+ var i int64
+
+ d := &Modifiers{
+ InvalidResponseBody: 0,
+ Latency: 0,
+ RangeLatency: 0,
+ MaxValue: 100,
+ MinValue: 0,
+ SeriesCount: 1,
+ seriesID: 0,
+ StatusCode: 200,
+ LinePattern: lpRepeatableRandom,
+ seedFunc: repeatableRandomVal,
+ }
+
+ provided := []string{}
+
+ start := strings.Index(query, "{")
+ if start > -1 {
+ start++
+ end := strings.LastIndex(query, "}")
+ if end > start {
+ mods := strings.Split(query[start:end], ",")
+ for _, mod := range mods {
+ parts := strings.SplitN(mod, "=", 2)
+ if len(parts) != 2 {
+ provided = append(provided, fmt.Sprintf(`"%s":""`, mod))
+ continue
+ }
+ parts[1] = strings.Replace(parts[1], `"`, ``, -1)
+ i, err = strconv.ParseInt(parts[1], 10, 64)
+ if err != nil {
+ switch parts[0] {
+ case mdLinePattern:
+ d.LinePattern = parts[1]
+ }
+ } else {
+ switch parts[0] {
+ case mdSeriesCount:
+ d.SeriesCount = int(i)
+ case mdLatency:
+ d.Latency = time.Duration(i) * time.Millisecond
+ case mdRangeLatency:
+ d.RangeLatency = time.Duration(i) * time.Millisecond
+ case mdMaxVal:
+ d.MaxValue = int(i)
+ case mdMinVal:
+ d.MinValue = int(i)
+ case mdSeriesID:
+ d.seriesID = int(i)
+ case mdStatusCode:
+ d.StatusCode = int(i)
+ case mdInvalidBody:
+ d.InvalidResponseBody = int(i)
+ }
+ }
+ provided = append(provided, fmt.Sprintf(`"%s":"%s"`, parts[0], parts[1]))
+ }
+ }
+ }
+
+ // this determines, based on the provided LinePattern, what value generator function to call
+ // if the LinePattern is not provided, or the pattern name is not registered below in seedFuncs
+ // then the repeatable random value generator func will be used
+ if lp, ok := seedFuncs[d.LinePattern]; ok {
+ d.seedFunc = lp
+ }
+
+ d.rawString = strings.Join(provided, ",")
+ return d
+}
+
+var seedFuncs = map[string]func(d *Modifiers, seriesIndex int, querySeed int64, t time.Time) int{
+ lpRepeatableRandom: repeatableRandomVal,
+ lpUsageCurve: usageCurveVal,
+}
+
+func repeatableRandomVal(d *Modifiers, seriesIndex int, querySeed int64, t time.Time) int {
+ if d.RangeLatency > 0 {
+ time.Sleep(d.RangeLatency)
+ }
+ rand.Seed(querySeed + int64(seriesIndex) + t.Unix())
+ return d.MinValue + rand.Intn(d.MaxValue-d.MinValue)
+}
+
+func usageCurveVal(d *Modifiers, seriesIndex int, querySeed int64, t time.Time) int {
+ if d.RangeLatency > 0 {
+ time.Sleep(d.RangeLatency)
+ }
+
+ // Scale the max randomly if it is not index 0
+ max := d.MaxValue
+ if seriesIndex != 0 {
+ rand.Seed(int64(seriesIndex) + querySeed)
+ scale := rand.Float32()*.5 + .5
+ max = int(float32(max-d.MinValue)*scale) + d.MinValue
+ }
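+	// Model the value as a cosine wave over a 24-hour period of local time,
+	// so the generated series repeats daily.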
+ _, offset := t.Zone()
+ seconds := (t.Unix() + int64(offset)) % secondsPerDay
+ A := float64(max-d.MinValue) / 2 // Amplitude
+ B := math.Pi * 2 / secondsPerDay // Period
+ C := 4.0 * 3600.0 // Phase shift back to 8pm
+ D := A + float64(d.MinValue) // Vertical shift
+ return int(A*math.Cos(B*(float64(seconds)+C)) + D)
+}
+
+// getQueryVal calculates a repeatable numeric seed derived from the query string
+func getQueryVal(query string) int64 {
+ l := len(query)
+ var v int64
+ for i := 0; i < l; i++ {
+ v += int64(query[i])
+ }
+ v = v * v * v
+ return v
+}
+
+func (d *Modifiers) addLabel(in string) {
+ if len(d.rawString) == 0 {
+ d.rawString = in
+ return
+ }
+ d.rawString += "," + in
+}
diff --git a/pkg/promsim/promsim_test.go b/pkg/promsim/promsim_test.go
new file mode 100644
index 000000000..5c436d2ac
--- /dev/null
+++ b/pkg/promsim/promsim_test.go
@@ -0,0 +1,155 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package promsim
+
+import (
+ "net/http"
+ "testing"
+ "time"
+)
+
+const testQuery = `myQuery{other_label=5,latency_ms=1,range_latency_ms=1,series_count=1,test}`
+const expectedRangeOutput = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"other_label":"5","latency_ms":"1","range_latency_ms":"1","series_count":"1","test":"","series_id":"0"},"values":[[0,"25"],[1800,"92"],[3600,"89"]]}]}}`
+const expectedInstantOutput = `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"other_label":"5","latency_ms":"1","range_latency_ms":"1","series_count":"1","test":"","series_id":"0"},"value":[0,"25"]}]}}`
+
+const testQueryUsageCurve = `myQuery{other_label=5,latency_ms=1,range_latency_ms=1,series_count=1,line_pattern="usage_curve",test}`
+const expectedRangeUsageCurveOutput = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"other_label":"5","latency_ms":"1","range_latency_ms":"1","series_count":"1","line_pattern":"usage_curve","test":"","series_id":"0"},"values":[[233438400,"100"],[233481600,"0"],[233524800,"100"]]}]}}`
+
+const testQueryUsageCurve2 = `myQuery{other_label=5,latency_ms=1,range_latency_ms=1,series_count=2,line_pattern="usage_curve",test}`
+const expectedRangeUsageCurve2Output = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"other_label":"5","latency_ms":"1","range_latency_ms":"1","series_count":"2","line_pattern":"usage_curve","test":"","series_id":"0"},"values":[[233438400,"100"],[233481600,"0"],[233524800,"100"]]},{"metric":{"other_label":"5","latency_ms":"1","range_latency_ms":"1","series_count":"2","line_pattern":"usage_curve","test":"","series_id":"1"},"values":[[233438400,"72"],[233481600,"0"],[233524800,"72"]]}]}}`
+
+const testQueryInvalidResponse = "myQuery{invalid_response_body=1}"
+const expectedInvalidResponse = "foo"
+
+const testFullQuery = `myQuery{other_label=a5,max_value=1,min_value=1,series_id=1,status_code=200,line_pattern="usage_curve",test}`
+const expectedFullRawstring = `"other_label":"a5","max_value":"1","min_value":"1","series_id":"1","status_code":"200","line_pattern":"usage_curve","test":""`
+
+func TestGetTimeSeriesDataRandomVals(t *testing.T) {
+ out, code, err := GetTimeSeriesData(testQuery, time.Unix(0, 0), time.Unix(3600, 0), time.Duration(1800)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedRangeOutput {
+ t.Errorf("expected %s got %s", expectedRangeOutput, out)
+ }
+}
+
+func TestGetTimeSeriesDataUsageCurve(t *testing.T) {
+ start := time.Date(1977, 5, 25, 20, 0, 0, 0, time.UTC)
+ end := time.Date(1977, 5, 26, 20, 0, 0, 0, time.UTC)
+ out, code, err := GetTimeSeriesData(testQueryUsageCurve, start, end, time.Duration(secondsPerDay/2)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedRangeUsageCurveOutput {
+ t.Errorf("expected %s got %s", expectedRangeUsageCurveOutput, out)
+ }
+
+ out, code, err = GetTimeSeriesData(testQueryUsageCurve2, start, end, time.Duration(secondsPerDay/2)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedRangeUsageCurve2Output {
+ t.Errorf("expected %s got %s", expectedRangeUsageCurve2Output, out)
+ }
+
+}
+
+func TestGetTimeSeriesDataInvalidResponseBody(t *testing.T) {
+ out, code, err := GetTimeSeriesData(testQueryInvalidResponse, time.Unix(0, 0), time.Unix(3600, 0), time.Duration(1800)*time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedInvalidResponse {
+ t.Errorf("expected %s got %s", expectedInvalidResponse, out)
+ }
+}
+
+func TestGetInstantData(t *testing.T) {
+ out, code, err := GetInstantData(testQuery, time.Unix(0, 0))
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedInstantOutput {
+ t.Errorf("expected %s got %s", expectedInstantOutput, out)
+ }
+}
+
+func TestGetInstantDataInvalidResponseBody(t *testing.T) {
+ out, code, err := GetInstantData(testQueryInvalidResponse, time.Time{})
+ if err != nil {
+ t.Error(err)
+ }
+
+ if code != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, code)
+ }
+
+ if out != expectedInvalidResponse {
+ t.Errorf("expected %s got %s", expectedInvalidResponse, out)
+ }
+}
+
+func TestAddLabel(t *testing.T) {
+
+ d := &Modifiers{}
+
+ const label1 = "test1"
+ const label2 = "test2"
+ const labels = "test1,test2"
+
+ d.addLabel(label1)
+ if d.rawString != label1 {
+ t.Errorf("expected %s got %s", label1, d.rawString)
+ }
+
+ d.addLabel(label2)
+ if d.rawString != labels {
+ t.Errorf("expected %s got %s", labels, d.rawString)
+ }
+}
+
+func TestGetModifiers(t *testing.T) {
+
+ d := getModifiers(testFullQuery)
+ if d.rawString != expectedFullRawstring {
+ t.Errorf("expected %s got %s", expectedFullRawstring, d.rawString)
+ }
+
+}
diff --git a/pkg/rangesim/http_server.go b/pkg/rangesim/http_server.go
new file mode 100644
index 000000000..06dfad28e
--- /dev/null
+++ b/pkg/rangesim/http_server.go
@@ -0,0 +1,163 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package rangesim
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "time"
+)
+
+func writeError(code int, body []byte, w http.ResponseWriter) {
+ w.WriteHeader(code)
+ w.Write(body)
+}
+
+// NewTestServer launches a test HTTP server supporting Range Requests (for unit testing)
+func NewTestServer() *httptest.Server {
+ return httptest.NewServer(MuxWithRoutes())
+}
+
+// MuxWithRoutes returns a ServeMux with the RangeSim handler already registered
+func MuxWithRoutes() *http.ServeMux {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", handler)
+ return mux
+}
+
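+// customStatuses maps the value of the "status" (and "ims"/"non-ims") query
+// parameters to the HTTP status code the handler should simulate.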
+var customStatuses = map[string]int{
+ "200": http.StatusOK,
+ "206": http.StatusPartialContent,
+ "304": http.StatusNotModified,
+ "404": http.StatusNotFound,
+ "500": http.StatusInternalServerError,
+ "400": http.StatusBadRequest,
+ "412": http.StatusRequestedRangeNotSatisfiable,
+}
+
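+// handler serves the test body, honoring Range and If-Modified-Since headers
+// as well as the status, ims, non-ims and max-age query parameters used in tests.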
+func handler(w http.ResponseWriter, r *http.Request) {
+
+ rh := r.Header
+ h := w.Header()
+
+	// handle custom response code requested by the client for testing purposes
+
+ customCode := 0
+
+ var code int
+ var ok bool
+
+ // user can send max-age=XX to define a custom max-age header
+ rMaxAge := maxAge
+ if v := r.URL.Query().Get("max-age"); v != "" {
+ if i, err := strconv.ParseInt(v, 10, 64); err == nil {
+ if i > 0 {
+ rMaxAge = fmt.Sprintf("max-age=%d", i)
+ } else {
+ rMaxAge = ""
+ }
+ } else {
+ rMaxAge = ""
+ }
+ }
+
+ if code, ok = customStatuses[r.URL.Query().Get("status")]; ok {
+ customCode = code
+ // if the user custom-requested 200, go ahead and return the full body
+ // to do that, we'll delete any IMS and Range headers from the client
+ if code == http.StatusOK {
+ rh.Del(hnIfModifiedSince)
+ rh.Del(hnRange)
+ }
+ }
+
+ if customCode == 0 {
+ // if the client is revalidating and their copy is still fresh
+ // reply w/ a 304 Not Modified
+ if ims := rh.Get(hnIfModifiedSince); ims != "" {
+
+ // for testing a 200 OK only when the user sends an IMS
+ if code, ok := customStatuses[r.URL.Query().Get("ims")]; ok {
+ customCode = code
+ if code == http.StatusOK {
+ rh.Del(hnRange)
+ }
+
+ } else {
+
+ t, err := time.Parse(time.RFC1123, ims)
+ if err == nil && (!lastModified.After(t)) {
+ w.WriteHeader(http.StatusNotModified)
+ return
+ }
+ }
+ } else if code, ok := customStatuses[r.URL.Query().Get("non-ims")]; ok {
+ // for testing a 200 OK only when the user does _not_ send IMS
+ customCode = code
+ if code == http.StatusOK {
+ rh.Del(hnRange)
+ }
+ }
+ }
+
+ if customCode > 299 {
+ w.WriteHeader(customCode)
+ return
+ }
+
+ // add some cacheability headers
+ if rMaxAge != "" {
+ h.Add(hnCacheControl, rMaxAge)
+ }
+
+ h.Add(hnLastModified, lastModified.UTC().Format(time.RFC1123))
+
+ if customCode == http.StatusOK {
+ w.WriteHeader(customCode)
+ }
+
+ // Handle Range Request Cases
+ if cr := r.Header.Get(hnRange); cr != "" {
+ ranges := parseRangeHeader(cr)
+ lr := len(ranges)
+ if ranges != nil && lr > 0 {
+ if ranges.validate() {
+ // Handle Single Range in Request
+ if lr == 1 {
+ h.Add(hnContentRange, ranges[0].contentRangeHeader())
+ h.Set(hnContentType, contentType)
+ w.WriteHeader(http.StatusPartialContent)
+				fmt.Fprint(w, Body[ranges[0].start:ranges[0].end+1])
+ return
+ }
+ // Handle Multiple Ranges in Request
+ h.Set(hnContentType, hvMultipartByteRange+separator)
+ w.WriteHeader(http.StatusPartialContent)
+ ranges.writeMultipartResponse(w)
+ return
+ }
+ w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+		// TODO: write correct response indicating what was wrong with the range.
+ return
+
+ }
+ }
+
+ // Handle Full Body Case
+ h.Set(hnAcceptRanges, "bytes")
+ h.Set(hnContentType, contentType)
+ w.Write([]byte(Body))
+}
diff --git a/pkg/rangesim/http_server_test.go b/pkg/rangesim/http_server_test.go
new file mode 100644
index 000000000..e647b25e9
--- /dev/null
+++ b/pkg/rangesim/http_server_test.go
@@ -0,0 +1,187 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package rangesim
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/Comcast/trickster/internal/proxy/headers"
+)
+
+func TestNewTestServer(t *testing.T) {
+ if NewTestServer() == nil {
+ t.Errorf("failed to get test server object")
+ }
+}
+
+func TestWriteError(t *testing.T) {
+
+ w := httptest.NewRecorder()
+ writeError(http.StatusNotFound, []byte("Not Found"), w)
+
+ r := w.Result()
+ if r.StatusCode != http.StatusNotFound {
+ t.Errorf("expected %d got %d", http.StatusNotFound, r.StatusCode)
+ }
+
+}
+
+func TestHandlerCustomizations(t *testing.T) {
+
+ // test invalid max age
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/?max-age=a", nil)
+ w := httptest.NewRecorder()
+ handler(w, r)
+
+ if v := w.Header().Get(hnCacheControl); v != "" {
+ t.Errorf("expected %s got %s", "", v)
+ }
+
+ // test max age 0
+ r, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/?max-age=0", nil)
+ w = httptest.NewRecorder()
+ handler(w, r)
+
+ if v := w.Header().Get(hnCacheControl); v != "" {
+ t.Errorf("expected %s got %s", "", v)
+ }
+
+ // test custom status code of 404
+ r, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/?status=404", nil)
+ w = httptest.NewRecorder()
+ handler(w, r)
+
+ if w.Result().StatusCode != http.StatusNotFound {
+ t.Errorf("expected %d got %d", http.StatusNotFound, w.Result().StatusCode)
+ }
+
+ // test custom status code of 200
+ r, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/?status=200", nil)
+ w = httptest.NewRecorder()
+ handler(w, r)
+
+ if w.Result().StatusCode != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, w.Result().StatusCode)
+ }
+
+ // test custom non-ims code of 200
+ r, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/?non-ims=200", nil)
+ w = httptest.NewRecorder()
+ handler(w, r)
+
+ if w.Result().StatusCode != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, w.Result().StatusCode)
+ }
+
+ // test custom ims code of 200
+ r, _ = http.NewRequest(http.MethodGet, "http://127.0.0.1/?ims=200", nil)
+ r.Header.Set("If-Modified-Since", "trickster")
+ w = httptest.NewRecorder()
+ handler(w, r)
+
+ if w.Result().StatusCode != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, w.Result().StatusCode)
+ }
+
+}
+
+func TestHandler(t *testing.T) {
+
+ r, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1/?max-age=1", nil)
+ w := httptest.NewRecorder()
+ handler(w, r)
+ res := w.Result()
+
+ if res.StatusCode != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, res.StatusCode)
+ }
+
+ h := make(http.Header)
+ h.Set(headers.NameRange, "bytes=0-10")
+ r.Header = h
+ w = httptest.NewRecorder()
+ handler(w, r)
+ res = w.Result()
+ rh := res.Header
+
+ if res.StatusCode != http.StatusPartialContent {
+ t.Errorf("expected %d got %d", http.StatusPartialContent, res.StatusCode)
+ }
+
+ if v := rh.Get(headers.NameContentType); !strings.HasPrefix(v, headers.ValueTextPlain) {
+ t.Errorf("expected %s got %s", headers.ValueTextPlain, v)
+ }
+
+ h.Set(headers.NameRange, "bytes=0-10,20-30")
+ w = httptest.NewRecorder()
+ handler(w, r)
+ res = w.Result()
+ rh = res.Header
+
+ if res.StatusCode != http.StatusPartialContent {
+ t.Errorf("expected %d got %d", http.StatusPartialContent, res.StatusCode)
+ }
+
+ if v := rh.Get(headers.NameContentType); !strings.HasPrefix(v, headers.ValueMultipartByteRanges) {
+ t.Errorf("expected %s got %s", headers.ValueMultipartByteRanges, v)
+ }
+
+ // test bad range
+ h.Set(headers.NameRange, "bytes=40-30")
+ w = httptest.NewRecorder()
+ handler(w, r)
+ res = w.Result()
+ rh = res.Header
+
+ if res.StatusCode != http.StatusRequestedRangeNotSatisfiable {
+ t.Errorf("expected %d got %d", http.StatusRequestedRangeNotSatisfiable, res.StatusCode)
+ }
+
+ if v := rh.Get(headers.NameContentType); v != "" {
+ t.Errorf("expected empty string got %s", v)
+ }
+
+ h.Del(headers.NameRange)
+ h.Set(headers.NameIfModifiedSince, time.Unix(1577836799, 0).Format(time.RFC1123))
+ w = httptest.NewRecorder()
+ handler(w, r)
+ res = w.Result()
+ rh = res.Header
+
+ if res.StatusCode != http.StatusOK {
+ t.Errorf("expected %d got %d", http.StatusOK, res.StatusCode)
+ }
+
+ if v := rh.Get(headers.NameContentType); !strings.HasPrefix(v, headers.ValueTextPlain) {
+ t.Errorf("expected %s got %s", headers.ValueTextPlain, v)
+ }
+
+ h.Set(headers.NameIfModifiedSince, time.Unix(1577836801, 0).Format(time.RFC1123))
+ w = httptest.NewRecorder()
+ handler(w, r)
+ res = w.Result()
+ rh = res.Header
+
+ if res.StatusCode != http.StatusNotModified {
+ t.Errorf("expected %d got %d", http.StatusNotModified, res.StatusCode)
+ }
+
+ if v := rh.Get(headers.NameContentType); v != "" {
+ t.Errorf("expected empty string got %s", v)
+ }
+
+}
diff --git a/pkg/rangesim/rangesim.go b/pkg/rangesim/rangesim.go
new file mode 100644
index 000000000..ff6ecc46a
--- /dev/null
+++ b/pkg/rangesim/rangesim.go
@@ -0,0 +1,153 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+// Package rangesim implements a sample HTTP server that fully supports HTTP Range Requests.
+// It is used by Trickster for unit testing and integration testing.
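+// For example (illustrative), a GET request carrying the header "Range: bytes=0-100"
+// receives a 206 Partial Content response containing only those bytes of the test body.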
+package rangesim
+
+import (
+ "fmt"
+ "io"
+ "mime/multipart"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Response Object Data Constants
+const contentLength = int64(1222)
+
+// Jan 1 2020 00:00:00 GMT
+var lastModified = time.Unix(1577836800, 0)
+
+const contentType = "text/plain; charset=utf-8"
+const separator = "TestRangeServerBoundary"
+const maxAge = "max-age=60"
+
+// Body is the body that RangeSim uses to serve content
+const Body = `Lorem ipsum dolor sit amet, mel alia definiebas ei, labore eligendi ` +
+ `signiferumque id sed. Dico tantas fabulas et vel, maiorum splendide has an. Te mea ` +
+ `suas commune concludaturque. Qui feugait tacimates te.
+
+` + `Ea sea error altera efficiantur, ex possit appetere eum. Sed cu sanctus blandit definiebas, ` +
+ `movet accumsan no mei. Vim diam molestie singulis cu, et sanctus appetere ius, his ut ` +
+ `consulatu vituperata. Graece graeco sit ut, an quem summo splendide duo. Iisque ` +
+ `sapientem interpretaris pro ad, alii mazim pro te. Malis laoreet facilis sea te. An ` +
+ `ferri albucius vel, altera volumus legendos has in.
+
+` + `His ne dolore rationibus. Ut qui ferri malorum. Mel commune atomorum cu. Ut mollis ` +
+ `reprimique nam, eos quot mutat molestie id. Mea error legere contentiones et, ponderum ` +
+ `accusamus est eu. Detraxit repudiandae signiferumque ne eos.
+
+` + `Ius ne periculis consequat, ea usu brute mediocritatem, an qui reque falli deseruisse. ` +
+ `Vix ne aeque movet. Novum homero referrentur in est. No mei adhuc malorum.
+
+` + `Pri vitae sapientem ad, qui libris prompta ei. Ne quem fabulas dissentiet cum, error ` +
+ `legimus vis cu. Te eum lorem liber aliquando, eirmod diceret vis ad. Eos et facer tation. ` +
+ `Etiam phaedrum ea est, an nec summo mediocritatem.`
+
+// HTTP Elements
+const hnAcceptRanges = `Accept-Ranges`
+const hnCacheControl = `Cache-Control`
+const hnContentRange = `Content-Range`
+const hnContentType = `Content-Type`
+const hnIfModifiedSince = `If-Modified-Since`
+const hnLastModified = `Last-Modified`
+const hnRange = `Range`
+
+const hvMultipartByteRange = `multipart/byteranges; boundary=`
+const byteRequestRangePrefix = "bytes="
+const byteResponsRangePrefix = "bytes "
+
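+// byteRanges is a list of byte ranges parsed from an HTTP Range request header.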
+type byteRanges []byteRange
+
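+// validate reports whether every range lies within the test body and has start <= end.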
+func (brs byteRanges) validate() bool {
+ for _, r := range brs {
+ if r.start < 0 || r.end >= contentLength || r.end < r.start {
+ return false
+ }
+ }
+ return true
+}
+
+type byteRange struct {
+ start int64
+ end int64
+}
+
+func (br byteRange) contentRangeHeader() string {
+ return fmt.Sprintf("%s%d-%d/%d", byteResponsRangePrefix, br.start, br.end, contentLength)
+}
+
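+// writeMultipartResponse writes each range as a part of a multipart/byteranges body,
+// using the package's fixed boundary separator.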
+func (brs byteRanges) writeMultipartResponse(w io.Writer) error {
+
+ mw := multipart.NewWriter(w)
+ mw.SetBoundary(separator)
+ for _, r := range brs {
+ pw, err := mw.CreatePart(
+ textproto.MIMEHeader{
+ hnContentType: []string{contentType},
+ hnContentRange: []string{r.contentRangeHeader()},
+ },
+ )
+ if err != nil {
+ return err
+ }
+ pw.Write([]byte(Body[r.start : r.end+1]))
+ }
+ mw.Close()
+ return nil
+}
+
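+// parseRangeHeader parses a Range request header value such as "bytes=0-10,20-30"
+// into byteRanges, returning nil if the value is malformed.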
+func parseRangeHeader(input string) byteRanges {
+
+ if input == "" || !strings.HasPrefix(input, byteRequestRangePrefix) ||
+ input == byteRequestRangePrefix {
+ return nil
+ }
+ input = strings.Replace(input, " ", "", -1)[6:]
+ parts := strings.Split(input, ",")
+ ranges := make(byteRanges, len(parts))
+
+ for i, p := range parts {
+
+ j := strings.Index(p, "-")
+ if j < 0 {
+ return nil
+ }
+
+ var start = int64(-1)
+ var end = int64(-1)
+ var err error
+
+ if j > 0 {
+ start, err = strconv.ParseInt(p[0:j], 10, 64)
+ if err != nil {
+ return nil
+ }
+ }
+
+ if j < len(p)-1 {
+ end, err = strconv.ParseInt(p[j+1:], 10, 64)
+ if err != nil {
+ return nil
+ }
+ }
+
+ ranges[i].start = start
+ ranges[i].end = end
+ }
+
+ return ranges
+}
diff --git a/pkg/rangesim/rangesim_test.go b/pkg/rangesim/rangesim_test.go
new file mode 100644
index 000000000..a8fe51549
--- /dev/null
+++ b/pkg/rangesim/rangesim_test.go
@@ -0,0 +1,72 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package rangesim
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestParseRangeHeader(t *testing.T) {
+
+ br := parseRangeHeader("bytes=0-10,20-40")
+ if len(br) != 2 {
+ t.Errorf("expected %d got %d", 2, len(br))
+ }
+
+ br = parseRangeHeader("")
+ if br != nil {
+ t.Errorf("expected nil got %v", br)
+ }
+
+ br = parseRangeHeader("bytes=10")
+ if br != nil {
+ t.Errorf("expected nil got %v", br)
+ }
+
+ br = parseRangeHeader("bytes=a0-n0")
+ if br != nil {
+ t.Errorf("expected nil got %v", br)
+ }
+
+}
+
+func TestWriteMultipartResponse(t *testing.T) {
+
+ br := parseRangeHeader("bytes=0-10,20-40")
+ buff := make([]byte, 0)
+ w := bytes.NewBuffer(buff)
+
+ err := br.writeMultipartResponse(w)
+ if err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestValidate(t *testing.T) {
+
+ br := parseRangeHeader("bytes=0-10,20-40")
+ v := br.validate()
+ if !v {
+ t.Errorf("expected %t got %t", true, v)
+ }
+
+ br[1].start = 45
+ v = br.validate()
+ if v {
+ t.Errorf("expected %t got %t", false, v)
+ }
+
+}
diff --git a/pkg/sort/floats/floats.go b/pkg/sort/floats/floats.go
new file mode 100644
index 000000000..eccd43655
--- /dev/null
+++ b/pkg/sort/floats/floats.go
@@ -0,0 +1,32 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package floats
+
+// Floats represents an array of float64's
+type Floats []float64
+
+// Len returns the length of an array of float64's
+func (t Floats) Len() int {
+ return len(t)
+}
+
+// Less returns true if i comes before j
+func (t Floats) Less(i, j int) bool {
+ return t[i] < t[j]
+}
+
+// Swap modifies an array of float64's by swapping the values in indexes i and j
+func (t Floats) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
diff --git a/pkg/sort/floats/floats_test.go b/pkg/sort/floats/floats_test.go
new file mode 100644
index 000000000..67e6c016c
--- /dev/null
+++ b/pkg/sort/floats/floats_test.go
@@ -0,0 +1,28 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package floats
+
+import (
+ "errors"
+ "sort"
+ "testing"
+)
+
+func TestSortFloats(t *testing.T) {
+ f := Floats{2, 1, 6, 5}
+ sort.Sort(f)
+ if f[0] != 1 && f[3] != 6 {
+ t.Error(errors.New("sort failed"))
+ }
+}
diff --git a/pkg/sort/times/times.go b/pkg/sort/times/times.go
new file mode 100644
index 000000000..cca833c86
--- /dev/null
+++ b/pkg/sort/times/times.go
@@ -0,0 +1,58 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package times
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Times represents a slice of time.Time
+type Times []time.Time
+
+// FromMap returns a times.Times from a map of time.Time
+func FromMap(m map[time.Time]bool) Times {
+
+ l := make(Times, 0, len(m))
+ for t := range m {
+ l = append(l, t)
+ }
+ sort.Sort(l)
+ return l
+}
+
+// Len returns the length of a slice of time.Times
+func (t Times) Len() int {
+ return len(t)
+}
+
+// Less returns true if i comes before j
+func (t Times) Less(i, j int) bool {
+ return t[i].Before(t[j])
+}
+
+// Swap modifies a slice of time.Times by swapping the values in indexes i and j
+func (t Times) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t Times) String() string {
+ l := make([]string, 0, len(t))
+ for _, v := range t {
+ l = append(l, strconv.FormatInt(v.Unix(), 10))
+ }
+ return "[ " + strings.Join(l, ", ") + " ]"
+}
diff --git a/pkg/sort/times/times_test.go b/pkg/sort/times/times_test.go
new file mode 100644
index 000000000..9e53fc4e9
--- /dev/null
+++ b/pkg/sort/times/times_test.go
@@ -0,0 +1,48 @@
+/**
+* Copyright 2018 Comcast Cable Communications Management, LLC
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+* http://www.apache.org/licenses/LICENSE-2.0
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package times
+
+import (
+ "errors"
+ "sort"
+ "testing"
+ "time"
+)
+
+func TestSortTimes(t *testing.T) {
+ f := Times{time.Unix(2, 0), time.Unix(1, 0)}
+ sort.Sort(f)
+ if f[0] != time.Unix(1, 0) {
+ t.Error(errors.New("sort failed"))
+ }
+}
+
+func TestString(t *testing.T) {
+ f := Times{time.Unix(2, 0), time.Unix(1, 0)}
+ const expected = "[ 2, 1 ]"
+ if f.String() != expected {
+ t.Errorf("expected %s got %s", expected, f.String())
+ }
+}
+
+func TestFromMap(t *testing.T) {
+ m := map[time.Time]bool{
+ time.Unix(2, 0): true,
+ }
+ f := FromMap(m)
+ const expected = "[ 2 ]"
+ if f.String() != expected {
+ t.Errorf("expected %s got %s", expected, f.String())
+ }
+}
diff --git a/redis.go b/redis.go
deleted file mode 100644
index 5662432e2..000000000
--- a/redis.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "sync"
- "time"
-
- "github.com/go-kit/kit/log/level"
- "github.com/go-redis/redis"
-)
-
-// RedisCache represents a redis cache object that conforms to the Cache interface
-type RedisCache struct {
- T *TricksterHandler
- Config RedisCacheConfig
- client *redis.Client
- CacheKeys sync.Map
-}
-
-// Connect connects to the configured Redis endpoint
-func (r *RedisCache) Connect() error {
- level.Info(r.T.Logger).Log("event", "connecting to redis", "protocol", r.Config.Protocol, "Endpoint", r.Config.Endpoint)
- r.client = redis.NewClient(&redis.Options{
- Network: r.Config.Protocol,
- Addr: r.Config.Endpoint,
- })
- if r.Config.Password != "" {
- r.client.Options().Password = r.Config.Password
- }
- go r.Reap()
- return r.client.Ping().Err()
-}
-
-// Store places the the data into the Redis Cache using the provided Key and TTL
-func (r *RedisCache) Store(cacheKey string, data string, ttl int64) error {
- level.Debug(r.T.Logger).Log("event", "redis cache store", "key", cacheKey)
- return r.client.Set(cacheKey, data, time.Second*time.Duration(ttl)).Err()
-}
-
-// Retrieve gets data from the Redis Cache using the provided Key
-func (r *RedisCache) Retrieve(cacheKey string) (string, error) {
- level.Debug(r.T.Logger).Log("event", "redis cache retrieve", "key", cacheKey)
- return r.client.Get(cacheKey).Result()
-}
-
-// Reap continually iterates through the cache to find expired elements and removes them
-func (r *RedisCache) Reap() {
- for {
- r.ReapOnce()
- time.Sleep(time.Duration(r.T.Config.Caching.ReapSleepMS) * time.Millisecond)
- }
-}
-
-// ReapOnce makes a single iteration through the Response Channels to remove orphaned channels due to Redis Cache Expiration
-func (r *RedisCache) ReapOnce() {
- var keys []string
-
- r.T.ChannelCreateMtx.Lock()
- for key := range r.T.ResponseChannels {
- keys = append(keys, key)
-
- }
- r.T.ChannelCreateMtx.Unlock()
-
- if len(keys) > 0 {
- results, err := r.client.MGet(keys...).Result()
- if err != nil {
- level.Debug(r.T.Logger).Log("event", "error fetching values in bulk from redis cache", "MGetDetail", err)
- }
-
- for i, key := range keys {
- if results[i] == nil {
- level.Debug(r.T.Logger).Log("event", "redis cache reap", "key", key)
-
- r.T.ChannelCreateMtx.Lock()
-
- // Close out the channel if it exists
- if _, ok := r.T.ResponseChannels[key]; ok {
- close(r.T.ResponseChannels[key])
- delete(r.T.ResponseChannels, key)
- }
-
- r.T.ChannelCreateMtx.Unlock()
- }
- }
- }
-}
-
-// Close disconnects from the Redis Cache
-func (r *RedisCache) Close() error {
- level.Info(r.T.Logger).Log("event", "closing redis connection")
- r.client.Close()
- return nil
-}
diff --git a/redis_test.go b/redis_test.go
deleted file mode 100644
index 6711fc857..000000000
--- a/redis_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
-* Copyright 2018 Comcast Cable Communications Management, LLC
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-* http://www.apache.org/licenses/LICENSE-2.0
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
- */
-
-package main
-
-import (
- "testing"
-
- "github.com/alicebob/miniredis"
- "github.com/go-kit/kit/log"
-)
-
-func setupRedisCache() (RedisCache, func()) {
- s, err := miniredis.Run()
- if err != nil {
- panic(err)
- }
- cfg := Config{Caching: CachingConfig{ReapSleepMS: 1000}}
- tr := TricksterHandler{
- Logger: log.NewNopLogger(),
- ResponseChannels: make(map[string]chan *ClientRequestContext),
- Config: &cfg,
- }
- rcfg := RedisCacheConfig{Endpoint: s.Addr()}
- close := func() {
- s.Close()
- }
- return RedisCache{T: &tr, Config: rcfg}, close
-}
-
-func TestRedisCache_Connect(t *testing.T) {
- rc, close := setupRedisCache()
- defer close()
-
- // it should connect
- err := rc.Connect()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestRedisCache_Store(t *testing.T) {
- rc, close := setupRedisCache()
- defer close()
-
- err := rc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // it should store a value
- err = rc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestRedisCache_Retrieve(t *testing.T) {
- rc, close := setupRedisCache()
- defer close()
-
- err := rc.Connect()
- if err != nil {
- t.Error(err)
- }
- err = rc.Store("cacheKey", "data", 60000)
- if err != nil {
- t.Error(err)
- }
-
- // it should retrieve a value
- data, err := rc.Retrieve("cacheKey")
- if err != nil {
- t.Error(err)
- }
- if data != "data" {
- t.Errorf("wanted \"%s\". got \"%s\"", "data", data)
- }
-}
-
-func TestRedisCache_ReapOnce(t *testing.T) {
- rc, close := setupRedisCache()
- defer close()
-
- err := rc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // fake an empty response channel to reap
- ch := make(chan *ClientRequestContext, 100)
- rc.T.ResponseChannels["cacheKey"] = ch
-
- // it should remove empty response channel
- rc.ReapOnce()
-
- if rc.T.ResponseChannels["cacheKey"] != nil {
- t.Errorf("expected response channel to be removed")
- }
-}
-
-func TestRedisCache_Close(t *testing.T) {
- rc, close := setupRedisCache()
- defer close()
-
- err := rc.Connect()
- if err != nil {
- t.Error(err)
- }
-
- // it should close
- err = rc.Close()
- if err != nil {
- t.Error(err)
- }
-}
diff --git a/testdata/gzip_test.txt.gz b/testdata/gzip_test.txt.gz
new file mode 100644
index 000000000..e3712e132
Binary files /dev/null and b/testdata/gzip_test.txt.gz differ
diff --git a/testdata/test.01.cert.pem b/testdata/test.01.cert.pem
new file mode 100644
index 000000000..7ed3f2e7c
--- /dev/null
+++ b/testdata/test.01.cert.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICETCCAXqgAwIBAgIQcpiSApefaVCxEHAn1+NbRDANBgkqhkiG9w0BAQsFADAS
+MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
+MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQDJ5z7FEgKi+sfUl2wOXhg7HG5cQcYxXd8HvXSyr55a1jPGI8yQ+HHrxgy3
+GM7vyjGqn0O8bk+JmL/JXKqdjc2Oo7d18bDZHzV/8ZZ4TRAGDPotLdwUrb3RQK3l
+vYb8RxdbQefhurfTCwu3eG3NDuWtS4Yk2S+H2pCcuIGnC1V4xQIDAQABo2YwZDAO
+BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw
+AwEB/zAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAA
+AAEwDQYJKoZIhvcNAQELBQADgYEAOAMcA3BrcepiFV+nywffI8CjwsLL9aZcNNuP
+GaLedrtDsCT+4oVzTcvH3ubYpT5G4AD7U64fbmLNKI1Fnmw8X5GS1sEQ1bR0nFEB
++lXS7da7NZ89HRncDhjSjn9LHFZmMNZ8Bpihssp106PLaQ5hl0S+Ln+SgZlVzQDi
+wn1sYLo=
+-----END CERTIFICATE-----
diff --git a/testdata/test.01.key.pem b/testdata/test.01.key.pem
new file mode 100644
index 000000000..545c6fde7
--- /dev/null
+++ b/testdata/test.01.key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDJ5z7FEgKi+sfUl2wOXhg7HG5cQcYxXd8HvXSyr55a1jPGI8yQ
++HHrxgy3GM7vyjGqn0O8bk+JmL/JXKqdjc2Oo7d18bDZHzV/8ZZ4TRAGDPotLdwU
+rb3RQK3lvYb8RxdbQefhurfTCwu3eG3NDuWtS4Yk2S+H2pCcuIGnC1V4xQIDAQAB
+AoGAQ5K/cVl7xGxGYSJkqdJYLcBwNzRUTsVqFb8UxZD9YM17+n6UwphEGHLqYoVN
+DPgQ81fmZbRNrnGPDqeS+rQw7Uoxn3DnVhKp5JhlZuNHW7KmxfuAFoKM93r9nC5r
+2GH88wKQHUcCIgYM1uvfEcCPG8Xdf0FKWKnHLLke5x3oBQECQQDhC+PCMJNmENBB
+s72exdyI9v6onKupA05emxqDrU6eDcdxsi/a7AtpmGrc75reNynSDU6btM1thpWG
+w24r5KthAkEA5ax2Wtboxbm3D1uJTY3wKZO8avx8hosx9EqXu0MhrP15ZR+XUgTd
+OHfLBgGM1ZyPIm92nnkkNr7tuVJ4H0sL5QJAdj0p+EBPYWPaF3sZWI86lJe5KtvN
+8Sn0hg5V+vMWiEJTFLB7Jjm4sU1McDJPmws+pMcrEvIGsNHyQ/DwNI2bYQJBALu/
+3gioQiTQimsNgxJZA4iZcp9qw0khJLb0+1BjsnW9x6z4xDSSMV1l4BuKzNwaXY/i
+2m71zLrMHX0vg27hZFkCQQCj2SpoSgLl+qvCqLCF/UW+oiIIh7CmrrnJP9w17t8D
+VXlJXhMztJstwq/uA/Owq10l/ECmvcQSP/jraLYrimyJ
+-----END RSA PRIVATE KEY-----
diff --git a/testdata/test.02.cert.pem b/testdata/test.02.cert.pem
new file mode 100644
index 000000000..42a04d6d0
--- /dev/null
+++ b/testdata/test.02.cert.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICETCCAXqgAwIBAgIQPOKRmsinAE3eJU63d5cOmTANBgkqhkiG9w0BAQsFADAS
+MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
+MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQC47irzqc1ZBlNSDd19Efv+OoXjAJlUgL7G6OPIQyCUEbaTFbSwn6BWyBs7
+5YbXuvaff1UjuzsdM+aK1JE1wujnXpaVkNIWCuXbsh8ltYTVdRyHdR5fACXJKE/E
+Y5ACBxzrw8kmy4pAsSINa0B7/oT+yT25zi7wpwz7fzJI3dBsFQIDAQABo2YwZDAO
+BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw
+AwEB/zAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAA
+AAEwDQYJKoZIhvcNAQELBQADgYEAAmSc23AK6VIFbapy41GwxeRtulIVy52DMHlY
+quqK7t5DF++MPKa7zf5jZniRlf5IZCO63cFaePgJYQq3sGsdOiAmznrl76LULY/s
+Qrl/kwBdRhxvbTP1IR7ftTNKIkH2HDAWNBOxZJOVRnWztk72DJJNdBesw3AARWJO
+ZAqb4uM=
+-----END CERTIFICATE-----
diff --git a/testdata/test.02.key.pem b/testdata/test.02.key.pem
new file mode 100644
index 000000000..ab01dbfec
--- /dev/null
+++ b/testdata/test.02.key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQC47irzqc1ZBlNSDd19Efv+OoXjAJlUgL7G6OPIQyCUEbaTFbSw
+n6BWyBs75YbXuvaff1UjuzsdM+aK1JE1wujnXpaVkNIWCuXbsh8ltYTVdRyHdR5f
+ACXJKE/EY5ACBxzrw8kmy4pAsSINa0B7/oT+yT25zi7wpwz7fzJI3dBsFQIDAQAB
+AoGAGb7us+Wb9RzwVKghgdwTHR3Arg3TSvRxmvepXqQZ0QN2S5vmAAD5LZvGTtxx
+nW8mDqaHj+XtIO+s1P8rCDUOf4rEi5eYfBzDrbPmJ5tbD9E6++ssnGH+j5qE0RIc
+Y6ZgbkpjSUHDzKcT5PfMZy1dfZ2H7ZV2kHA7ApNC32JUKLECQQDE4tYrf6X8Ygy4
+OOC9HPSG/5AtObULAowDqQxPOlPh8fn/6upVAZ2mVKfzdeq8Z78ktm1JEf/0qfWB
+FUjWt0WXAkEA8HRiA+cnEjlxKZoT/rpkWFFYhmQr2Ti+ERx2Hg7QEt8rX30qefu3
+8uG1f2JyAh+iUlTuiw6iKynRoQhjNObJMwJBALD331/L5ctb6mjwHvl8/EIXlmVA
+OH3D9UwY98qC+ADgOkEQyz5LLIPkFe5cr/hXHFUIcGS8fB9TYSt8kTMGGtMCQG//
+5b+npX1JoDoeeH3H2AGDMla2xe3SDkXuGd56S9Teeldp96UF6HKLS3zgH/Z5QaRT
+xyCiWkr8mZYGUB9N+B0CQAYzgTLqM3Gem4AibSqrHf9o5QaSTpEONPGcfm5QtRgY
+wW1eefuQ7dbz4Iey1KJW6XZNzK8fQ9Gtfepm1nmvvwU=
+-----END RSA PRIVATE KEY-----
diff --git a/testdata/test.03.cert.pem b/testdata/test.03.cert.pem
new file mode 100644
index 000000000..2ed65639b
--- /dev/null
+++ b/testdata/test.03.cert.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICEjCCAXugAwIBAgIRAPL12HXY79PT9Y5faJC7PV0wDQYJKoZIhvcNAQELBQAw
+EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2
+MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
+gYkCgYEAxkrheDo4SxfC2amiIRomb8GPUT3pEwlcfcZSemr3mCOI9598+OJfvDTf
+Vi1aftGtHea9Gu2pLnmUKHFQx+szILcRrQvUz+npb+yHVCccd8HNk/082SUxOI60
+PuZ5KuUHg3Fxv64aFqS+rj09GtM/aysQixWIYn+mPsD5p6+Q1ocCAwEAAaNmMGQw
+DgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF
+MAMBAf8wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA
+AAABMA0GCSqGSIb3DQEBCwUAA4GBAH9eu1TXMKzXvsU5RHP+OSdfWo3MGsnMRE0o
+tEZdYIPhKyH03a9rzEl4hQoRpzwTHXOqoM/ojU9Q+s9tQ56P/HgZZvfVCtONEDLx
+QYK0qEr/aoUAfbV4IJx7KtcKjqY2SGbaTGQSxZ80DX224ZqnY6CLqLzsl7oHKCSS
+d+Dw3u7K
+-----END CERTIFICATE-----
diff --git a/testdata/test.03.key.pem b/testdata/test.03.key.pem
new file mode 100644
index 000000000..3d18245e5
--- /dev/null
+++ b/testdata/test.03.key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWwIBAAKBgQDGSuF4OjhLF8LZqaIhGiZvwY9RPekTCVx9xlJ6aveYI4j3n3z4
+4l+8NN9WLVp+0a0d5r0a7akueZQocVDH6zMgtxGtC9TP6elv7IdUJxx3wc2T/TzZ
+JTE4jrQ+5nkq5QeDcXG/rhoWpL6uPT0a0z9rKxCLFYhif6Y+wPmnr5DWhwIDAQAB
+AoGALrHpgI9tPDm/N9SbimIHFSAmqMIEhegZcMDoIU3ZOml70pwrKMzeMIre4QNT
+ITQtKTJx68p1oa0dt5xfooR/iFVqvTA2N5SvJUrcZhytAMDh1Te7SIOgchOy9EEs
+ge69SUTpvp6YbP3TnAVUtN8NbOBmuz5tg6PoHZjrYXeMZPkCQQDugVqZJjyMaov+
+oR0qPUbW07Ph2PvLqyubC2+0GIGOuQXCqRQdfb8LdnHwqQ9JPKOiIyts2wHbgar6
+qinaISO9AkEA1NZphcBPvP8Qnea6vmIx1QVJQ7TxH7c4yoEmDG223mO5JZXvjYIO
+rKnTBGSAQU0tUhgo2Z2I9Y9cgN/MnEslkwJALZ1DrIKpldlSyPIbV9a8U53Ni2Yq
+Uft8rXx0cqc1MAym02Hu3O0Nuq1+gR50M/eK/Blp1rnUEx0rjCE7O+KWqQJAWqgV
+fGYk0MDoSAm+Y1eaGD1PLqrExhiZ9Q+7sDGPYfyiIVNTHThXnc4cVtOkGayQ8FXg
+GIlUjBwLb81j7vLHRwJAT5WMWrPSCTF/ckSDsy3rxU1vZjIElch4wpmXdO3a5K/7
+7AN6xScNtZqKfsj3KAQqKiqwyglqw4bvr4ggKLlX7w==
+-----END RSA PRIVATE KEY-----
diff --git a/testdata/test.04.cert.pem b/testdata/test.04.cert.pem
new file mode 100644
index 000000000..b084cafe8
--- /dev/null
+++ b/testdata/test.04.cert.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICEjCCAXugAwIBAgIRAMu8d9oXcjm8HHNCMnwUbfwwDQYJKoZIhvcNAQELBQAw
+EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2
+MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
+gYkCgYEAvagCHfSaw+crd9xEwCbo9BphgWs5QRQUW3BPSKNP2fEOk4YQyxKcJ649
+SJ1p3hShlziKS6L7KhohK8f7Lm1BtRwdFMe7IfDkFzX9gnV1BuAjK3ht6GqYX7gL
+fsty+1sNBFeRLJegYiVv8G6QoeKMRoARNnFnjiXjGUEBBoT7QqUCAwEAAaNmMGQw
+DgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF
+MAMBAf8wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA
+AAABMA0GCSqGSIb3DQEBCwUAA4GBAC77kO3+KbCgUPM5pwOm8rDm98uXHq0Naz8H
+utfZgMdOIVmXccUKodgVqv8koA3ZtPfHLlYf3Fc81vqsUsfycf7SP+lFR2Wwkkma
+JZlLr+32psde3pTMzWi4vN5Li3bXGMfQkd9JVn/1z8Mu6klroYEZ3wiootY9yhvm
+K23WR5tl
+-----END CERTIFICATE-----
diff --git a/testdata/test.04.key.pem b/testdata/test.04.key.pem
new file mode 100644
index 000000000..24945da50
--- /dev/null
+++ b/testdata/test.04.key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXwIBAAKBgQC9qAId9JrD5yt33ETAJuj0GmGBazlBFBRbcE9Io0/Z8Q6ThhDL
+Epwnrj1InWneFKGXOIpLovsqGiErx/subUG1HB0Ux7sh8OQXNf2CdXUG4CMreG3o
+aphfuAt+y3L7Ww0EV5Esl6BiJW/wbpCh4oxGgBE2cWeOJeMZQQEGhPtCpQIDAQAB
+AoGBAJBECLg4pLF4WzxltXZlIrbsilcj9P1oBMQ5flXGxKsIDwpw5L1UfqxAgiWG
+eE2SbAjX3XsDkwLHtYvUJ2F9goisjXkc0QTwKJ7DND00OSakMs+9vn8j9ppZ8CtD
+7LuFRttBqfCZcW4oj1iYYBF7dbf5nitUL8NtyQaDuh/ACq7RAkEA9bzvRD6XBT4i
+6aboEkhdrE9aj9m1NWOK8JgEYfRA37c1J909F3spg06aNkW/gNcnMZGFrrO8c8vW
+vluYe7R0DwJBAMWTh/Q0FhQQhodwdju0F590KGjS/Rx8oUKPSMO10lxD6mEzNxvl
+V2SqF22XrUP9M+7ZjTZ+3e4WlJZnNuTDWgsCQQCcvq3z3uSfsHm+hfsjpksx3NAM
+T7bZXixCuQSaop061fau/dy4/JOHMP0Gv0ie2x4h7QvTWsxLJGtOssg5p+obAkEA
+niP/5c7q/RRdGXtCp3b2kYJ/9acrQOngiU32h++4eHFD4JkFuyZOVRxvtCB7Zrf8
+IWmwRbY2HKOmOtxSa7iREQJBAJUdoN0qTqEULs5Qd0tptNBHHegKKXcA64E93Rer
+twM4jNsIANZXDv3KijDPde9YvxsBcNDyyqRVBbl/mCtGCkc=
+-----END RSA PRIVATE KEY-----
diff --git a/testdata/test.05.cert.pem b/testdata/test.05.cert.pem
new file mode 100644
index 000000000..ddf0407f4
--- /dev/null
+++ b/testdata/test.05.cert.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICETCCAXqgAwIBAgIQOzkwixFhkWEiokcbp9+DyDANBgkqhkiG9w0BAQsFADAS
+MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
+MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQC3eWtlu9rG5rRbFtxdgCQvCQMcfqHqHxecTPaXB/mByxf2dm3Hm3UI14f3
+vSGsL8hG0KLFM6INAMCzWbxNJL6C6gHMWLyk6+1FJUyt/23AiLpMOJlZsF9hitMf
+cZgwkAnnFJiw3MPvM3Xdt4sEHqkQ6HN2I78mf5QaVcP+obcePwIDAQABo2YwZDAO
+BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw
+AwEB/zAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAA
+AAEwDQYJKoZIhvcNAQELBQADgYEALhrpGU7MfIp8JM3CsP48dAK5SsQQ8aI8ELtT
+nuLg8C1s0gf1BDfxl4YG5SQZzURWUI1BiNn9g32Y3pNTr0Q4S/2gMpEGbnt9jTJs
+gcWG6GT7nG/s9p/9PWT8iLoIiPRi+VizvheXz4rp5MvL2X7QaNXwOF/HqHrGyW+y
+XpNN2hA=
+-----END CERTIFICATE-----
diff --git a/testdata/test.05.key.pem b/testdata/test.05.key.pem
new file mode 100644
index 000000000..deda1dc47
--- /dev/null
+++ b/testdata/test.05.key.pem
@@ -0,0 +1 @@
+invalid key data
diff --git a/testdata/test.06.cert.pem b/testdata/test.06.cert.pem
new file mode 100644
index 000000000..5d4c9e809
--- /dev/null
+++ b/testdata/test.06.cert.pem
@@ -0,0 +1 @@
+invalid cert data
diff --git a/testdata/test.06.key.pem b/testdata/test.06.key.pem
new file mode 100644
index 000000000..1ef3abc2e
--- /dev/null
+++ b/testdata/test.06.key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWwIBAAKBgQC3eWtlu9rG5rRbFtxdgCQvCQMcfqHqHxecTPaXB/mByxf2dm3H
+m3UI14f3vSGsL8hG0KLFM6INAMCzWbxNJL6C6gHMWLyk6+1FJUyt/23AiLpMOJlZ
+sF9hitMfcZgwkAnnFJiw3MPvM3Xdt4sEHqkQ6HN2I78mf5QaVcP+obcePwIDAQAB
+AoGAT9ffDN67TmOHiTxhma7yECXz3Kqe+6ucMsCrbv5hbkJboz3WeE8Gl1p0KTN9
+O9lvZqHUs8zMcKwrL+GVOF0NKl5lEqCu0Wfp00VSzNMFr91i/3AHHueJw8eHmjEB
+voDxY54kK02oyC+PrBABabmNsTlL4TScwvaB9OdVO2xvwsECQQDoYZQP+VBCpTw8
+wu/0NPlpAIcNqoZkgxnRokp4g4qyd58QImKt5N8+Mlfkz2FWmpn2WDzFMtLMGn+G
+0LBN4HmNAkEAyh9QJSH04QxVLWuij3jORvrJO9Iu3XvPgMpF2gYTNPbYW7ZYppzN
+PoKur39M8VogbLXzWdO1jDkW4hq0Wnz1+wJAAsXXTHF/IaxzEY6J6nIPX89fzSvx
+upVN45B6LwHyz7pZrYmbf9OxTj6vic1nre7eU3AuGXRHy6OtTeCDmgpJqQJAL3Ia
+Rh0qdomGlRrnFfattUu5YSl9htBBbWIN85fpek1XjG/Jb5LvOvVCPEANt7oIUnyD
+m1pvC3N7Q6gxHeyncQJAD/Xwmu93uWn7z6BEDpkmGxz4EkC+kmDtoCtgP8hguORa
+Q0uH8eUAdMZ+qtpCySe3U1cUKM3ZCZgU9G/TD06cxQ==
+-----END RSA PRIVATE KEY-----
diff --git a/testdata/test.bad-cache-name.conf b/testdata/test.bad-cache-name.conf
new file mode 100644
index 000000000..58f27b0c0
--- /dev/null
+++ b/testdata/test.bad-cache-name.conf
@@ -0,0 +1,98 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+
+ [caches.test]
+ type = 'redis'
+ compression = true
+ timeseries_ttl_secs = 8666
+ fastforward_ttl_secs = 17
+ object_ttl_secs = 39
+
+ [caches.test.index]
+ reap_interval_secs = 4
+ flush_interval_secs = 6
+ max_size_bytes = 536870913
+ max_size_backoff_bytes = 16777217
+ max_size_objects = 80
+ max_size_backoff_objects = 20
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'test_redis_type'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+ [caches.test.filesystem]
+ cache_path = 'test_cache_path'
+
+ [caches.test.bbolt]
+ filename = 'test_filename'
+ bucket = 'test_bucket'
+
+ # Configuration options when using a Badger cache
+ [caches.test.badger]
+ directory = 'test_directory'
+ value_directory = 'test_value_directory'
+
+[origins]
+ [origins.test]
+ type = 'test_type'
+ cache_name = 'test_fail'
+ scheme = 'test_scheme'
+ host = 'test_host'
+ path_prefix = 'test_path_prefix'
+ api_path = 'test_api_path'
+ ignore_no_cache_header = true
+ timeseries_retention_factor = 666
+ timeseries_eviction_method = 'lru'
+ fast_forward_disable = true
+ backfill_tolerance_secs = 301
+ timeout_secs = 37
+ is_default = true
+ max_idle_conns = 23
+ keep_alive_timeout_secs = 7
+ full_chain_cert_path = '../../testdata/test.01.cert.pem'
+ private_key_path = '../../testdata/test.01.key.pem'
+ require_tls = true
+
+[metrics]
+listen_port = 57822
+listen_address = 'metrics_test'
+
+[logging]
+log_level = 'test_log_level'
+log_file = 'test_file'
diff --git a/testdata/test.bad_cache_name.conf b/testdata/test.bad_cache_name.conf
new file mode 100644
index 000000000..e95556f2b
--- /dev/null
+++ b/testdata/test.bad_cache_name.conf
@@ -0,0 +1,29 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ [origins.test]
+ is_default = false
+ origin_type = 'reverseproxycache'
+ cache_name = 'test2'
+ origin_url = 'http://1'
diff --git a/testdata/test.bad_origin_url.conf b/testdata/test.bad_origin_url.conf
new file mode 100644
index 000000000..51b0c9cac
--- /dev/null
+++ b/testdata/test.bad_origin_url.conf
@@ -0,0 +1,96 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+
+ [caches.test]
+ cache_type = 'test_type'
+ compression = true
+ timeseries_ttl_secs = 8666
+ fastforward_ttl_secs = 17
+ object_ttl_secs = 39
+
+ [caches.test.index]
+ reap_interval_secs = 4
+ flush_interval_secs = 6
+ max_size_bytes = 536870913
+ max_size_backoff_bytes = 16777217
+ max_size_objects = 80
+ max_size_backoff_objects = 20
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'test_redis_type'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+ [caches.test.filesystem]
+ cache_path = 'test_cache_path'
+
+ [caches.test.bbolt]
+ filename = 'test_filename'
+ bucket = 'test_bucket'
+
+ # Configuration options when using a Badger cache
+ [caches.test.badger]
+ directory = 'test_directory'
+ value_directory = 'test_value_directory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'test_type'
+ cache_name = 'test'
+ origin_url = 'sasdf_asd[as;://asdf923_-=a*'
+ api_path = 'test_api_path'
+ max_idle_conns = 63
+ keep_alive_timeout_secs = 86400
+ ignore_caching_headers = true
+ value_retention_factor = 666
+ fast_forward_disable = true
+ backfill_tolerance_secs = 301
+ timeout_secs = 37
+ health_check_endpoint = '/test_health'
+ health_check_upstream_path = '/test/upstream/endpoint'
+ health_check_verb = 'test_verb'
+ health_check_query = 'query=1234'
+
+[metrics]
+listen_port = 57822
+listen_address = 'metrics_test'
+
+[logging]
+log_level = 'test_log_level'
+log_file = 'test_file'
diff --git a/testdata/test.bad_tls_cert.conf b/testdata/test.bad_tls_cert.conf
new file mode 100644
index 000000000..3b5d931b2
--- /dev/null
+++ b/testdata/test.bad_tls_cert.conf
@@ -0,0 +1,44 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ # [origins.test]
+ # origin_type = 'prometheus'
+ # cache_name = 'test'
+ # origin_url = 'http://1'
+
+ # [origins.test.paths]
+ # [origins.test.paths.testpath]
+ # path = '/test_path'
+ # no_metrics = true
+
+ [origins.default]
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://2'
+
+ [origins.default.tls]
+  # 06 is a known bad pair
+ client_cert_path = '../../../testdata/test.06.cert.pem'
+ client_key_path = '../../../testdata/test.06.key.pem'
+
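Note on the fixture pairing above: test.05.key.pem and test.06.cert.pem (added earlier in this diff) contain deliberately invalid PEM data, so a config pointing at the 06 pair should fail certificate loading. A minimal sketch of the kind of check these fixtures exercise — using only the standard library, not Trickster's actual TLS code — looks like this:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// test.06.cert.pem holds the literal text "invalid cert data", so pairing
	// it with the (valid) test.06.key.pem cannot produce an X.509 key pair.
	_, err := tls.LoadX509KeyPair(
		"testdata/test.06.cert.pem",
		"testdata/test.06.key.pem",
	)
	if err != nil {
		fmt.Println("expected failure:", err)
	}
}
```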
diff --git a/testdata/test.cache-lru.conf b/testdata/test.cache-lru.conf
new file mode 100644
index 000000000..9fb9ef8de
--- /dev/null
+++ b/testdata/test.cache-lru.conf
@@ -0,0 +1,27 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://0.0.0.0/'
+ timeseries_eviction_method = 'lru'
+ timeseries_retention_factor = 5
+ backfill_tolerance_secs = 1200
diff --git a/testdata/test.custom_health.conf b/testdata/test.custom_health.conf
new file mode 100644
index 000000000..cbcffbddd
--- /dev/null
+++ b/testdata/test.custom_health.conf
@@ -0,0 +1,27 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://1:9090'
+ health_check_upstream_path = '/test/health/path'
+ health_check_verb = 'head'
+ health_check_query = 'some=params'
diff --git a/testdata/test.empty.conf b/testdata/test.empty.conf
new file mode 100644
index 000000000..b1270c1ef
--- /dev/null
+++ b/testdata/test.empty.conf
@@ -0,0 +1,39 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+
+[caches]
+
+ [caches.test]
+
+ [caches.test.index]
+
+ [caches.test.redis]
+
+ [caches.test.filesystem]
+
+ [caches.test.bbolt]
+
+ [caches.test.badger]
+
+[origins]
+ [origins.test]
+ origin_type = 'test'
+ origin_url = 'http://1'
+
+[metrics]
+
+[logging]
diff --git a/testdata/test.full.conf b/testdata/test.full.conf
new file mode 100644
index 000000000..28165faa5
--- /dev/null
+++ b/testdata/test.full.conf
@@ -0,0 +1,141 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+tls_listen_port = 38821
+tls_listen_address = 'test-tls'
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+ object_ttl_secs = 39
+
+ [caches.test.index]
+ reap_interval_secs = 4
+ flush_interval_secs = 6
+ max_size_bytes = 536870913
+ max_size_backoff_bytes = 16777217
+ max_size_objects = 80
+ max_size_backoff_objects = 20
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'test_redis_type'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+ [caches.test.filesystem]
+ cache_path = 'test_cache_path'
+
+ [caches.test.bbolt]
+ filename = 'test_filename'
+ bucket = 'test_bucket'
+
+ # Configuration options when using a Badger cache
+ [caches.test.badger]
+ directory = 'test_directory'
+ value_directory = 'test_value_directory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ revalidation_factor = 2.0
+ multipart_ranges_disabled = true
+ dearticulate_upstream_ranges = true
+ compressable_types = [ 'image/png' ]
+ origin_type = 'test_type'
+ cache_name = 'test'
+ origin_url = 'scheme://test_host/test_path_prefix'
+ api_path = 'test_api_path'
+ max_idle_conns = 23
+ keep_alive_timeout_secs = 7
+ ignore_caching_headers = true
+ timeseries_retention_factor = 666
+ timeseries_eviction_method = 'lru'
+ fast_forward_disable = true
+ backfill_tolerance_secs = 301
+ timeout_secs = 37
+ health_check_endpoint = '/test_health'
+ health_check_upstream_path = '/test/upstream/endpoint'
+ health_check_verb = 'test_verb'
+ health_check_query = 'query=1234'
+ timeseries_ttl_secs = 8666
+ max_ttl_secs = 300
+ fastforward_ttl_secs = 382
+ require_tls = true
+ max_object_size_bytes = 999
+ cache_key_prefix = 'test-prefix'
+ [origins.test.health_check_headers]
+ 'Authorization' = 'Basic SomeHash'
+
+
+ [origins.test.negative_cache]
+ 404 = 10
+ 500 = 10
+
+ [origins.test.paths]
+ [origins.test.paths.series]
+ path = "/series"
+ handler = "proxy"
+
+ [origins.test.paths.label]
+ path = "/label"
+ handler = "localresponse"
+ match_type = "prefix"
+ response_code = 200
+ response_body = "test"
+ collapsed_forwarding = "fake-value"
+
+ [origins.test.paths.label.response_headers]
+ 'X-Header-Test' = 'test-value'
+
+ [origins.test.tls]
+ full_chain_cert_path = '../../testdata/test.01.cert.pem'
+ private_key_path = '../../testdata/test.01.key.pem'
+ insecure_skip_verify = true
+ certificate_authority_paths = [ '../../testdata/test.rootca.pem' ]
+ client_key_path = 'test_client_key'
+ client_cert_path = 'test_client_cert'
+
+[negative_caches]
+ [negative_caches.default]
+ 404 = 5
+
+[metrics]
+listen_port = 57822
+listen_address = 'metrics_test'
+
+[logging]
+log_level = 'test_log_level'
+log_file = 'test_file'
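The [origins.test.paths.label] block above configures a 'localresponse' handler with a fixed response code, body, and headers. As a hedged illustration of what such a handler amounts to — a sketch in plain net/http terms, not Trickster's actual path handler — it is just a canned response that never contacts the origin:

```go
package main

import "net/http"

// localResponse returns a handler that serves a fixed status, headers, and
// body, mirroring the response_code / response_body / response_headers keys
// in the test config above. Illustrative only.
func localResponse(code int, body string, headers map[string]string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		for k, v := range headers {
			w.Header().Set(k, v)
		}
		w.WriteHeader(code)
		w.Write([]byte(body))
	}
}

func main() {
	h := localResponse(200, "test", map[string]string{"X-Header-Test": "test-value"})
	http.Handle("/label", h)
}
```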
diff --git a/testdata/test.invalid-negative-cache-1.conf b/testdata/test.invalid-negative-cache-1.conf
new file mode 100644
index 000000000..0a9584686
--- /dev/null
+++ b/testdata/test.invalid-negative-cache-1.conf
@@ -0,0 +1,28 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://0.0.0.0/'
+
+[negative_caches]
+ [negative_caches.default]
+ a = 10
\ No newline at end of file
diff --git a/testdata/test.invalid-negative-cache-2.conf b/testdata/test.invalid-negative-cache-2.conf
new file mode 100644
index 000000000..a35876b8f
--- /dev/null
+++ b/testdata/test.invalid-negative-cache-2.conf
@@ -0,0 +1,29 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://0.0.0.0/'
+
+[negative_caches]
+ [negative_caches.default]
+ 1212 = 10
+
diff --git a/testdata/test.invalid-negative-cache-3.conf b/testdata/test.invalid-negative-cache-3.conf
new file mode 100644
index 000000000..2b91c09f7
--- /dev/null
+++ b/testdata/test.invalid-negative-cache-3.conf
@@ -0,0 +1,30 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://0.0.0.0/'
+ negative_cache_name = 'foo'
+
+[negative_caches]
+ [negative_caches.default]
+ 404 = 10
+
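The three invalid-negative-cache fixtures differ in why they should be rejected: a non-numeric key ('a'), a number that is not a plausible HTTP status code (1212), and, in the file above, a reference to an undefined negative cache name ('foo'). A rough sketch of the first two checks — not Trickster's actual validation, and the accepted 300-599 range is an assumption made here for illustration — could look like:

```go
package main

import (
	"fmt"
	"strconv"
)

// validateNegativeCache checks that every key parses as an HTTP status code
// (assumed here to be 300-599) and maps to a non-negative TTL in seconds.
func validateNegativeCache(name string, entries map[string]int) error {
	for k, ttl := range entries {
		code, err := strconv.Atoi(k)
		if err != nil || code < 300 || code > 599 {
			return fmt.Errorf("invalid status code %q in negative cache %q", k, name)
		}
		if ttl < 0 {
			return fmt.Errorf("invalid ttl %d for code %q in negative cache %q", ttl, k, name)
		}
	}
	return nil
}

func main() {
	// Mirrors test.invalid-negative-cache-1.conf and -2.conf respectively.
	fmt.Println(validateNegativeCache("default", map[string]int{"a": 10}))
	fmt.Println(validateNegativeCache("default", map[string]int{"1212": 10}))
}
```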
diff --git a/testdata/test.missing-origin-url.conf b/testdata/test.missing-origin-url.conf
new file mode 100644
index 000000000..bad971b80
--- /dev/null
+++ b/testdata/test.missing-origin-url.conf
@@ -0,0 +1,30 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.default]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = 'http://0.0.0.0/'
+
+ [origins.2]
+ is_default = true
+ origin_type = 'prometheus'
+ origin_url = ''
+
diff --git a/testdata/test.missing_origin_type.conf b/testdata/test.missing_origin_type.conf
new file mode 100644
index 000000000..8042394d9
--- /dev/null
+++ b/testdata/test.missing_origin_type.conf
@@ -0,0 +1,95 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+
+ [caches.test]
+ cache_type = 'test_type'
+ compression = true
+ timeseries_ttl_secs = 8666
+ fastforward_ttl_secs = 17
+ object_ttl_secs = 39
+
+ [caches.test.index]
+ reap_interval_secs = 4
+ flush_interval_secs = 6
+ max_size_bytes = 536870913
+ max_size_backoff_bytes = 16777217
+ max_size_objects = 80
+ max_size_backoff_objects = 20
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'test_redis_type'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+ [caches.test.filesystem]
+ cache_path = 'test_cache_path'
+
+ [caches.test.bbolt]
+ filename = 'test_filename'
+ bucket = 'test_bucket'
+
+ # Configuration options when using a Badger cache
+ [caches.test.badger]
+ directory = 'test_directory'
+ value_directory = 'test_value_directory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ cache_name = 'test'
+ origin_url = 'http://1'
+ api_path = 'test_api_path'
+ max_idle_conns = 63
+ keep_alive_timeout_secs = 86400
+ ignore_caching_headers = true
+ value_retention_factor = 666
+ fast_forward_disable = true
+ backfill_tolerance_secs = 301
+ timeout_secs = 37
+ health_check_endpoint = '/test_health'
+ health_check_upstream_path = '/test/upstream/endpoint'
+ health_check_verb = 'test_verb'
+ health_check_query = 'query=1234'
+
+[metrics]
+listen_port = 57822
+listen_address = 'metrics_test'
+
+[logging]
+log_level = 'test_log_level'
+log_file = 'test_file'
diff --git a/testdata/test.multiple_origins.conf b/testdata/test.multiple_origins.conf
new file mode 100644
index 000000000..db9c4f78e
--- /dev/null
+++ b/testdata/test.multiple_origins.conf
@@ -0,0 +1,43 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://1'
+
+ [origins.test2]
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://2'
+
+ [origins.test2.paths]
+ [origins.test2.paths.root]
+ path = '/'
+ methods = [ '*' ]
+
+ [origins.test2.paths.test]
+ path = '/test'
+ no_metrics = true
diff --git a/testdata/test.multiple_origins_plus_default.conf b/testdata/test.multiple_origins_plus_default.conf
new file mode 100644
index 000000000..f16886912
--- /dev/null
+++ b/testdata/test.multiple_origins_plus_default.conf
@@ -0,0 +1,50 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ # [origins.test]
+ # origin_type = 'prometheus'
+ # cache_name = 'test'
+ # origin_url = 'http://1'
+
+ # [origins.test.paths]
+ # [origins.test.paths.testpath]
+ # path = '/test_path'
+ # no_metrics = true
+
+ [origins.default]
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://2'
+
+ [origins.default.paths]
+ [origins.default.paths.1]
+ path = '/test_path'
+ no_metrics = true
+ handler = 'invalid_handler'
+
+ [origins.default.paths.2]
+ path = '/api/v1/'
+ handler = 'proxy'
+ match_type = "prefix"
+
diff --git a/testdata/test.redis-cluster.conf b/testdata/test.redis-cluster.conf
new file mode 100644
index 000000000..b526e3b6d
--- /dev/null
+++ b/testdata/test.redis-cluster.conf
@@ -0,0 +1,57 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+ compression = true
+ object_ttl_secs = 39
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'cluster'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'foo'
+ cache_name = 'test'
+ origin_url = 'http://1'
+ max_object_size_bytes = 999
+
+
diff --git a/testdata/test.redis-sentinel.conf b/testdata/test.redis-sentinel.conf
new file mode 100644
index 000000000..37954348d
--- /dev/null
+++ b/testdata/test.redis-sentinel.conf
@@ -0,0 +1,56 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+ compression = true
+ object_ttl_secs = 39
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'sentinel'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'foo'
+ cache_name = 'test'
+ origin_url = 'http://1'
+ max_object_size_bytes = 999
+
diff --git a/testdata/test.redis-standard.conf b/testdata/test.redis-standard.conf
new file mode 100644
index 000000000..c54b91310
--- /dev/null
+++ b/testdata/test.redis-standard.conf
@@ -0,0 +1,56 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+ compression = true
+ object_ttl_secs = 39
+
+ ### Configuration options when using a Redis Cache
+ [caches.test.redis]
+ client_type = 'standard'
+ protocol = 'test_protocol'
+ endpoint = 'test_endpoint'
+ endpoints = ['test_endpoint_1']
+ sentinel_master = 'test_master'
+ password = 'test_password'
+ db = 42
+ max_retries = 6
+ min_retry_backoff_ms = 9
+ max_retry_backoff_ms = 513
+ dial_timeout_ms = 5001
+ read_timeout_ms = 3001
+ write_timeout_ms = 3002
+ pool_size = 21
+ min_idle_conns = 5
+ max_conn_age_ms = 2000
+ pool_timeout_ms = 4001
+ idle_timeout_ms = 300001
+ idle_check_frequency_ms = 60001
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'foo'
+ cache_name = 'test'
+ origin_url = 'http://1'
+ max_object_size_bytes = 999
+
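The redis-standard, redis-sentinel, and redis-cluster fixtures above differ only in client_type, which selects how the Redis client is constructed. A hedged sketch of what that switch typically maps to with the go-redis library (the constructor names are go-redis's, but this is not Trickster's cache code) is:

```go
package cacheclient

import "github.com/go-redis/redis"

// newRedisClient picks a go-redis constructor based on client_type; the
// parameter names follow the fixture keys (endpoint, endpoints,
// sentinel_master) rather than Trickster's real option structs.
func newRedisClient(clientType, endpoint string, endpoints []string, master string) redis.Cmdable {
	switch clientType {
	case "sentinel":
		return redis.NewFailoverClient(&redis.FailoverOptions{
			MasterName:    master,
			SentinelAddrs: endpoints,
		})
	case "cluster":
		return redis.NewClusterClient(&redis.ClusterOptions{Addrs: endpoints})
	default: // "standard"
		return redis.NewClient(&redis.Options{Addr: endpoint})
	}
}
```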
diff --git a/testdata/test.rootca.pem b/testdata/test.rootca.pem
new file mode 100644
index 000000000..a6f3e92af
--- /dev/null
+++ b/testdata/test.rootca.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
diff --git a/testdata/test.too_many_defaults.conf b/testdata/test.too_many_defaults.conf
new file mode 100644
index 000000000..f1b10f8ca
--- /dev/null
+++ b/testdata/test.too_many_defaults.conf
@@ -0,0 +1,35 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://1'
+
+ [origins.test2]
+ is_default = true
+ origin_type = 'reverseproxycache'
+ cache_name = 'test'
+ origin_url = 'http://2'
diff --git a/testdata/test.unknown_origin_type.conf b/testdata/test.unknown_origin_type.conf
new file mode 100644
index 000000000..bfcb4cf7e
--- /dev/null
+++ b/testdata/test.unknown_origin_type.conf
@@ -0,0 +1,30 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+ [caches.test]
+ cache_type = 'memory'
+
+[origins]
+ [origins.test]
+ is_default = true
+ origin_type = 'foo'
+ cache_name = 'test'
+ origin_url = 'http://1'
+
diff --git a/testdata/test.warning1.conf b/testdata/test.warning1.conf
new file mode 100644
index 000000000..bdf1c886b
--- /dev/null
+++ b/testdata/test.warning1.conf
@@ -0,0 +1,34 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+
+ [caches.test.redis]
+ client_type = 'standard'
+ endpoints = ['test_endpoint_1']
+
+[origins]
+ [origins.test]
+ cache_name = 'test'
+ origin_url = 'http://192.168.1.1'
+ origin_type = 'test'
+
diff --git a/testdata/test.warning2.conf b/testdata/test.warning2.conf
new file mode 100644
index 000000000..73f6b5725
--- /dev/null
+++ b/testdata/test.warning2.conf
@@ -0,0 +1,33 @@
+#
+# Copyright 2018 Comcast Cable Communications Management, LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ### this file is for unit tests only and will not work in a live setting
+
+[frontend]
+listen_port = 57821
+listen_address = 'test'
+
+[caches]
+
+ [caches.test]
+ cache_type = 'redis'
+
+ [caches.test.redis]
+ client_type = 'sentinel'
+ endpoint = 'test_endpoint_1'
+
+[origins]
+ [origins.test]
+ cache_name = 'test'
+ origin_url = 'http://192.168.1.1'
+ origin_type = 'test'
diff --git a/vendor/github.com/AndreasBriese/bbloom/.travis.yml b/vendor/github.com/AndreasBriese/bbloom/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/AndreasBriese/bbloom/LICENSE b/vendor/github.com/AndreasBriese/bbloom/LICENSE
new file mode 100644
index 000000000..4b20050e8
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/LICENSE
@@ -0,0 +1,35 @@
+bbloom.go
+
+// The MIT License (MIT)
+// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+siphash.go
+
+// https://github.com/dchest/siphash
+//
+// Written in 2012 by Dmitry Chestnykh.
+//
+// To the extent possible under law, the author have dedicated all copyright
+// and related and neighboring rights to this software to the public domain
+// worldwide. This software is distributed without any warranty.
+// http://creativecommons.org/publicdomain/zero/1.0/
+//
+// Package siphash implements SipHash-2-4, a fast short-input PRF
+// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
diff --git a/vendor/github.com/AndreasBriese/bbloom/README.md b/vendor/github.com/AndreasBriese/bbloom/README.md
new file mode 100644
index 000000000..d7413c33f
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/README.md
@@ -0,0 +1,131 @@
+## bbloom: a bitset Bloom filter for go/golang
+===
+
+[Build Status](http://travis-ci.org/AndreasBriese/bbloom)
+
+This package implements a fast Bloom filter with a real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
+
+NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
+
+===
+
+changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
+
+This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
+Nonetheless bbloom should work with any other form of entries.
+
+~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
+
+Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash has been ported to Go by Dmitry Chestnykh (github.com/dchest/siphash).
+
+Minimum hashset size is: 512 ([4]uint64; will be set automatically).
+
+### install
+
+```sh
+go get github.com/AndreasBriese/bbloom
+```
+
+### test
++ change to folder ../bbloom
++ create wordlist in file "words.txt" (you might use `python permut.py`)
++ run 'go test -bench=.' within the folder
+
+```go
+go test -bench=.
+```
+
+~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
+
+Using Go's testing framework now (keep in mind that the op timing covers 65536 operations each of Add, Has, and AddIfNotHas).
+
+### usage
+
+after installation add
+
+```go
+import (
+ ...
+ "github.com/AndreasBriese/bbloom"
+ ...
+ )
+```
+
+at your header. In the program use
+
+```go
+// create a bloom filter for 65536 items and 1 % wrong-positive ratio
+bf := bbloom.New(float64(1<<16), float64(0.01))
+
+// or
+// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
+// bf = bbloom.New(float64(650000), float64(7))
+// or
+bf = bbloom.New(650000.0, 7.0)
+
+// add one item
+bf.Add([]byte("butter"))
+
+// Number of elements added is exposed now
+// Note: ElemNum will not be included in JSON export (for compatibility with older versions)
+nOfElementsInFilter := bf.ElemNum
+
+// check if item is in the filter
+isIn := bf.Has([]byte("butter")) // should be true
+isNotIn := bf.Has([]byte("Butter")) // should be false
+
+// 'add only if item is new' to the bloomfilter
+added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
+added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
+
+// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
+// add one item
+bf.AddTS([]byte("peanutbutter"))
+// check if item is in the filter
+isIn = bf.HasTS([]byte("peanutbutter")) // should be true
+isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
+// 'add only if item is new' to the bloomfilter
+added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
+added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
+
+// convert to JSON ([]byte)
+Json := bf.JSONMarshal()
+
+// bloomfilters Mutex is exposed for external un-/locking
+// i.e. mutex lock while doing JSON conversion
+bf.Mtx.Lock()
+Json = bf.JSONMarshal()
+bf.Mtx.Unlock()
+
+// restore a bloom filter from storage
+bfNew := bbloom.JSONUnmarshal(Json)
+
+isInNew := bfNew.Has([]byte("butter")) // should be true
+isNotInNew := bfNew.Has([]byte("Butter")) // should be false
+
+```
+
+to work with the bloom filter.
+
+### why 'fast'?
+
+It's about 3 times faster than William Fitzgerald's bitset bloom filter (https://github.com/willf/bloom), and about as fast as my []bool set variant for Bloom filters (see https://github.com/AndreasBriese/bloom) while having an 8-times smaller memory footprint:
+
+
+ Bloom filter (filter size 524288, 7 hashlocs)
+ github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
+ github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
+ github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
+ github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
+
+ github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
+ github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
+ github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
+ github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
+ github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
+ github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
+
+(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
+
+
+With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16), float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
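Since the README's usage example above is sequential, here is a small supplementary sketch (not part of the upstream README) showing the thread-safe AddTS/HasTS variants in a concurrent setting:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/AndreasBriese/bbloom"
)

func main() {
	bf := bbloom.New(float64(1<<16), 0.01) // ~65536 items, ~1% false positives

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// AddTS takes the filter's mutex, so concurrent writers are safe.
			bf.AddTS([]byte(fmt.Sprintf("key-%d", n)))
		}(i)
	}
	wg.Wait()

	fmt.Println(bf.HasTS([]byte("key-3"))) // true
	fmt.Println(bf.HasTS([]byte("key-9"))) // false (barring a ~1% false positive)
}
```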
diff --git a/vendor/github.com/AndreasBriese/bbloom/bbloom.go b/vendor/github.com/AndreasBriese/bbloom/bbloom.go
new file mode 100644
index 000000000..3d4574066
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/bbloom.go
@@ -0,0 +1,270 @@
+// The MIT License (MIT)
+// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package bbloom
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "math"
+ "sync"
+ "unsafe"
+)
+
+// helper
+var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
+
+func getSize(ui64 uint64) (size uint64, exponent uint64) {
+ if ui64 < uint64(512) {
+ ui64 = uint64(512)
+ }
+ size = uint64(1)
+ for size < ui64 {
+ size <<= 1
+ exponent++
+ }
+ return size, exponent
+}
+
+func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
+ size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
+ locs := math.Ceil(float64(0.69314718056) * size / numEntries)
+ return uint64(size), uint64(locs)
+}
+
+// New
+// returns a new bloomfilter
+func New(params ...float64) (bloomfilter Bloom) {
+ var entries, locs uint64
+ if len(params) == 2 {
+ if params[1] < 1 {
+ entries, locs = calcSizeByWrongPositives(params[0], params[1])
+ } else {
+ entries, locs = uint64(params[0]), uint64(params[1])
+ }
+ } else {
+ log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
+ }
+ size, exponent := getSize(uint64(entries))
+ bloomfilter = Bloom{
+ sizeExp: exponent,
+ size: size - 1,
+ setLocs: locs,
+ shift: 64 - exponent,
+ }
+ bloomfilter.Size(size)
+ return bloomfilter
+}
+
+// NewWithBoolset
+// takes a []byte slice and number of locs per entry
+// returns the bloomfilter with a bitset populated according to the input []byte
+func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
+ bloomfilter = New(float64(len(*bs)<<3), float64(locs))
+ ptr := uintptr(unsafe.Pointer(&bloomfilter.bitset[0]))
+ for _, b := range *bs {
+ *(*uint8)(unsafe.Pointer(ptr)) = b
+ ptr++
+ }
+ return bloomfilter
+}
+
+// bloomJSONImExport
+// Im/Export structure used by JSONMarshal / JSONUnmarshal
+type bloomJSONImExport struct {
+ FilterSet []byte
+ SetLocs uint64
+}
+
+// JSONUnmarshal
+// takes JSON-Object (type bloomJSONImExport) as []bytes
+// returns bloom32 / bloom64 object
+func JSONUnmarshal(dbData []byte) Bloom {
+ bloomImEx := bloomJSONImExport{}
+ json.Unmarshal(dbData, &bloomImEx)
+ buf := bytes.NewBuffer(bloomImEx.FilterSet)
+ bs := buf.Bytes()
+ bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
+ return bf
+}
+
+//
+// Bloom filter
+type Bloom struct {
+ Mtx sync.Mutex
+ ElemNum uint64
+ bitset []uint64
+ sizeExp uint64
+ size uint64
+ setLocs uint64
+ shift uint64
+}
+
+// <--- http://www.cse.yorku.ca/~oz/hash.html
+// modified Berkeley DB Hash (32bit)
+// hash is casted to l, h = 16bit fragments
+// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
+// hash := uint64(len(*b))
+// for _, c := range *b {
+// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
+// }
+// h = hash >> bl.shift
+// l = hash << bl.shift >> bl.shift
+// return l, h
+// }
+
+// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
+// https://131002.net/siphash/
+// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash
+
+// Add
+// set the bit(s) for entry; Adds an entry to the Bloom filter
+func (bl *Bloom) Add(entry []byte) {
+ l, h := bl.sipHash(entry)
+ for i := uint64(0); i < (*bl).setLocs; i++ {
+ (*bl).Set((h + i*l) & (*bl).size)
+ (*bl).ElemNum++
+ }
+}
+
+// AddTS
+// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
+func (bl *Bloom) AddTS(entry []byte) {
+ bl.Mtx.Lock()
+ defer bl.Mtx.Unlock()
+ bl.Add(entry[:])
+}
+
+// Has
+// check if bit(s) for entry is/are set
+// returns true if the entry was added to the Bloom Filter
+func (bl Bloom) Has(entry []byte) bool {
+ l, h := bl.sipHash(entry)
+ for i := uint64(0); i < bl.setLocs; i++ {
+ switch bl.IsSet((h + i*l) & bl.size) {
+ case false:
+ return false
+ }
+ }
+ return true
+}
+
+// HasTS
+// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
+func (bl *Bloom) HasTS(entry []byte) bool {
+ bl.Mtx.Lock()
+ defer bl.Mtx.Unlock()
+ return bl.Has(entry[:])
+}
+
+// AddIfNotHas
+// Only Add entry if it's not present in the bloomfilter
+// returns true if entry was added
+// returns false if entry was already registered in the bloomfilter
+func (bl Bloom) AddIfNotHas(entry []byte) (added bool) {
+ if bl.Has(entry[:]) {
+ return added
+ }
+ bl.Add(entry[:])
+ return true
+}
+
+// AddIfNotHasTS
+// Thread safe: Only Add entry if it's not present in the bloomfilter
+// returns true if entry was added
+// returns false if entry was already registered in the bloomfilter
+func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
+ bl.Mtx.Lock()
+ defer bl.Mtx.Unlock()
+ return bl.AddIfNotHas(entry[:])
+}
+
+// Size
+// make Bloom filter with as bitset of size sz
+func (bl *Bloom) Size(sz uint64) {
+ (*bl).bitset = make([]uint64, sz>>6)
+}
+
+// Clear
+// resets the Bloom filter
+func (bl *Bloom) Clear() {
+ for i, _ := range (*bl).bitset {
+ (*bl).bitset[i] = 0
+ }
+}
+
+// Set
+// set the bit[idx] of the bitset
+func (bl *Bloom) Set(idx uint64) {
+ ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
+ *(*uint8)(ptr) |= mask[idx%8]
+}
+
+// IsSet
+// check if bit[idx] of bitset is set
+// returns true/false
+func (bl *Bloom) IsSet(idx uint64) bool {
+ ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
+ r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
+ return r == 1
+}
+
+// JSONMarshal
+// returns JSON-object (type bloomJSONImExport) as []byte
+func (bl Bloom) JSONMarshal() []byte {
+ bloomImEx := bloomJSONImExport{}
+ bloomImEx.SetLocs = uint64(bl.setLocs)
+ bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
+ ptr := uintptr(unsafe.Pointer(&bl.bitset[0]))
+ for i := range bloomImEx.FilterSet {
+ bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(ptr))
+ ptr++
+ }
+ data, err := json.Marshal(bloomImEx)
+ if err != nil {
+ log.Fatal("json.Marshal failed: ", err)
+ }
+ return data
+}
+
+// // alternative hashFn
+// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
+// h64 := fnv.New64a()
+// h64.Write(*b)
+// hash := h64.Sum64()
+// h = hash >> 32
+// l = hash << 32 >> 32
+// return l, h
+// }
+//
+// // <-- http://partow.net/programming/hashfunctions/index.html
+// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
+// // under the topic of sorting and search chapter 6.4.
+// // modified to fit with boolset-length
+// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
+// hash := uint64(len(*b))
+// for _, c := range *b {
+// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
+// }
+// h = hash >> bl.shift
+// l = hash << bl.sizeExp >> bl.sizeExp
+// return l, h
+// }
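As a worked example of the sizing code above (values computed by hand, so treat them as approximate): New(65536, 0.01) routes through calcSizeByWrongPositives and getSize, yielding roughly 628,000 bits, 7 hash locations, and a bitset rounded up to 2^20 bits (128 KiB). The following sketch reproduces that arithmetic outside the package:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	n, p := 65536.0, 0.01

	// Same formulas as calcSizeByWrongPositives in bbloom.go.
	size := -1 * n * math.Log(p) / math.Pow(math.Ln2, 2) // ≈ 628,166 bits
	locs := math.Ceil(math.Ln2 * size / n)               // 7 hash locations

	// getSize then rounds the bit count up to a power of two (minimum 512).
	rounded := uint64(512)
	for rounded < uint64(size) {
		rounded <<= 1
	}

	// Prints approximately: size=628166 locs=7 rounded=1048576 bits (128 KiB)
	fmt.Printf("size=%.0f locs=%.0f rounded=%d bits (%d KiB)\n",
		size, locs, rounded, rounded/8/1024)
}
```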
diff --git a/vendor/github.com/AndreasBriese/bbloom/sipHash.go b/vendor/github.com/AndreasBriese/bbloom/sipHash.go
new file mode 100644
index 000000000..a91d8199b
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/sipHash.go
@@ -0,0 +1,225 @@
+// Written in 2012 by Dmitry Chestnykh.
+//
+// To the extent possible under law, the author have dedicated all copyright
+// and related and neighboring rights to this software to the public domain
+// worldwide. This software is distributed without any warranty.
+// http://creativecommons.org/publicdomain/zero/1.0/
+//
+// Package siphash implements SipHash-2-4, a fast short-input PRF
+// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
+
+package bbloom
+
+// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit
+// parts of 128-bit key: k0 and k1.
+func (bl Bloom) sipHash(p []byte) (l, h uint64) {
+ // Initialization.
+ v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
+ v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
+ v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
+ v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573
+ t := uint64(len(p)) << 56
+
+ // Compression.
+ for len(p) >= 8 {
+
+ m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
+ uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+
+ v3 ^= m
+
+ // Round 1.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // Round 2.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ v0 ^= m
+ p = p[8:]
+ }
+
+ // Compress last block.
+ switch len(p) {
+ case 7:
+ t |= uint64(p[6]) << 48
+ fallthrough
+ case 6:
+ t |= uint64(p[5]) << 40
+ fallthrough
+ case 5:
+ t |= uint64(p[4]) << 32
+ fallthrough
+ case 4:
+ t |= uint64(p[3]) << 24
+ fallthrough
+ case 3:
+ t |= uint64(p[2]) << 16
+ fallthrough
+ case 2:
+ t |= uint64(p[1]) << 8
+ fallthrough
+ case 1:
+ t |= uint64(p[0])
+ }
+
+ v3 ^= t
+
+ // Round 1.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // Round 2.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ v0 ^= t
+
+ // Finalization.
+ v2 ^= 0xff
+
+ // Round 1.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // Round 2.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // Round 3.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // Round 4.
+ v0 += v1
+ v1 = v1<<13 | v1>>51
+ v1 ^= v0
+ v0 = v0<<32 | v0>>32
+
+ v2 += v3
+ v3 = v3<<16 | v3>>48
+ v3 ^= v2
+
+ v0 += v3
+ v3 = v3<<21 | v3>>43
+ v3 ^= v0
+
+ v2 += v1
+ v1 = v1<<17 | v1>>47
+ v1 ^= v2
+ v2 = v2<<32 | v2>>32
+
+ // return v0 ^ v1 ^ v2 ^ v3
+
+ hash := v0 ^ v1 ^ v2 ^ v3
+ h = hash >> bl.shift
+ l = hash << bl.shift >> bl.shift
+ return l, h
+
+}
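The tail of `sipHash` splits the 64-bit SipHash output into a low part (`hash << bl.shift >> bl.shift`, an index into the bitset) and a high part (`hash >> bl.shift`). The two halves are then combined, in the classic double-hashing pattern, to derive the filter's probe positions; the exact combination lives in `Add`/`Has`, which precede this hunk. The sketch below illustrates only the split-and-stride idea in isolation; the shift, probe count, and the roles of the two halves are illustrative, not the values bbloom derives from the configured filter size.

```go
package main

import "fmt"

// splitHash mirrors the shape of bbloom's l/h split: the low bits can index
// into the bitset, the high bits can act as a per-probe stride. The shift
// value is supplied by the caller here; bbloom derives it from the bitset size.
func splitHash(hash uint64, shift uint8) (l, h uint64) {
	h = hash >> shift
	l = hash << shift >> shift
	return l, h
}

func main() {
	const (
		shift   = 64 - 16        // pretend the bitset holds 1<<16 bits
		size    = uint64(1) << 16
		setLocs = 4 // number of probes per entry (assumed)
	)

	hash := uint64(0x9ae16a3b2f90404f) // any 64-bit hash value
	l, h := splitHash(hash, shift)

	// Double hashing: derive setLocs probe positions from a single hash.
	for i := uint64(0); i < setLocs; i++ {
		fmt.Println("probe bit:", (l+i*h)&(size-1))
	}
}
```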
diff --git a/vendor/github.com/AndreasBriese/bbloom/words.txt b/vendor/github.com/AndreasBriese/bbloom/words.txt
new file mode 100644
index 000000000..ad86a31ac
--- /dev/null
+++ b/vendor/github.com/AndreasBriese/bbloom/words.txt
@@ -0,0 +1,140 @@
+2014/01/01 00:00:00 /info.html
+2014/01/01 00:00:00 /info.html
+2014/01/01 00:00:01 /info.html
+2014/01/01 00:00:02 /info.html
+2014/01/01 00:00:03 /info.html
+2014/01/01 00:00:04 /info.html
+2014/01/01 00:00:05 /info.html
+2014/01/01 00:00:06 /info.html
+2014/01/01 00:00:07 /info.html
+2014/01/01 00:00:08 /info.html
+2014/01/01 00:00:09 /info.html
+2014/01/01 00:00:10 /info.html
+2014/01/01 00:00:11 /info.html
+2014/01/01 00:00:12 /info.html
+2014/01/01 00:00:13 /info.html
+2014/01/01 00:00:14 /info.html
+2014/01/01 00:00:15 /info.html
+2014/01/01 00:00:16 /info.html
+2014/01/01 00:00:17 /info.html
+2014/01/01 00:00:18 /info.html
+2014/01/01 00:00:19 /info.html
+2014/01/01 00:00:20 /info.html
+2014/01/01 00:00:21 /info.html
+2014/01/01 00:00:22 /info.html
+2014/01/01 00:00:23 /info.html
+2014/01/01 00:00:24 /info.html
+2014/01/01 00:00:25 /info.html
+2014/01/01 00:00:26 /info.html
+2014/01/01 00:00:27 /info.html
+2014/01/01 00:00:28 /info.html
+2014/01/01 00:00:29 /info.html
+2014/01/01 00:00:30 /info.html
+2014/01/01 00:00:31 /info.html
+2014/01/01 00:00:32 /info.html
+2014/01/01 00:00:33 /info.html
+2014/01/01 00:00:34 /info.html
+2014/01/01 00:00:35 /info.html
+2014/01/01 00:00:36 /info.html
+2014/01/01 00:00:37 /info.html
+2014/01/01 00:00:38 /info.html
+2014/01/01 00:00:39 /info.html
+2014/01/01 00:00:40 /info.html
+2014/01/01 00:00:41 /info.html
+2014/01/01 00:00:42 /info.html
+2014/01/01 00:00:43 /info.html
+2014/01/01 00:00:44 /info.html
+2014/01/01 00:00:45 /info.html
+2014/01/01 00:00:46 /info.html
+2014/01/01 00:00:47 /info.html
+2014/01/01 00:00:48 /info.html
+2014/01/01 00:00:49 /info.html
+2014/01/01 00:00:50 /info.html
+2014/01/01 00:00:51 /info.html
+2014/01/01 00:00:52 /info.html
+2014/01/01 00:00:53 /info.html
+2014/01/01 00:00:54 /info.html
+2014/01/01 00:00:55 /info.html
+2014/01/01 00:00:56 /info.html
+2014/01/01 00:00:57 /info.html
+2014/01/01 00:00:58 /info.html
+2014/01/01 00:00:59 /info.html
+2014/01/01 00:01:00 /info.html
+2014/01/01 00:01:01 /info.html
+2014/01/01 00:01:02 /info.html
+2014/01/01 00:01:03 /info.html
+2014/01/01 00:01:04 /info.html
+2014/01/01 00:01:05 /info.html
+2014/01/01 00:01:06 /info.html
+2014/01/01 00:01:07 /info.html
+2014/01/01 00:01:08 /info.html
+2014/01/01 00:01:09 /info.html
+2014/01/01 00:01:10 /info.html
+2014/01/01 00:01:11 /info.html
+2014/01/01 00:01:12 /info.html
+2014/01/01 00:01:13 /info.html
+2014/01/01 00:01:14 /info.html
+2014/01/01 00:01:15 /info.html
+2014/01/01 00:01:16 /info.html
+2014/01/01 00:01:17 /info.html
+2014/01/01 00:01:18 /info.html
+2014/01/01 00:01:19 /info.html
+2014/01/01 00:01:20 /info.html
+2014/01/01 00:01:21 /info.html
+2014/01/01 00:01:22 /info.html
+2014/01/01 00:01:23 /info.html
+2014/01/01 00:01:24 /info.html
+2014/01/01 00:01:25 /info.html
+2014/01/01 00:01:26 /info.html
+2014/01/01 00:01:27 /info.html
+2014/01/01 00:01:28 /info.html
+2014/01/01 00:01:29 /info.html
+2014/01/01 00:01:30 /info.html
+2014/01/01 00:01:31 /info.html
+2014/01/01 00:01:32 /info.html
+2014/01/01 00:01:33 /info.html
+2014/01/01 00:01:34 /info.html
+2014/01/01 00:01:35 /info.html
+2014/01/01 00:01:36 /info.html
+2014/01/01 00:01:37 /info.html
+2014/01/01 00:01:38 /info.html
+2014/01/01 00:01:39 /info.html
+2014/01/01 00:01:40 /info.html
+2014/01/01 00:01:41 /info.html
+2014/01/01 00:01:42 /info.html
+2014/01/01 00:01:43 /info.html
+2014/01/01 00:01:44 /info.html
+2014/01/01 00:01:45 /info.html
+2014/01/01 00:01:46 /info.html
+2014/01/01 00:01:47 /info.html
+2014/01/01 00:01:48 /info.html
+2014/01/01 00:01:49 /info.html
+2014/01/01 00:01:50 /info.html
+2014/01/01 00:01:51 /info.html
+2014/01/01 00:01:52 /info.html
+2014/01/01 00:01:53 /info.html
+2014/01/01 00:01:54 /info.html
+2014/01/01 00:01:55 /info.html
+2014/01/01 00:01:56 /info.html
+2014/01/01 00:01:57 /info.html
+2014/01/01 00:01:58 /info.html
+2014/01/01 00:01:59 /info.html
+2014/01/01 00:02:00 /info.html
+2014/01/01 00:02:01 /info.html
+2014/01/01 00:02:02 /info.html
+2014/01/01 00:02:03 /info.html
+2014/01/01 00:02:04 /info.html
+2014/01/01 00:02:05 /info.html
+2014/01/01 00:02:06 /info.html
+2014/01/01 00:02:07 /info.html
+2014/01/01 00:02:08 /info.html
+2014/01/01 00:02:09 /info.html
+2014/01/01 00:02:10 /info.html
+2014/01/01 00:02:11 /info.html
+2014/01/01 00:02:12 /info.html
+2014/01/01 00:02:13 /info.html
+2014/01/01 00:02:14 /info.html
+2014/01/01 00:02:15 /info.html
+2014/01/01 00:02:16 /info.html
+2014/01/01 00:02:17 /info.html
+2014/01/01 00:02:18 /info.html
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 000000000..0cd380037
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,5 @@
+TAGS
+tags
+.*.swp
+tomlcheck/tomlcheck
+toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
new file mode 100644
index 000000000..8b8afc4f0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+install:
+ - go install ./...
+ - go get github.com/BurntSushi/toml-test
+script:
+ - export PATH="$PATH:$HOME/gopath/bin"
+ - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 000000000..6efcfd0ce
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 000000000..01b574320
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
new file mode 100644
index 000000000..3600848d3
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/Makefile
@@ -0,0 +1,19 @@
+install:
+ go install ./...
+
+test: install
+ go test -v
+ toml-test toml-test-decoder
+ toml-test -encoder toml-test-encoder
+
+fmt:
+ gofmt -w *.go */*.go
+ colcheck *.go */*.go
+
+tags:
+ find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+ git push origin master
+ git push github master
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 000000000..7c1b37ecc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,218 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Spec: https://github.com/toml-lang/toml
+
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+
+Documentation: https://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[Build Status](https://travis-ci.org/BurntSushi/toml) [GoDoc](https://godoc.org/github.com/BurntSushi/toml)
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 000000000..b0fd51d5b
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,509 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ }
+ if rv.IsNil() {
+ return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ }
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, indirect(rv))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // laziness
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("unsupported type %s", rv.Type())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return e("type mismatch for %s: expected table but found %T",
+ rv.Type().String(), mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("cannot write unexported field %s.%s",
+ rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if tmap == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("value %d is out of range for int8", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("value %d is out of range for int16", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("value %d is out of range for int32", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("value %d is out of range for uint8", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("value %d is out of range for uint16", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("value %d is out of range for uint32", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanSet() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}
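For reviewers unfamiliar with this package, `Decode` is the entry point: TOML tables map onto Go structs or maps, field matching falls back to case-insensitive names, and the `toml` struct tag covers keys that don't match a field name at all. A minimal sketch of driving it, with an illustrative config shape that is not Trickster's actual configuration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type originConfig struct {
	OriginURL string `toml:"origin_url"`
	Timeout   int    `toml:"timeout_secs"`
	Hosts     []string
}

func main() {
	const blob = `
origin_url = "http://prometheus:9090"
timeout_secs = 180
hosts = ["alpha", "omega"]
`
	var cfg originConfig
	// Decode returns MetaData as well; it is ignored here.
	if _, err := toml.Decode(blob, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```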
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 000000000..b9914a679
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,121 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchically. e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ }
+ return k[i]
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
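`MetaData` is the side channel for everything reflection can't express: which keys exist, their TOML types, and which keys were never decoded into the target value. A small sketch of how those accessors fit together; the document below is made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	const blob = `
title = "trickster"

[cache]
type = "memory"
record_ttl_secs = 21600
`
	// Only Title is decoded; everything under [cache] is left undecoded.
	var doc struct{ Title string }

	md, err := toml.Decode(blob, &doc)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("cache.record_ttl_secs type:", md.Type("cache", "record_ttl_secs"))
	for _, k := range md.Undecoded() {
		fmt.Println("undecoded key:", k.String())
	}
}
```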
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 000000000..b371f396e
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/toml-lang/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML. Namely, it is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 000000000..d905c21a2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,568 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayMixedElementTypes = errors.New(
+ "toml: cannot encode array with mixed element types")
+ errArrayNilElement = errors.New(
+ "toml: cannot encode array with nil element")
+ errNonString = errors.New(
+ "toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New(
+ "toml: cannot encode an anonymous field that is not a struct")
+ errArrayNoTable = errors.New(
+ "toml: TOML array element cannot contain a table")
+ errNoKey = errors.New(
+ "toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+ "\t", "\\t",
+ "\n", "\\n",
+ "\r", "\\r",
+ "\"", "\\\"",
+ "\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+ // A single indentation level. By default it is two spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case. Time needs to be in ISO8601 format.
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we use that.
+	// Basically, this prevents the encoder from handling these types as
+ // generic structs (or whatever the underlying type of a TextMarshaler is).
+ switch rv.Interface().(type) {
+ case time.Time, TextMarshaler:
+ enc.keyEqElement(key, rv)
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.keyEqElement(key, rv)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.keyEqElement(key, rv)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ panic(e("unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time:
+ // Special case time.Time as a primitive. Has to come before
+ // TextMarshaler below because time.Time implements
+ // encoding.TextMarshaler, but we need to always use UTC.
+ enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ return
+ case TextMarshaler:
+ // Special case. Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ case reflect.Float64:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ default:
+ panic(e("unexpected primitive type: %s", rv.Kind()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal with at least one
+// number on either side.
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ panicIfInvalidKey(key)
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ panicIfInvalidKey(key)
+ if len(key) == 1 {
+ // Output an extra newline between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv)
+ case reflect.Struct:
+ enc.eStruct(key, rv)
+ default:
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string) {
+ sort.Strings(mapKeys)
+ for _, mapKey := range mapKeys {
+ mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(mrv) {
+ // Don't write anything for nil fields.
+ continue
+ }
+ enc.encode(key.add(mapKey), mrv)
+ }
+ }
+ writeMapKeys(mapKeysDirect)
+ writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table, then all keys under it will be in that
+ // table (not the one we're writing here).
+ rt := rv.Type()
+ var fieldsDirect, fieldsSub [][]int
+ var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ // skip unexported fields
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+ frv := rv.Field(i)
+ if f.Anonymous {
+ t := f.Type
+ switch t.Kind() {
+ case reflect.Struct:
+ // Treat anonymous struct fields with
+ // tag names as though they are not
+ // anonymous, like encoding/json does.
+ if getOptions(f.Tag).name == "" {
+ addFields(t, frv, f.Index)
+ continue
+ }
+ case reflect.Ptr:
+ if t.Elem().Kind() == reflect.Struct &&
+ getOptions(f.Tag).name == "" {
+ if !frv.IsNil() {
+ addFields(t.Elem(), frv.Elem(), f.Index)
+ }
+ continue
+ }
+ // Fall through to the normal field encoding logic below
+ // for non-struct anonymous fields.
+ }
+ }
+
+ if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ var writeFields = func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ sft := rt.FieldByIndex(fieldIndex)
+ sf := rv.FieldByIndex(fieldIndex)
+ if isNil(sf) {
+ // Don't write anything for nil fields.
+ continue
+ }
+
+ opts := getOptions(sft.Tag)
+ if opts.skip {
+ continue
+ }
+ keyName := sft.Name
+ if opts.name != "" {
+ keyName = opts.name
+ }
+ if opts.omitempty && isEmpty(sf) {
+ continue
+ }
+ if opts.omitzero && isZero(sf) {
+ continue
+ }
+
+ enc.encode(key.add(keyName), sf)
+ }
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+}
+
+// tomlTypeName returns the TOML type name of the Go value's type. It is
+// used to determine whether the types of array elements are mixed (which is
+// forbidden). If the Go value is nil, then it is illegal for it to be an array
+// element, and valueIsNil is returned as true.
+
+// Returns the TOML type of a Go value. The type may be `nil`, which means
+// no concrete TOML type could be found.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ }
+ return tomlArray
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case TextMarshaler:
+ return tomlString
+ default:
+ return tomlHash
+ }
+ default:
+ panic("unexpected reflect.Kind: " + rv.Kind().String())
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ elem := rv.Index(i)
+ switch elemType := tomlTypeOfGo(elem); {
+ case elemType == nil:
+ encPanic(errArrayNilElement)
+ case !typeEqual(firstType, elemType):
+ encPanic(errArrayMixedElementTypes)
+ }
+ }
+ // If we have a nested array, then we must make sure that the nested
+ // array contains ONLY primitives.
+ // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+ nest := tomlArrayType(eindirect(rv.Index(0)))
+ if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+ encPanic(errArrayNoTable)
+ }
+ }
+ return firstType
+}
+
+type tagOptions struct {
+ skip bool // "-"
+ name string
+ omitempty bool
+ omitzero bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+ t := tag.Get("toml")
+ if t == "-" {
+ return tagOptions{skip: true}
+ }
+ var opts tagOptions
+ parts := strings.Split(t, ",")
+ opts.name = parts[0]
+ for _, s := range parts[1:] {
+ switch s {
+ case "omitempty":
+ opts.omitempty = true
+ case "omitzero":
+ opts.omitzero = true
+ }
+ }
+ return opts
+}
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() == 0.0
+ }
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return rv.Len() == 0
+ case reflect.Bool:
+ return !rv.Bool()
+ }
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ panicIfInvalidKey(key)
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
+
+func panicIfInvalidKey(key Key) {
+ for _, k := range key {
+ if len(k) == 0 {
+ encPanic(e("Key '%s' is not a valid table name. Key names "+
+ "cannot be empty.", key.maybeQuotedAll()))
+ }
+ }
+}
+
+func isValidKeyName(s string) bool {
+ return len(s) != 0
+}
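On the encoding side, `NewEncoder`/`Encode` mirror the decoder: map keys are sorted for deterministic output, `Indent` controls nesting, and the `toml` tag options handled by `getOptions` above (`-`, `omitempty`, `omitzero`) are honored. A minimal sketch under those assumptions; the field and key names are illustrative only:

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type origin struct {
	URL      string `toml:"origin_url"`
	Timeout  int    `toml:"timeout_secs,omitzero"`
	Insecure bool   `toml:"insecure,omitempty"`
}

type config struct {
	Origins map[string]origin `toml:"origins"`
}

func main() {
	cfg := config{Origins: map[string]origin{
		"default": {URL: "http://prometheus:9090", Timeout: 180},
	}}

	// Map keys are emitted in sorted order; omitzero/omitempty suppress
	// the zero-valued Timeout and Insecure fields when applicable.
	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "    "
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
}
```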
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 000000000..d36e1dd60
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 000000000..e8d503d04
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 000000000..e0a742a88
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,953 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemCommentStart
+ itemInlineTableStart
+ itemInlineTableEnd
+)
+
+const (
+ eof = 0
+ comma = ','
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+ inlineTableStart = '{'
+ inlineTableEnd = '}'
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+
+ // Allow for backing up up to three runes.
+ // This is necessary because TOML contains 3-rune tokens (""" and ''').
+ prevWidths [3]int
+ nprev int // how many of prevWidths are in use
+ // If we emit an eof, we can still back up, but it is not OK to call
+ // next again.
+ atEOF bool
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input,
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.atEOF {
+ panic("next called after EOF")
+ }
+ if lx.pos >= len(lx.input) {
+ lx.atEOF = true
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ lx.prevWidths[2] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[0]
+ if lx.nprev < 3 {
+ lx.nprev++
+ }
+ r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.prevWidths[0] = w
+ lx.pos += w
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only twice between calls to next.
+func (lx *lexer) backup() {
+ if lx.atEOF {
+ lx.atEOF = false
+ return
+ }
+ if lx.nprev < 1 {
+ panic("backed up too far")
+ }
+ w := lx.prevWidths[0]
+ lx.prevWidths[0] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[2]
+ lx.nprev--
+ lx.pos -= w
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+ for {
+ r := lx.next()
+ if pred(r) {
+ continue
+ }
+ lx.backup()
+ lx.ignore()
+ return
+ }
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("unexpected EOF")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a newline for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.emit(itemEOF)
+ return nil
+ }
+ return lx.errorf("expected a top-level item to end with a newline, "+
+ "comment, or EOF, but got %q instead", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf("expected end of table array name delimiter %q, "+
+ "but got %q instead", arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("unexpected end of table name " +
+ "(table names cannot be empty)")
+ case r == tableSep:
+ return lx.errorf("unexpected table separator " +
+ "(table names cannot be empty)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexValue // reuse string lexing
+ default:
+ return lexBareTableName
+ }
+}
+
+// lexBareTableName lexes the name of a table. It assumes that at least one
+// valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareTableName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lexTableNameEnd
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("expected '.' or ']' to end table name, "+
+ "but got %q instead", r)
+ }
+}
+
+// lexKeyStart consumes whitespace and newlines preceding a key name, then
+// emits itemKeyStart and dispatches to the appropriate key lexer.
+func lexKeyStart(lx *lexer) stateFn {
+ r := lx.peek()
+ switch {
+ case r == keySep:
+ return lx.errorf("unexpected key separator %q", keySep)
+ case isWhitespace(r) || isNL(r):
+ lx.next()
+ return lexSkip(lx, lexKeyStart)
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ lx.push(lexKeyEnd)
+ return lexValue // reuse string lexing
+ default:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ return lexBareKey
+ }
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("bare keys cannot contain %q", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("expected key separator %q, but got %q instead",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT newlines.
+ // In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ }
+ switch r {
+ case arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case inlineTableStart:
+ lx.ignore()
+ lx.emit(itemInlineTableStart)
+ return lexInlineTableValue
+ case stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case '+', '-':
+ return lexNumberStart
+ case '.': // special error case, be kind to users
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but something else word-like.)
+ lx.backup()
+ return lexBool
+ }
+ return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == arrayEnd:
+ // NOTE(caleb): The spec isn't clear about whether you can have
+ // a trailing comma or not, so we'll allow it.
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf(
+ "expected a comma or array terminator %q, but got %q instead",
+ arrayEnd, r,
+ )
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValue)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ lx.backup()
+ lx.push(lexInlineTableValueEnd)
+ return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValueEnd)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexInlineTableValue
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ return lx.errorf("expected a comma or an inline table terminator %q, "+
+ "but got %q instead", inlineTableEnd, r)
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemInlineTableEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case '\\':
+ return lexMultilineStringEscape
+ case stringEnd:
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case rawStringEnd:
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ return lexMultilineString
+ }
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b':
+ fallthrough
+ case 't':
+ fallthrough
+ case 'n':
+ fallthrough
+ case 'f':
+ fallthrough
+ case 'r':
+ fallthrough
+ case '"':
+ fallthrough
+ case '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("invalid escape character %q; only the following "+
+ "escape characters are allowed: "+
+ `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', `+
+ "but got %q instead", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', `+
+ "but got %q instead", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case 'e', 'E':
+ return lexFloat
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '-':
+ return lexDatetime
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDatetime
+ }
+ switch r {
+ case '-', 'T', ':', '.', 'Z', '+':
+ return lexDatetime
+ }
+
+ lx.backup()
+ lx.emit(itemDatetime)
+ return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+ // We MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumber
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if !unicode.IsLetter(r) {
+ lx.backup()
+ break
+ }
+ rs = append(rs, r)
+ }
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
+ }
+ return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 000000000..50869ef92
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,592 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits",
+ it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be "+
+ "surrounded by digits", it.val)
+ }
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed "+
+ "by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ var t time.Time
+ var ok bool
+ var err error
+ for _, format := range []string{
+ "2006-01-02T15:04:05Z07:00",
+ "2006-01-02T15:04:05",
+ "2006-01-02",
+ } {
+ t, err = time.ParseInLocation(format, it.val, time.Local)
+ if err == nil {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ case itemInlineTableStart:
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ p.currentKey = ""
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ != itemKeyStart {
+ p.bug("Expected key start but instead found %q, around line %d",
+ it.val, p.approxLine)
+ }
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ // retrieve key
+ k := p.next()
+ p.approxLine = k.line
+ kname := p.keyString(k)
+
+ // retrieve value
+ p.currentKey = kname
+ val, typ := p.value(p.next())
+ // make sure we keep metadata up to date
+ p.setType(kname, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[kname] = val
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ accept = false
+ continue
+ }
+ accept = true
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as an implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
new file mode 100644
index 000000000..562164be0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/session.vim
@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 000000000..c73f8afc1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be moving
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 000000000..608997c22
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ opts := getOptions(sf.Tag)
+ if opts.skip {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := opts.name != ""
+ name := opts.name
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/alicebob/gopher-json/LICENSE b/vendor/github.com/alicebob/gopher-json/LICENSE
new file mode 100644
index 000000000..b3dbff00c
--- /dev/null
+++ b/vendor/github.com/alicebob/gopher-json/LICENSE
@@ -0,0 +1,22 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/alicebob/gopher-json/README.md b/vendor/github.com/alicebob/gopher-json/README.md
new file mode 100644
index 000000000..a0e7c3447
--- /dev/null
+++ b/vendor/github.com/alicebob/gopher-json/README.md
@@ -0,0 +1,7 @@
+# gopher-json [GoDoc](https://godoc.org/layeh.com/gopher-json)
+
+Package json is a simple JSON encoder/decoder for [gopher-lua](https://github.com/yuin/gopher-lua).
+
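+## Example
+
+A minimal usage sketch (not part of the upstream README): it registers the module in a
+gopher-lua state via `Preload` and round-trips a value through the `decode`/`encode`
+functions exposed to Lua. The import path is assumed from this vendor directory.
+
+```go
+package main
+
+import (
+	luajson "github.com/alicebob/gopher-json" // path assumed from the vendor directory
+	lua "github.com/yuin/gopher-lua"
+)
+
+func main() {
+	L := lua.NewState()
+	defer L.Close()
+
+	// Make the module available to Lua as require("json").
+	luajson.Preload(L)
+
+	// Decode a JSON document and encode it back from Lua.
+	if err := L.DoString(`
+		local json = require("json")
+		local t = json.decode('{"answer": 42}')
+		print(json.encode(t))
+	`); err != nil {
+		panic(err)
+	}
+}
+```
+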
+## License
+
+Public domain.
diff --git a/vendor/github.com/alicebob/gopher-json/doc.go b/vendor/github.com/alicebob/gopher-json/doc.go
new file mode 100644
index 000000000..c85792b8f
--- /dev/null
+++ b/vendor/github.com/alicebob/gopher-json/doc.go
@@ -0,0 +1,20 @@
+// Package json is a simple JSON encoder/decoder for gopher-lua.
+//
+// Documentation
+//
+// The following functions are exposed by the library:
+// decode(string): Decodes a JSON string. Returns nil and an error string if
+// the string could not be decoded.
+// encode(value): Encodes a value into a JSON string. Returns nil and an error
+// string if the value could not be encoded.
+//
+// Example
+//
+// Below is an example usage of the library:
+// import (
+// luajson "layeh.com/gopher-json"
+// )
+//
+// L := lua.NewState()
+// luajson.Preload(L)
+package json
diff --git a/vendor/github.com/alicebob/gopher-json/json.go b/vendor/github.com/alicebob/gopher-json/json.go
new file mode 100644
index 000000000..39a27bebb
--- /dev/null
+++ b/vendor/github.com/alicebob/gopher-json/json.go
@@ -0,0 +1,180 @@
+package json
+
+import (
+ "encoding/json"
+ "errors"
+ "strconv"
+
+ "github.com/yuin/gopher-lua"
+)
+
+// Preload adds json to the given Lua state's package.preload table. After it
+// has been preloaded, it can be loaded using require:
+//
+// local json = require("json")
+func Preload(L *lua.LState) {
+ L.PreloadModule("json", Loader)
+}
+
+// Loader is the module loader function.
+func Loader(L *lua.LState) int {
+ t := L.NewTable()
+ L.SetFuncs(t, api)
+ L.Push(t)
+ return 1
+}
+
+var api = map[string]lua.LGFunction{
+ "decode": apiDecode,
+ "encode": apiEncode,
+}
+
+func apiDecode(L *lua.LState) int {
+ if L.GetTop() != 1 {
+ L.Error(lua.LString("bad argument #1 to decode"), 1)
+ return 0
+ }
+ str := L.CheckString(1)
+
+ value, err := Decode(L, []byte(str))
+ if err != nil {
+ L.Push(lua.LNil)
+ L.Push(lua.LString(err.Error()))
+ return 2
+ }
+ L.Push(value)
+ return 1
+}
+
+func apiEncode(L *lua.LState) int {
+ if L.GetTop() != 1 {
+ L.Error(lua.LString("bad argument #1 to encode"), 1)
+ return 0
+ }
+ value := L.CheckAny(1)
+
+ data, err := Encode(value)
+ if err != nil {
+ L.Push(lua.LNil)
+ L.Push(lua.LString(err.Error()))
+ return 2
+ }
+ L.Push(lua.LString(string(data)))
+ return 1
+}
+
+var (
+ errFunction = errors.New("cannot encode function to JSON")
+ errChannel = errors.New("cannot encode channel to JSON")
+ errState = errors.New("cannot encode state to JSON")
+ errUserData = errors.New("cannot encode userdata to JSON")
+ errNested = errors.New("cannot encode recursively nested tables to JSON")
+)
+
+// Encode returns the JSON encoding of value.
+func Encode(value lua.LValue) ([]byte, error) {
+ return json.Marshal(jsonValue{
+ LValue: value,
+ visited: make(map[*lua.LTable]bool),
+ })
+}
+
+type jsonValue struct {
+ lua.LValue
+ visited map[*lua.LTable]bool
+}
+
+func (j jsonValue) MarshalJSON() (data []byte, err error) {
+ switch converted := j.LValue.(type) {
+ case lua.LBool:
+ data, err = json.Marshal(converted)
+ case lua.LChannel:
+ err = errChannel
+ case lua.LNumber:
+ data, err = json.Marshal(converted)
+ case *lua.LFunction:
+ err = errFunction
+ case *lua.LNilType:
+ data, err = json.Marshal(converted)
+ case *lua.LState:
+ err = errState
+ case lua.LString:
+ data, err = json.Marshal(converted)
+ case *lua.LTable:
+ var arr []jsonValue
+ var obj map[string]jsonValue
+
+ if j.visited[converted] {
+ panic(errNested)
+ }
+ j.visited[converted] = true
+
+ converted.ForEach(func(k lua.LValue, v lua.LValue) {
+ i, numberKey := k.(lua.LNumber)
+ if numberKey && obj == nil {
+ index := int(i) - 1
+ if index != len(arr) {
+ // array index out of order; convert to map
+ obj = make(map[string]jsonValue)
+ for i, value := range arr {
+ obj[strconv.Itoa(i+1)] = value
+ }
+ obj[strconv.Itoa(index+1)] = jsonValue{v, j.visited}
+ return
+ }
+ arr = append(arr, jsonValue{v, j.visited})
+ return
+ }
+ if obj == nil {
+ obj = make(map[string]jsonValue)
+ for i, value := range arr {
+ obj[strconv.Itoa(i+1)] = value
+ }
+ }
+ obj[k.String()] = jsonValue{v, j.visited}
+ })
+ if obj != nil {
+ data, err = json.Marshal(obj)
+ } else {
+ data, err = json.Marshal(arr)
+ }
+ case *lua.LUserData:
+ // TODO: call metatable __tostring?
+ err = errUserData
+ }
+ return
+}
+
+// Decode converts the JSON encoded data to Lua values.
+func Decode(L *lua.LState, data []byte) (lua.LValue, error) {
+ var value interface{}
+ err := json.Unmarshal(data, &value)
+ if err != nil {
+ return nil, err
+ }
+ return decode(L, value), nil
+}
+
+func decode(L *lua.LState, value interface{}) lua.LValue {
+ switch converted := value.(type) {
+ case bool:
+ return lua.LBool(converted)
+ case float64:
+ return lua.LNumber(converted)
+ case string:
+ return lua.LString(converted)
+ case []interface{}:
+ arr := L.CreateTable(len(converted), 0)
+ for _, item := range converted {
+ arr.Append(decode(L, item))
+ }
+ return arr
+ case map[string]interface{}:
+ tbl := L.CreateTable(0, len(converted))
+ for key, item := range converted {
+ tbl.RawSetH(lua.LString(key), decode(L, item))
+ }
+ return tbl
+ }
+ return lua.LNil
+}
diff --git a/vendor/github.com/alicebob/miniredis/.gitignore b/vendor/github.com/alicebob/miniredis/.gitignore
new file mode 100644
index 000000000..a6fadca4d
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/.gitignore
@@ -0,0 +1 @@
+/integration/redis_src/
diff --git a/vendor/github.com/alicebob/miniredis/.travis.yml b/vendor/github.com/alicebob/miniredis/.travis.yml
new file mode 100644
index 000000000..d9122d17d
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+before_script:
+ - (cd ./integration && ./get_redis.sh)
+
+install: go get -t
+
+script: make test testrace int
+
+sudo: false
+
+go:
+ - 1.11
diff --git a/vendor/github.com/alicebob/miniredis/LICENSE b/vendor/github.com/alicebob/miniredis/LICENSE
new file mode 100644
index 000000000..bb02657ca
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Harmen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/alicebob/miniredis/Makefile b/vendor/github.com/alicebob/miniredis/Makefile
new file mode 100644
index 000000000..2aa4cd2c5
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/Makefile
@@ -0,0 +1,12 @@
+.PHONY: all test testrace int
+
+all: test
+
+test:
+ go test ./...
+
+testrace:
+ go test -race ./...
+
+int:
+ ${MAKE} -C integration all
diff --git a/vendor/github.com/alicebob/miniredis/README.md b/vendor/github.com/alicebob/miniredis/README.md
new file mode 100644
index 000000000..bfeed8314
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/README.md
@@ -0,0 +1,336 @@
+# Miniredis
+
+Pure Go Redis test server, used in Go unittests.
+
+
+
+Sometimes you want to test code which uses Redis, without making it a full-blown
+integration test.
+Miniredis implements (parts of) the Redis server, to be used in unittests. It
+enables a simple, cheap, in-memory Redis replacement with a real TCP interface. Think of it as the Redis version of `net/http/httptest`.
+
+It saves you from using mock code, and since the redis server lives in the
+test process you can query for values directly, without going through the server
+stack.
+
+There are no dependencies on external binaries, so you can easily integrate it in automated build processes.
+
+## Changelog
+
+### 2.5.0
+
+Added ZPopMin and ZPopMax
+
+### v2.4.6
+
+- support for TIME (thanks @leon-barrett and @lirao)
+- support for ZREVRANGEBYLEX
+- fix for SINTER (thanks @robstein)
+- updates for latest redis
+
+### 2.4.4
+
+Fixed nil Lua return value (#43)
+
+### 2.4.3
+
+Fixed using Lua with authenticated redis.
+
+### 2.4.2
+
+Changed redigo import path.
+
+### 2.4
+
+Minor cleanups. Miniredis now requires Go >= 1.9 (only for the tests; if you don't run the tests you can use an older Go version).
+
+### 2.3.1
+
+Lua changes: added `cjson` library, and `redis.sha1hex()`.
+
+### 2.3
+
+Added the `EVAL`, `EVALSHA`, and `SCRIPT` commands. Uses a pure Go Lua interpreter. Please open an issue if there are problems with any Lua code.
+
+### 2.2
+
+Introduced `StartAddr()`.
+
+### 2.1
+
+Internal cleanups. No changes in functionality.
+
+### 2.0
+
+2.0.0 improves TTLs to be `time.Duration` values. `.Expire()` is removed and
+replaced by `.TTL()`, which returns the TTL as a `time.Duration`.
+This should be the change needed to upgrade:
+
+1.0:
+
+ m.Expire() == 4
+
+2.0:
+
+ m.TTL() == 4 * time.Second
+
+Furthermore, `.SetTime()` is added to help with `EXPIREAT` commands, and `.FastForward()` is introduced to test key expiration.
+
+
+## Commands
+
+Implemented commands:
+
+ - Connection (complete)
+ - AUTH -- see RequireAuth()
+ - ECHO
+ - PING
+ - SELECT
+ - QUIT
+ - Key
+ - DEL
+ - EXISTS
+ - EXPIRE
+ - EXPIREAT
+ - KEYS
+ - MOVE
+ - PERSIST
+ - PEXPIRE
+ - PEXPIREAT
+ - PTTL
+ - RENAME
+ - RENAMENX
+ - RANDOMKEY -- call math.rand.Seed(...) once before using.
+ - TTL
+ - TYPE
+ - SCAN
+ - Transactions (complete)
+ - DISCARD
+ - EXEC
+ - MULTI
+ - UNWATCH
+ - WATCH
+ - Server
+ - DBSIZE
+ - FLUSHALL
+ - FLUSHDB
+ - TIME -- returns time.Now() or value set by SetTime()
+ - String keys (complete)
+ - APPEND
+ - BITCOUNT
+ - BITOP
+ - BITPOS
+ - DECR
+ - DECRBY
+ - GET
+ - GETBIT
+ - GETRANGE
+ - GETSET
+ - INCR
+ - INCRBY
+ - INCRBYFLOAT
+ - MGET
+ - MSET
+ - MSETNX
+ - PSETEX
+ - SET
+ - SETBIT
+ - SETEX
+ - SETNX
+ - SETRANGE
+ - STRLEN
+ - Hash keys (complete)
+ - HDEL
+ - HEXISTS
+ - HGET
+ - HGETALL
+ - HINCRBY
+ - HINCRBYFLOAT
+ - HKEYS
+ - HLEN
+ - HMGET
+ - HMSET
+ - HSET
+ - HSETNX
+ - HVALS
+ - HSCAN
+ - List keys (complete)
+ - BLPOP
+ - BRPOP
+ - BRPOPLPUSH
+ - LINDEX
+ - LINSERT
+ - LLEN
+ - LPOP
+ - LPUSH
+ - LPUSHX
+ - LRANGE
+ - LREM
+ - LSET
+ - LTRIM
+ - RPOP
+ - RPOPLPUSH
+ - RPUSH
+ - RPUSHX
+ - Set keys (complete)
+ - SADD
+ - SCARD
+ - SDIFF
+ - SDIFFSTORE
+ - SINTER
+ - SINTERSTORE
+ - SISMEMBER
+ - SMEMBERS
+ - SMOVE
+ - SPOP -- call math.rand.Seed(...) once before using.
+ - SRANDMEMBER -- call math.rand.Seed(...) once before using.
+ - SREM
+ - SUNION
+ - SUNIONSTORE
+ - SSCAN
+ - Sorted Set keys (complete)
+ - ZADD
+ - ZCARD
+ - ZCOUNT
+ - ZINCRBY
+ - ZINTERSTORE
+ - ZLEXCOUNT
+ - ZPOPMIN
+ - ZPOPMAX
+ - ZRANGE
+ - ZRANGEBYLEX
+ - ZRANGEBYSCORE
+ - ZRANK
+ - ZREM
+ - ZREMRANGEBYLEX
+ - ZREMRANGEBYRANK
+ - ZREMRANGEBYSCORE
+ - ZREVRANGE
+ - ZREVRANGEBYLEX
+ - ZREVRANGEBYSCORE
+ - ZREVRANK
+ - ZSCORE
+ - ZUNIONSTORE
+ - ZSCAN
+ - Scripting
+ - EVAL
+ - EVALSHA
+ - SCRIPT LOAD
+ - SCRIPT EXISTS
+ - SCRIPT FLUSH
+
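+A note on the commands marked "call math.rand.Seed(...) once before using": RANDOMKEY,
+SPOP, and SRANDMEMBER pick random elements, so seed `math/rand` once in your test
+binary. A minimal sketch of one way to do that, from a `TestMain` (imports omitted,
+as in the example further down; any single call before the tests run works just as well):
+
+``` Go
+func TestMain(m *testing.M) {
+	// Seed math/rand once so RANDOMKEY, SPOP and SRANDMEMBER give varying results.
+	rand.Seed(time.Now().UnixNano())
+	os.Exit(m.Run())
+}
+```
+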
+## TTLs, key expiration, and time
+
+Since miniredis is intended to be used in unittests, TTLs don't decrease
+automatically. You can use `TTL()` to get the TTL (as a `time.Duration`) of a
+key. It will return 0 when no TTL is set.
+
+`m.FastForward(d)` can be used to decrement all TTLs. All TTLs which become <=
+0 will be removed.
+
+EXPIREAT and PEXPIREAT values will be converted to a duration. For that you can
+either call `m.SetTime(t)` to use that time as the base for the (P)EXPIREAT
+conversion, or not call `SetTime()` at all, in which case `time.Now()` will be used.
+
+`SetTime()` also sets the value returned by TIME, which defaults to `time.Now()`.
+It is not updated by `FastForward()`, only by `SetTime()`.
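+
+For instance, to make `EXPIREAT` deterministic you can pin the base time with
+`SetTime()`. A minimal sketch (using the redigo client as in the example below;
+error handling and imports omitted):
+
+``` Go
+s, _ := miniredis.Run()
+defer s.Close()
+
+base := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
+s.SetTime(base) // base time for the (P)EXPIREAT conversion, and for TIME
+
+c, _ := redis.Dial("tcp", s.Addr())
+c.Do("SET", "foo", "bar")
+c.Do("EXPIREAT", "foo", base.Add(30*time.Second).Unix())
+
+// The TTL is stored relative to the SetTime() value:
+// s.TTL("foo") == 30 * time.Second
+```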
+
+## Example
+
+``` Go
+func TestSomething(t *testing.T) {
+ s, err := miniredis.Run()
+ if err != nil {
+ panic(err)
+ }
+ defer s.Close()
+
+ // Optionally set some keys your code expects:
+ s.Set("foo", "bar")
+ s.HSet("some", "other", "key")
+
+ // Run your code and see if it behaves.
+ // An example using the redigo library from "github.com/gomodule/redigo/redis":
+ c, err := redis.Dial("tcp", s.Addr())
+ _, err = c.Do("SET", "foo", "bar")
+
+ // Optionally check values in redis...
+ if got, err := s.Get("foo"); err != nil || got != "bar" {
+ t.Error("'foo' has the wrong value")
+ }
+ // ... or use a helper for that:
+ s.CheckGet(t, "foo", "bar")
+
+ // TTL and expiration:
+ s.Set("foo", "bar")
+ s.SetTTL("foo", 10*time.Second)
+ s.FastForward(11 * time.Second)
+ if s.Exists("foo") {
+ t.Fatal("'foo' should not have existed anymore")
+ }
+}
+```
+
+## Not supported
+
+Commands which will probably not be implemented:
+
+ - CLUSTER (all)
+ - ~~CLUSTER *~~
+ - ~~READONLY~~
+ - ~~READWRITE~~
+ - GEO (all) -- unless someone needs these
+ - ~~GEOADD~~
+ - ~~GEODIST~~
+ - ~~GEOHASH~~
+ - ~~GEOPOS~~
+ - ~~GEORADIUS~~
+ - ~~GEORADIUSBYMEMBER~~
+ - HyperLogLog (all) -- unless someone needs these
+ - ~~PFADD~~
+ - ~~PFCOUNT~~
+ - ~~PFMERGE~~
+ - Key
+ - ~~DUMP~~
+ - ~~MIGRATE~~
+ - ~~OBJECT~~
+ - ~~RESTORE~~
+ - ~~WAIT~~
+ - Pub/Sub (all)
+ - ~~PSUBSCRIBE~~
+ - ~~PUBLISH~~
+ - ~~PUBSUB~~
+ - ~~PUNSUBSCRIBE~~
+ - ~~SUBSCRIBE~~
+ - ~~UNSUBSCRIBE~~
+ - Scripting
+ - ~~SCRIPT DEBUG~~
+ - ~~SCRIPT KILL~~
+ - Server
+ - ~~BGSAVE~~
+ - ~~BGWRITEAOF~~
+ - ~~CLIENT *~~
+ - ~~COMMAND *~~
+ - ~~CONFIG *~~
+ - ~~DEBUG *~~
+ - ~~INFO~~
+ - ~~LASTSAVE~~
+ - ~~MONITOR~~
+ - ~~ROLE~~
+ - ~~SAVE~~
+ - ~~SHUTDOWN~~
+ - ~~SLAVEOF~~
+ - ~~SLOWLOG~~
+ - ~~SYNC~~
+
+
+## &c.
+
+Tests are run against Redis 5.0.3. The [./integration](./integration/) subdir
+compares miniredis against a real redis instance.
+
+
+[Build status](https://travis-ci.org/alicebob/miniredis)
+[GoDoc](https://godoc.org/github.com/alicebob/miniredis)
diff --git a/vendor/github.com/alicebob/miniredis/check.go b/vendor/github.com/alicebob/miniredis/check.go
new file mode 100644
index 000000000..8b42b2e0f
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/check.go
@@ -0,0 +1,68 @@
+package miniredis
+
+// 'Fail' methods.
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+)
+
+// T is implemented by testing.T
+type T interface {
+ Fail()
+}
+
+// CheckGet does not call Errorf() iff there is a string key with the
+// expected value. Normal use case is `m.CheckGet(t, "username", "theking")`.
+func (m *Miniredis) CheckGet(t T, key, expected string) {
+ found, err := m.Get(key)
+ if err != nil {
+ lError(t, "GET error, key %#v: %v", key, err)
+ return
+ }
+ if found != expected {
+ lError(t, "GET error, key %#v: Expected %#v, got %#v", key, expected, found)
+ return
+ }
+}
+
+// CheckList does not call Errorf() iff there is a list key with the
+// expected values.
+// Normal use case is `m.CheckGet(t, "favorite_colors", "red", "green", "infrared")`.
+func (m *Miniredis) CheckList(t T, key string, expected ...string) {
+ found, err := m.List(key)
+ if err != nil {
+ lError(t, "List error, key %#v: %v", key, err)
+ return
+ }
+ if !reflect.DeepEqual(expected, found) {
+ lError(t, "List error, key %#v: Expected %#v, got %#v", key, expected, found)
+ return
+ }
+}
+
+// CheckSet does not call Errorf() iff there is a set key with the
+// expected values.
+// Normal use case is `m.CheckSet(t, "visited", "Rome", "Stockholm", "Dublin")`.
+func (m *Miniredis) CheckSet(t T, key string, expected ...string) {
+ found, err := m.Members(key)
+ if err != nil {
+ lError(t, "Set error, key %#v: %v", key, err)
+ return
+ }
+ sort.Strings(expected)
+ if !reflect.DeepEqual(expected, found) {
+ lError(t, "Set error, key %#v: Expected %#v, got %#v", key, expected, found)
+ return
+ }
+}
+
+func lError(t T, format string, args ...interface{}) {
+ _, file, line, _ := runtime.Caller(2)
+ prefix := fmt.Sprintf("%s:%d: ", filepath.Base(file), line)
+ fmt.Printf(prefix+format+"\n", args...)
+ t.Fail()
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_connection.go b/vendor/github.com/alicebob/miniredis/cmd_connection.go
new file mode 100644
index 000000000..ca648f4bf
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_connection.go
@@ -0,0 +1,96 @@
+// Commands from https://redis.io/commands#connection
+
+package miniredis
+
+import (
+ "strconv"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+func commandsConnection(m *Miniredis) {
+ m.srv.Register("AUTH", m.cmdAuth)
+ m.srv.Register("ECHO", m.cmdEcho)
+ m.srv.Register("PING", m.cmdPing)
+ m.srv.Register("SELECT", m.cmdSelect)
+ m.srv.Register("QUIT", m.cmdQuit)
+}
+
+// PING
+func (m *Miniredis) cmdPing(c *server.Peer, cmd string, args []string) {
+ if !m.handleAuth(c) {
+ return
+ }
+ c.WriteInline("PONG")
+}
+
+// AUTH
+func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ pw := args[0]
+
+ m.Lock()
+ defer m.Unlock()
+ if m.password == "" {
+ c.WriteError("ERR Client sent AUTH, but no password is set")
+ return
+ }
+ if m.password != pw {
+ c.WriteError("ERR invalid password")
+ return
+ }
+
+ setAuthenticated(c)
+ c.WriteOK()
+}
+
+// ECHO
+func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ msg := args[0]
+ c.WriteBulk(msg)
+}
+
+// SELECT
+func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ id, err := strconv.Atoi(args[0])
+ if err != nil {
+ id = 0
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ ctx := getCtx(c)
+ ctx.selectedDB = id
+
+ c.WriteOK()
+}
+
+// QUIT
+func (m *Miniredis) cmdQuit(c *server.Peer, cmd string, args []string) {
+ // QUIT isn't transactionfied and accepts any arguments.
+ c.WriteOK()
+ c.Close()
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_generic.go b/vendor/github.com/alicebob/miniredis/cmd_generic.go
new file mode 100644
index 000000000..fa3947908
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_generic.go
@@ -0,0 +1,479 @@
+// Commands from https://redis.io/commands#generic
+
+package miniredis
+
+import (
+ "math/rand"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+// commandsGeneric handles EXPIRE, TTL, PERSIST, &c.
+func commandsGeneric(m *Miniredis) {
+ m.srv.Register("DEL", m.cmdDel)
+ // DUMP
+ m.srv.Register("EXISTS", m.cmdExists)
+ m.srv.Register("EXPIRE", makeCmdExpire(m, false, time.Second))
+ m.srv.Register("EXPIREAT", makeCmdExpire(m, true, time.Second))
+ m.srv.Register("KEYS", m.cmdKeys)
+ // MIGRATE
+ m.srv.Register("MOVE", m.cmdMove)
+ // OBJECT
+ m.srv.Register("PERSIST", m.cmdPersist)
+ m.srv.Register("PEXPIRE", makeCmdExpire(m, false, time.Millisecond))
+ m.srv.Register("PEXPIREAT", makeCmdExpire(m, true, time.Millisecond))
+ m.srv.Register("PTTL", m.cmdPTTL)
+ m.srv.Register("RANDOMKEY", m.cmdRandomkey)
+ m.srv.Register("RENAME", m.cmdRename)
+ m.srv.Register("RENAMENX", m.cmdRenamenx)
+ // RESTORE
+ // SORT
+ m.srv.Register("TTL", m.cmdTTL)
+ m.srv.Register("TYPE", m.cmdType)
+ m.srv.Register("SCAN", m.cmdScan)
+}
+
+// generic expire command for EXPIRE, PEXPIRE, EXPIREAT, PEXPIREAT
+// d is the time unit. If unix is set it'll be seen as a unixtimestamp and
+// converted to a duration.
+func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, string, []string) {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ value := args[1]
+ i, err := strconv.Atoi(value)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ // Key must be present.
+ if _, ok := db.keys[key]; !ok {
+ c.WriteInt(0)
+ return
+ }
+ if unix {
+ var ts time.Time
+ switch d {
+ case time.Millisecond:
+ ts = time.Unix(int64(i/1000), 1000000*int64(i%1000))
+ case time.Second:
+ ts = time.Unix(int64(i), 0)
+ default:
+ panic("invalid time unit (d). Fixme!")
+ }
+ now := m.now
+ if now.IsZero() {
+ now = time.Now().UTC()
+ }
+ db.ttl[key] = ts.Sub(now)
+ } else {
+ db.ttl[key] = time.Duration(i) * d
+ }
+ db.keyVersion[key]++
+ db.checkTTL(key)
+ c.WriteInt(1)
+ })
+ }
+}
+
+// TTL
+func (m *Miniredis) cmdTTL(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if _, ok := db.keys[key]; !ok {
+ // No such key
+ c.WriteInt(-2)
+ return
+ }
+
+ v, ok := db.ttl[key]
+ if !ok {
+ // no expire value
+ c.WriteInt(-1)
+ return
+ }
+ c.WriteInt(int(v.Seconds()))
+ })
+}
+
+// PTTL
+func (m *Miniredis) cmdPTTL(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if _, ok := db.keys[key]; !ok {
+ // no such key
+ c.WriteInt(-2)
+ return
+ }
+
+ v, ok := db.ttl[key]
+ if !ok {
+ // no expire value
+ c.WriteInt(-1)
+ return
+ }
+ c.WriteInt(int(v.Nanoseconds() / 1000000))
+ })
+}
+
+// PERSIST
+func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if _, ok := db.keys[key]; !ok {
+ // no such key
+ c.WriteInt(0)
+ return
+ }
+
+ if _, ok := db.ttl[key]; !ok {
+ // no expire value
+ c.WriteInt(0)
+ return
+ }
+ delete(db.ttl, key)
+ db.keyVersion[key]++
+ c.WriteInt(1)
+ })
+}
+
+// DEL
+func (m *Miniredis) cmdDel(c *server.Peer, cmd string, args []string) {
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ count := 0
+ for _, key := range args {
+ if db.exists(key) {
+ count++
+ }
+ db.del(key, true) // delete expire
+ }
+ c.WriteInt(count)
+ })
+}
+
+// TYPE
+func (m *Miniredis) cmdType(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError("usage error")
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteInline("none")
+ return
+ }
+
+ c.WriteInline(t)
+ })
+}
+
+// EXISTS
+func (m *Miniredis) cmdExists(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ found := 0
+ for _, k := range args {
+ if db.exists(k) {
+ found++
+ }
+ }
+ c.WriteInt(found)
+ })
+}
+
+// MOVE
+func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ targetDB, err := strconv.Atoi(args[1])
+ if err != nil {
+ targetDB = 0
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ if ctx.selectedDB == targetDB {
+ c.WriteError("ERR source and destination objects are the same")
+ return
+ }
+ db := m.db(ctx.selectedDB)
+ targetDB := m.db(targetDB)
+
+ if !db.move(key, targetDB) {
+ c.WriteInt(0)
+ return
+ }
+ c.WriteInt(1)
+ })
+}
+
+// KEYS
+func (m *Miniredis) cmdKeys(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ keys := matchKeys(db.allKeys(), key)
+ c.WriteLen(len(keys))
+ for _, s := range keys {
+ c.WriteBulk(s)
+ }
+ })
+}
+
+// RANDOMKEY
+func (m *Miniredis) cmdRandomkey(c *server.Peer, cmd string, args []string) {
+ if len(args) != 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if len(db.keys) == 0 {
+ c.WriteNull()
+ return
+ }
+ nr := rand.Intn(len(db.keys))
+ for k := range db.keys {
+ if nr == 0 {
+ c.WriteBulk(k)
+ return
+ }
+ nr--
+ }
+ })
+}
+
+// RENAME
+func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ from, to := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(from) {
+ c.WriteError(msgKeyNotFound)
+ return
+ }
+
+ db.rename(from, to)
+ c.WriteOK()
+ })
+}
+
+// RENAMENX
+func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ from, to := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(from) {
+ c.WriteError(msgKeyNotFound)
+ return
+ }
+
+ if db.exists(to) {
+ c.WriteInt(0)
+ return
+ }
+
+ db.rename(from, to)
+ c.WriteInt(1)
+ })
+}
+
+// SCAN
+func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ cursor, err := strconv.Atoi(args[0])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidCursor)
+ return
+ }
+ args = args[1:]
+
+ // MATCH and COUNT options
+ var withMatch bool
+ var match string
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "count" {
+ // we do nothing with count
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ if _, err := strconv.Atoi(args[1]); err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ continue
+ }
+ if strings.ToLower(args[0]) == "match" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ withMatch = true
+ match, args = args[1], args[2:]
+ continue
+ }
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+ // We return _all_ (matched) keys every time.
+
+ if cursor != 0 {
+ // Invalid cursor.
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(0) // no elements
+ return
+ }
+
+ keys := db.allKeys()
+ if withMatch {
+ keys = matchKeys(keys, match)
+ }
+
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(len(keys))
+ for _, k := range keys {
+ c.WriteBulk(k)
+ }
+ })
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_hash.go b/vendor/github.com/alicebob/miniredis/cmd_hash.go
new file mode 100644
index 000000000..1c65ebece
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_hash.go
@@ -0,0 +1,571 @@
+// Commands from https://redis.io/commands#hash
+
+package miniredis
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+// commandsHash handles all hash value operations.
+func commandsHash(m *Miniredis) {
+ m.srv.Register("HDEL", m.cmdHdel)
+ m.srv.Register("HEXISTS", m.cmdHexists)
+ m.srv.Register("HGET", m.cmdHget)
+ m.srv.Register("HGETALL", m.cmdHgetall)
+ m.srv.Register("HINCRBY", m.cmdHincrby)
+ m.srv.Register("HINCRBYFLOAT", m.cmdHincrbyfloat)
+ m.srv.Register("HKEYS", m.cmdHkeys)
+ m.srv.Register("HLEN", m.cmdHlen)
+ m.srv.Register("HMGET", m.cmdHmget)
+ m.srv.Register("HMSET", m.cmdHmset)
+ m.srv.Register("HSET", m.cmdHset)
+ m.srv.Register("HSETNX", m.cmdHsetnx)
+ m.srv.Register("HVALS", m.cmdHvals)
+ m.srv.Register("HSCAN", m.cmdHscan)
+}
+
+// HSET
+func (m *Miniredis) cmdHset(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field, value := args[0], args[1], args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ if db.hashSet(key, field, value) {
+ c.WriteInt(0)
+ } else {
+ c.WriteInt(1)
+ }
+ })
+}
+
+// HSETNX
+func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field, value := args[0], args[1], args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ if _, ok := db.hashKeys[key]; !ok {
+ db.hashKeys[key] = map[string]string{}
+ db.keys[key] = "hash"
+ }
+ _, ok := db.hashKeys[key][field]
+ if ok {
+ c.WriteInt(0)
+ return
+ }
+ db.hashKeys[key][field] = value
+ db.keyVersion[key]++
+ c.WriteInt(1)
+ })
+}
+
+// HMSET
+func (m *Miniredis) cmdHmset(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, args := args[0], args[1:]
+ if len(args)%2 != 0 {
+ setDirty(c)
+ // non-default error message
+ c.WriteError("ERR wrong number of arguments for HMSET")
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ for len(args) > 0 {
+ field, value := args[0], args[1]
+ args = args[2:]
+ db.hashSet(key, field, value)
+ }
+ c.WriteOK()
+ })
+}
+
+// HGET
+func (m *Miniredis) cmdHget(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteNull()
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ value, ok := db.hashKeys[key][field]
+ if !ok {
+ c.WriteNull()
+ return
+ }
+ c.WriteBulk(value)
+ })
+}
+
+// HDEL
+func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, fields := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ // No key is zero deleted
+ c.WriteInt(0)
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ deleted := 0
+ for _, f := range fields {
+ _, ok := db.hashKeys[key][f]
+ if !ok {
+ continue
+ }
+ delete(db.hashKeys[key], f)
+ deleted++
+ }
+ c.WriteInt(deleted)
+
+ // Nothing left. Remove the whole key.
+ if len(db.hashKeys[key]) == 0 {
+ db.del(key, true)
+ }
+ })
+}
+
+// HEXISTS
+func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteInt(0)
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ if _, ok := db.hashKeys[key][field]; !ok {
+ c.WriteInt(0)
+ return
+ }
+ c.WriteInt(1)
+ })
+}
+
+// HGETALL
+func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteLen(0)
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteLen(len(db.hashKeys[key]) * 2)
+ for _, k := range db.hashFields(key) {
+ c.WriteBulk(k)
+ c.WriteBulk(db.hashGet(key, k))
+ }
+ })
+}
+
+// HKEYS
+func (m *Miniredis) cmdHkeys(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+ if db.t(key) != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ fields := db.hashFields(key)
+ c.WriteLen(len(fields))
+ for _, f := range fields {
+ c.WriteBulk(f)
+ }
+ })
+}
+
+// HVALS
+func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteLen(0)
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteLen(len(db.hashKeys[key]))
+ for _, v := range db.hashKeys[key] {
+ c.WriteBulk(v)
+ }
+ })
+}
+
+// HLEN
+func (m *Miniredis) cmdHlen(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteInt(0)
+ return
+ }
+ if t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteInt(len(db.hashKeys[key]))
+ })
+}
+
+// HMGET
+func (m *Miniredis) cmdHmget(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ f, ok := db.hashKeys[key]
+ if !ok {
+ f = map[string]string{}
+ }
+
+ c.WriteLen(len(args) - 1)
+ for _, k := range args[1:] {
+ v, ok := f[k]
+ if !ok {
+ c.WriteNull()
+ continue
+ }
+ c.WriteBulk(v)
+ }
+ })
+}
+
+// HINCRBY
+func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field, deltas := args[0], args[1], args[2]
+
+ delta, err := strconv.Atoi(deltas)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v, err := db.hashIncr(key, field, delta)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ c.WriteInt(v)
+ })
+}
+
+// HINCRBYFLOAT
+func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, field, deltas := args[0], args[1], args[2]
+
+ delta, err := strconv.ParseFloat(deltas, 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidFloat)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "hash" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v, err := db.hashIncrfloat(key, field, delta)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ c.WriteBulk(formatFloat(v))
+ })
+}
+
+// HSCAN
+func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ cursor, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidCursor)
+ return
+ }
+ args = args[2:]
+
+ // MATCH and COUNT options
+ var withMatch bool
+ var match string
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "count" {
+ // we do nothing with count
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ _, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ continue
+ }
+ if strings.ToLower(args[0]) == "match" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ withMatch = true
+ match, args = args[1], args[2:]
+ continue
+ }
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+ // return _all_ (matched) keys every time
+
+ if cursor != 0 {
+ // Invalid cursor.
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(0) // no elements
+ return
+ }
+ if db.exists(key) && db.t(key) != "hash" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.hashFields(key)
+ if withMatch {
+ members = matchKeys(members, match)
+ }
+
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ // HSCAN gives key, values.
+ c.WriteLen(len(members) * 2)
+ for _, k := range members {
+ c.WriteBulk(k)
+ c.WriteBulk(db.hashGet(key, k))
+ }
+ })
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_list.go b/vendor/github.com/alicebob/miniredis/cmd_list.go
new file mode 100644
index 000000000..ae543dc6c
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_list.go
@@ -0,0 +1,687 @@
+// Commands from https://redis.io/commands#list
+
+package miniredis
+
+import (
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+type leftright int
+
+const (
+ left leftright = iota
+ right
+)
+
+// commandsList handles list commands (mostly L*)
+func commandsList(m *Miniredis) {
+ m.srv.Register("BLPOP", m.cmdBlpop)
+ m.srv.Register("BRPOP", m.cmdBrpop)
+ m.srv.Register("BRPOPLPUSH", m.cmdBrpoplpush)
+ m.srv.Register("LINDEX", m.cmdLindex)
+ m.srv.Register("LINSERT", m.cmdLinsert)
+ m.srv.Register("LLEN", m.cmdLlen)
+ m.srv.Register("LPOP", m.cmdLpop)
+ m.srv.Register("LPUSH", m.cmdLpush)
+ m.srv.Register("LPUSHX", m.cmdLpushx)
+ m.srv.Register("LRANGE", m.cmdLrange)
+ m.srv.Register("LREM", m.cmdLrem)
+ m.srv.Register("LSET", m.cmdLset)
+ m.srv.Register("LTRIM", m.cmdLtrim)
+ m.srv.Register("RPOP", m.cmdRpop)
+ m.srv.Register("RPOPLPUSH", m.cmdRpoplpush)
+ m.srv.Register("RPUSH", m.cmdRpush)
+ m.srv.Register("RPUSHX", m.cmdRpushx)
+}
+
+// BLPOP
+func (m *Miniredis) cmdBlpop(c *server.Peer, cmd string, args []string) {
+ m.cmdBXpop(c, cmd, args, left)
+}
+
+// BRPOP
+func (m *Miniredis) cmdBrpop(c *server.Peer, cmd string, args []string) {
+ m.cmdBXpop(c, cmd, args, right)
+}
+
+func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftright) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+ timeoutS := args[len(args)-1]
+ keys := args[:len(args)-1]
+
+ timeout, err := strconv.Atoi(timeoutS)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidTimeout)
+ return
+ }
+ if timeout < 0 {
+ setDirty(c)
+ c.WriteError(msgNegTimeout)
+ return
+ }
+
+ blocking(
+ m,
+ c,
+ time.Duration(timeout)*time.Second,
+ func(c *server.Peer, ctx *connCtx) bool {
+ db := m.db(ctx.selectedDB)
+ for _, key := range keys {
+ if !db.exists(key) {
+ continue
+ }
+ if db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return true
+ }
+
+ if len(db.listKeys[key]) == 0 {
+ continue
+ }
+ c.WriteLen(2)
+ c.WriteBulk(key)
+ var v string
+ switch lr {
+ case left:
+ v = db.listLpop(key)
+ case right:
+ v = db.listPop(key)
+ }
+ c.WriteBulk(v)
+ return true
+ }
+ return false
+ },
+ func(c *server.Peer) {
+ // timeout
+ c.WriteNull()
+ },
+ )
+}
+
+// LINDEX
+func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, offsets := args[0], args[1]
+
+ offset, err := strconv.Atoi(offsets)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ // No such key
+ c.WriteNull()
+ return
+ }
+ if t != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ if offset < 0 {
+ offset = len(l) + offset
+ }
+ if offset < 0 || offset > len(l)-1 {
+ c.WriteNull()
+ return
+ }
+ c.WriteBulk(l[offset])
+ })
+}
+
+// LINSERT
+func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) {
+ if len(args) != 4 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ where := 0
+ switch strings.ToLower(args[1]) {
+ case "before":
+ where = -1
+ case "after":
+ where = +1
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ pivot := args[2]
+ value := args[3]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ // No such key
+ c.WriteInt(0)
+ return
+ }
+ if t != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ for i, el := range l {
+ if el != pivot {
+ continue
+ }
+
+ if where < 0 {
+ l = append(l[:i], append(listKey{value}, l[i:]...)...)
+ } else {
+ if i == len(l)-1 {
+ l = append(l, value)
+ } else {
+ l = append(l[:i+1], append(listKey{value}, l[i+1:]...)...)
+ }
+ }
+ db.listKeys[key] = l
+ db.keyVersion[key]++
+ c.WriteInt(len(l))
+ return
+ }
+ c.WriteInt(-1)
+ })
+}
+
+// LLEN
+func (m *Miniredis) cmdLlen(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ // No such key. That's zero length.
+ c.WriteInt(0)
+ return
+ }
+ if t != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteInt(len(db.listKeys[key]))
+ })
+}
+
+// LPOP
+func (m *Miniredis) cmdLpop(c *server.Peer, cmd string, args []string) {
+ m.cmdXpop(c, cmd, args, left)
+}
+
+// RPOP
+func (m *Miniredis) cmdRpop(c *server.Peer, cmd string, args []string) {
+ m.cmdXpop(c, cmd, args, right)
+}
+
+func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftright) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ // non-existing key is fine
+ c.WriteNull()
+ return
+ }
+ if db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ var elem string
+ switch lr {
+ case left:
+ elem = db.listLpop(key)
+ case right:
+ elem = db.listPop(key)
+ }
+ c.WriteBulk(elem)
+ })
+}
+
+// LPUSH
+func (m *Miniredis) cmdLpush(c *server.Peer, cmd string, args []string) {
+ m.cmdXpush(c, cmd, args, left)
+}
+
+// RPUSH
+func (m *Miniredis) cmdRpush(c *server.Peer, cmd string, args []string) {
+ m.cmdXpush(c, cmd, args, right)
+}
+
+func (m *Miniredis) cmdXpush(c *server.Peer, cmd string, args []string, lr leftright) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if db.exists(key) && db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ var newLen int
+ for _, value := range args {
+ switch lr {
+ case left:
+ newLen = db.listLpush(key, value)
+ case right:
+ newLen = db.listPush(key, value)
+ }
+ }
+ c.WriteInt(newLen)
+ })
+}
+
+// LPUSHX
+func (m *Miniredis) cmdLpushx(c *server.Peer, cmd string, args []string) {
+ m.cmdXpushx(c, cmd, args, left)
+}
+
+// RPUSHX
+func (m *Miniredis) cmdRpushx(c *server.Peer, cmd string, args []string) {
+ m.cmdXpushx(c, cmd, args, right)
+}
+
+func (m *Miniredis) cmdXpushx(c *server.Peer, cmd string, args []string, lr leftright) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+ if db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ var newLen int
+ for _, value := range args {
+ switch lr {
+ case left:
+ newLen = db.listLpush(key, value)
+ case right:
+ newLen = db.listPush(key, value)
+ }
+ }
+ c.WriteInt(newLen)
+ })
+}
+
+// LRANGE
+func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ start, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ if len(l) == 0 {
+ c.WriteLen(0)
+ return
+ }
+
+ rs, re := redisRange(len(l), start, end, false)
+ c.WriteLen(re - rs)
+ for _, el := range l[rs:re] {
+ c.WriteBulk(el)
+ }
+ })
+}
+
+// LREM
+func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ count, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ value := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+ if db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ if count < 0 {
+ reverseSlice(l)
+ }
+ deleted := 0
+ newL := []string{}
+ toDelete := len(l)
+ if count < 0 {
+ toDelete = -count
+ }
+ if count > 0 {
+ toDelete = count
+ }
+ for _, el := range l {
+ if el == value {
+ if toDelete > 0 {
+ deleted++
+ toDelete--
+ continue
+ }
+ }
+ newL = append(newL, el)
+ }
+ if count < 0 {
+ reverseSlice(newL)
+ }
+ if len(newL) == 0 {
+ db.del(key, true)
+ } else {
+ db.listKeys[key] = newL
+ db.keyVersion[key]++
+ }
+
+ c.WriteInt(deleted)
+ })
+}
+
+// LSET
+func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ index, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ value := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteError(msgKeyNotFound)
+ return
+ }
+ if db.t(key) != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ if index < 0 {
+ index = len(l) + index
+ }
+ if index < 0 || index > len(l)-1 {
+ c.WriteError(msgOutOfRange)
+ return
+ }
+ l[index] = value
+ db.keyVersion[key]++
+
+ c.WriteOK()
+ })
+}
+
+// LTRIM
+func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ start, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ t, ok := db.keys[key]
+ if !ok {
+ c.WriteOK()
+ return
+ }
+ if t != "list" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ l := db.listKeys[key]
+ rs, re := redisRange(len(l), start, end, false)
+ l = l[rs:re]
+ if len(l) == 0 {
+ db.del(key, true)
+ } else {
+ db.listKeys[key] = l
+ db.keyVersion[key]++
+ }
+ c.WriteOK()
+ })
+}
+
+// RPOPLPUSH
+func (m *Miniredis) cmdRpoplpush(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ src, dst := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(src) {
+ c.WriteNull()
+ return
+ }
+ if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") {
+ c.WriteError(msgWrongType)
+ return
+ }
+ elem := db.listPop(src)
+ db.listLpush(dst, elem)
+ c.WriteBulk(elem)
+ })
+}
+
+// BRPOPLPUSH
+func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ src := args[0]
+ dst := args[1]
+ timeout, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidTimeout)
+ return
+ }
+ if timeout < 0 {
+ setDirty(c)
+ c.WriteError(msgNegTimeout)
+ return
+ }
+
+ blocking(
+ m,
+ c,
+ time.Duration(timeout)*time.Second,
+ func(c *server.Peer, ctx *connCtx) bool {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(src) {
+ return false
+ }
+ if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") {
+ c.WriteError(msgWrongType)
+ return true
+ }
+ if len(db.listKeys[src]) == 0 {
+ return false
+ }
+ elem := db.listPop(src)
+ db.listLpush(dst, elem)
+ c.WriteBulk(elem)
+ return true
+ },
+ func(c *server.Peer) {
+ // timeout
+ c.WriteNull()
+ },
+ )
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_scripting.go b/vendor/github.com/alicebob/miniredis/cmd_scripting.go
new file mode 100644
index 000000000..296e61b9c
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_scripting.go
@@ -0,0 +1,220 @@
+package miniredis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ luajson "github.com/alicebob/gopher-json"
+ "github.com/yuin/gopher-lua"
+ "github.com/yuin/gopher-lua/parse"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+func commandsScripting(m *Miniredis) {
+ m.srv.Register("EVAL", m.cmdEval)
+ m.srv.Register("EVALSHA", m.cmdEvalsha)
+ m.srv.Register("SCRIPT", m.cmdScript)
+}
+
+// Execute lua. Needs to run m.Lock()ed, from within withTx().
+func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) {
+ l := lua.NewState(lua.Options{SkipOpenLibs: true})
+ defer l.Close()
+
+ // Taken from the go-lua manual
+ for _, pair := range []struct {
+ n string
+ f lua.LGFunction
+ }{
+ {lua.LoadLibName, lua.OpenPackage},
+ {lua.BaseLibName, lua.OpenBase},
+ {lua.CoroutineLibName, lua.OpenCoroutine},
+ {lua.TabLibName, lua.OpenTable},
+ {lua.StringLibName, lua.OpenString},
+ {lua.MathLibName, lua.OpenMath},
+ } {
+ if err := l.CallByParam(lua.P{
+ Fn: l.NewFunction(pair.f),
+ NRet: 0,
+ Protect: true,
+ }, lua.LString(pair.n)); err != nil {
+ panic(err)
+ }
+ }
+
+ luajson.Preload(l)
+ requireGlobal(l, "cjson", "json")
+
+ m.Unlock()
+ conn := m.redigo()
+ m.Lock()
+ defer conn.Close()
+
+ // set global variable KEYS
+ keysTable := l.NewTable()
+ keysS, args := args[0], args[1:]
+ keysLen, err := strconv.Atoi(keysS)
+ if err != nil {
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ if keysLen < 0 {
+ c.WriteError(msgNegativeKeysNumber)
+ return
+ }
+ if keysLen > len(args) {
+ c.WriteError(msgInvalidKeysNumber)
+ return
+ }
+ keys, args := args[:keysLen], args[keysLen:]
+ for i, k := range keys {
+ l.RawSet(keysTable, lua.LNumber(i+1), lua.LString(k))
+ }
+ l.SetGlobal("KEYS", keysTable)
+
+ argvTable := l.NewTable()
+ for i, a := range args {
+ l.RawSet(argvTable, lua.LNumber(i+1), lua.LString(a))
+ }
+ l.SetGlobal("ARGV", argvTable)
+
+ redisFuncs := mkLuaFuncs(conn)
+ // Register command handlers
+ l.Push(l.NewFunction(func(l *lua.LState) int {
+ mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable)
+ l.Push(mod)
+ return 1
+ }))
+
+ l.Push(lua.LString("redis"))
+ l.Call(1, 0)
+
+ m.Unlock() // This runs in a transaction, but can access our db recursively
+ defer m.Lock()
+ if err := l.DoString(script); err != nil {
+ c.WriteError(errLuaParseError(err))
+ return
+ }
+
+ luaToRedis(l, c, l.Get(1))
+}
+
+func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+ script, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ m.runLuaScript(c, script, args)
+ })
+}
+
+func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ sha, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ script, ok := m.scripts[sha]
+ if !ok {
+ c.WriteError(msgNoScriptFound)
+ return
+ }
+
+ m.runLuaScript(c, script, args)
+ })
+}
+
+func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ subcmd, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ switch strings.ToLower(subcmd) {
+ case "load":
+ if len(args) != 1 {
+ c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD"))
+ return
+ }
+ script := args[0]
+
+ if _, err := parse.Parse(strings.NewReader(script), "user_script"); err != nil {
+ c.WriteError(errLuaParseError(err))
+ return
+ }
+ sha := sha1Hex(script)
+ m.scripts[sha] = script
+ c.WriteBulk(sha)
+
+ case "exists":
+ c.WriteLen(len(args))
+ for _, arg := range args {
+ if _, ok := m.scripts[arg]; ok {
+ c.WriteInt(1)
+ } else {
+ c.WriteInt(0)
+ }
+ }
+
+ case "flush":
+ if len(args) != 0 {
+ c.WriteError(fmt.Sprintf(msgFScriptUsage, "FLUSH"))
+ return
+ }
+
+ m.scripts = map[string]string{}
+ c.WriteOK()
+
+ default:
+ c.WriteError(fmt.Sprintf(msgFScriptUsage, strings.ToUpper(subcmd)))
+ }
+ })
+}
+
+func sha1Hex(s string) string {
+ h := sha1.New()
+ io.WriteString(h, s)
+ return hex.EncodeToString(h.Sum(nil))
+}
+
+// requireGlobal imports module modName into the global namespace with the
+// identifier id. panics if an error results from the function execution
+func requireGlobal(l *lua.LState, id, modName string) {
+ if err := l.CallByParam(lua.P{
+ Fn: l.GetGlobal("require"),
+ NRet: 1,
+ Protect: true,
+ }, lua.LString(modName)); err != nil {
+ panic(err)
+ }
+ mod := l.Get(-1)
+ l.Pop(1)
+
+ l.SetGlobal(id, mod)
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_server.go b/vendor/github.com/alicebob/miniredis/cmd_server.go
new file mode 100644
index 000000000..c021644cd
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_server.go
@@ -0,0 +1,104 @@
+// Commands from https://redis.io/commands#server
+
+package miniredis
+
+import (
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+func commandsServer(m *Miniredis) {
+ m.srv.Register("DBSIZE", m.cmdDbsize)
+ m.srv.Register("FLUSHALL", m.cmdFlushall)
+ m.srv.Register("FLUSHDB", m.cmdFlushdb)
+ m.srv.Register("TIME", m.cmdTime)
+}
+
+// DBSIZE
+func (m *Miniredis) cmdDbsize(c *server.Peer, cmd string, args []string) {
+ if len(args) > 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ c.WriteInt(len(db.keys))
+ })
+}
+
+// FLUSHALL
+func (m *Miniredis) cmdFlushall(c *server.Peer, cmd string, args []string) {
+ if len(args) > 0 && strings.ToLower(args[0]) == "async" {
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ m.flushAll()
+ c.WriteOK()
+ })
+}
+
+// FLUSHDB
+func (m *Miniredis) cmdFlushdb(c *server.Peer, cmd string, args []string) {
+ if len(args) > 0 && strings.ToLower(args[0]) == "async" {
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ m.db(ctx.selectedDB).flush()
+ c.WriteOK()
+ })
+}
+
+// TIME: time values are returned in string format instead of int
+func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) {
+ if len(args) > 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ now := m.now
+ if now.IsZero() {
+ now = time.Now()
+ }
+ nanos := now.UnixNano()
+ seconds := nanos / 1000000000
+ microseconds := (nanos / 1000) % 1000000
+
+ c.WriteLen(2)
+ c.WriteBulk(strconv.FormatInt(seconds, 10))
+ c.WriteBulk(strconv.FormatInt(microseconds, 10))
+ })
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_set.go b/vendor/github.com/alicebob/miniredis/cmd_set.go
new file mode 100644
index 000000000..2220cf55d
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_set.go
@@ -0,0 +1,639 @@
+// Commands from https://redis.io/commands#set
+
+package miniredis
+
+import (
+ "math/rand"
+ "strconv"
+ "strings"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+// commandsSet handles all set value operations.
+func commandsSet(m *Miniredis) {
+ m.srv.Register("SADD", m.cmdSadd)
+ m.srv.Register("SCARD", m.cmdScard)
+ m.srv.Register("SDIFF", m.cmdSdiff)
+ m.srv.Register("SDIFFSTORE", m.cmdSdiffstore)
+ m.srv.Register("SINTER", m.cmdSinter)
+ m.srv.Register("SINTERSTORE", m.cmdSinterstore)
+ m.srv.Register("SISMEMBER", m.cmdSismember)
+ m.srv.Register("SMEMBERS", m.cmdSmembers)
+ m.srv.Register("SMOVE", m.cmdSmove)
+ m.srv.Register("SPOP", m.cmdSpop)
+ m.srv.Register("SRANDMEMBER", m.cmdSrandmember)
+ m.srv.Register("SREM", m.cmdSrem)
+ m.srv.Register("SUNION", m.cmdSunion)
+ m.srv.Register("SUNIONSTORE", m.cmdSunionstore)
+ m.srv.Register("SSCAN", m.cmdSscan)
+}
+
+// SADD
+func (m *Miniredis) cmdSadd(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, elems := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if db.exists(key) && db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ added := db.setAdd(key, elems...)
+ c.WriteInt(added)
+ })
+}
+
+// SCARD
+func (m *Miniredis) cmdScard(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.setMembers(key)
+ c.WriteInt(len(members))
+ })
+}
+
+// SDIFF
+func (m *Miniredis) cmdSdiff(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ keys := args
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setDiff(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ c.WriteLen(len(set))
+ for k := range set {
+ c.WriteBulk(k)
+ }
+ })
+}
+
+// SDIFFSTORE
+func (m *Miniredis) cmdSdiffstore(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ dest, keys := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setDiff(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ db.del(dest, true)
+ db.setSet(dest, set)
+ c.WriteInt(len(set))
+ })
+}
+
+// SINTER
+func (m *Miniredis) cmdSinter(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ keys := args
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setInter(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ c.WriteLen(len(set))
+ for k := range set {
+ c.WriteBulk(k)
+ }
+ })
+}
+
+// SINTERSTORE
+func (m *Miniredis) cmdSinterstore(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ dest, keys := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setInter(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ db.del(dest, true)
+ db.setSet(dest, set)
+ c.WriteInt(len(set))
+ })
+}
+
+// SISMEMBER
+func (m *Miniredis) cmdSismember(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, value := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ if db.setIsMember(key, value) {
+ c.WriteInt(1)
+ return
+ }
+ c.WriteInt(0)
+ })
+}
+
+// SMEMBERS
+func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.setMembers(key)
+
+ c.WriteLen(len(members))
+ for _, elem := range members {
+ c.WriteBulk(elem)
+ }
+ })
+}
+
+// SMOVE
+func (m *Miniredis) cmdSmove(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ src, dst, member := args[0], args[1], args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(src) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(src) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ if db.exists(dst) && db.t(dst) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ if !db.setIsMember(src, member) {
+ c.WriteInt(0)
+ return
+ }
+ db.setRem(src, member)
+ db.setAdd(dst, member)
+ c.WriteInt(1)
+ })
+}
+
+// SPOP
+func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) {
+ if len(args) == 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, args := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ withCount := false
+ count := 1
+ if len(args) > 0 {
+ v, err := strconv.Atoi(args[0])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ count = v
+ withCount = true
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ if !db.exists(key) {
+ if !withCount {
+ c.WriteNull()
+ return
+ }
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ var deleted []string
+ for i := 0; i < count; i++ {
+ members := db.setMembers(key)
+ if len(members) == 0 {
+ break
+ }
+ member := members[rand.Intn(len(members))]
+ db.setRem(key, member)
+ deleted = append(deleted, member)
+ }
+ // without `count` return a single value...
+ if !withCount {
+ if len(deleted) == 0 {
+ c.WriteNull()
+ return
+ }
+ c.WriteBulk(deleted[0])
+ return
+ }
+ // ... with `count` return a list
+ c.WriteLen(len(deleted))
+ for _, v := range deleted {
+ c.WriteBulk(v)
+ }
+ })
+}
+
+// SRANDMEMBER
+func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if len(args) > 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ count := 0
+ withCount := false
+ if len(args) == 2 {
+ var err error
+ count, err = strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ withCount = true
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteNull()
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.setMembers(key)
+ if count < 0 {
+ // Non-unique elements is allowed with negative count.
+ c.WriteLen(-count)
+ for count != 0 {
+ member := members[rand.Intn(len(members))]
+ c.WriteBulk(member)
+ count++
+ }
+ return
+ }
+
+ // Must be unique elements.
+ shuffle(members)
+ if count > len(members) {
+ count = len(members)
+ }
+ if !withCount {
+ c.WriteBulk(members[0])
+ return
+ }
+ c.WriteLen(count)
+ for i := range make([]struct{}, count) {
+ c.WriteBulk(members[i])
+ }
+ })
+}
+
+// SREM
+func (m *Miniredis) cmdSrem(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, fields := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ c.WriteInt(db.setRem(key, fields...))
+ })
+}
+
+// SUNION
+func (m *Miniredis) cmdSunion(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ keys := args
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setUnion(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ c.WriteLen(len(set))
+ for k := range set {
+ c.WriteBulk(k)
+ }
+ })
+}
+
+// SUNIONSTORE
+func (m *Miniredis) cmdSunionstore(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ dest, keys := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ set, err := db.setUnion(keys)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+
+ db.del(dest, true)
+ db.setSet(dest, set)
+ c.WriteInt(len(set))
+ })
+}
+
+// SSCAN
+func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ cursor, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidCursor)
+ return
+ }
+ args = args[2:]
+ // MATCH and COUNT options
+ var withMatch bool
+ var match string
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "count" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ _, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ // We do nothing with count.
+ args = args[2:]
+ continue
+ }
+ if strings.ToLower(args[0]) == "match" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ withMatch = true
+ match = args[1]
+ args = args[2:]
+ continue
+ }
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+ // return _all_ (matched) keys every time
+
+ if cursor != 0 {
+ // invalid cursor
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(0) // no elements
+ return
+ }
+ if db.exists(key) && db.t(key) != "set" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.setMembers(key)
+ if withMatch {
+ members = matchKeys(members, match)
+ }
+
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(len(members))
+ for _, k := range members {
+ c.WriteBulk(k)
+ }
+ })
+}
+
+// shuffle shuffles a slice of strings in place. Kinda.
+func shuffle(m []string) {
+ for range m {
+ i := rand.Intn(len(m))
+ j := rand.Intn(len(m))
+ m[i], m[j] = m[j], m[i]
+ }
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_sorted_set.go b/vendor/github.com/alicebob/miniredis/cmd_sorted_set.go
new file mode 100644
index 000000000..5252b015f
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_sorted_set.go
@@ -0,0 +1,1399 @@
+// Commands from https://redis.io/commands#sorted_set
+
+package miniredis
+
+import (
+ "errors"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+var (
+ errInvalidRangeItem = errors.New(msgInvalidRangeItem)
+)
+
+// commandsSortedSet handles all sorted set operations.
+func commandsSortedSet(m *Miniredis) {
+ m.srv.Register("ZADD", m.cmdZadd)
+ m.srv.Register("ZCARD", m.cmdZcard)
+ m.srv.Register("ZCOUNT", m.cmdZcount)
+ m.srv.Register("ZINCRBY", m.cmdZincrby)
+ m.srv.Register("ZINTERSTORE", m.cmdZinterstore)
+ m.srv.Register("ZLEXCOUNT", m.cmdZlexcount)
+ m.srv.Register("ZRANGE", m.makeCmdZrange(false))
+ m.srv.Register("ZRANGEBYLEX", m.makeCmdZrangebylex(false))
+ m.srv.Register("ZRANGEBYSCORE", m.makeCmdZrangebyscore(false))
+ m.srv.Register("ZRANK", m.makeCmdZrank(false))
+ m.srv.Register("ZREM", m.cmdZrem)
+ m.srv.Register("ZREMRANGEBYLEX", m.cmdZremrangebylex)
+ m.srv.Register("ZREMRANGEBYRANK", m.cmdZremrangebyrank)
+ m.srv.Register("ZREMRANGEBYSCORE", m.cmdZremrangebyscore)
+ m.srv.Register("ZREVRANGE", m.makeCmdZrange(true))
+ m.srv.Register("ZREVRANGEBYLEX", m.makeCmdZrangebylex(true))
+ m.srv.Register("ZREVRANGEBYSCORE", m.makeCmdZrangebyscore(true))
+ m.srv.Register("ZREVRANK", m.makeCmdZrank(true))
+ m.srv.Register("ZSCORE", m.cmdZscore)
+ m.srv.Register("ZUNIONSTORE", m.cmdZunionstore)
+ m.srv.Register("ZSCAN", m.cmdZscan)
+ m.srv.Register("ZPOPMAX", m.cmdZpopmax(true))
+ m.srv.Register("ZPOPMIN", m.cmdZpopmax(false))
+}
+
+// ZADD
+func (m *Miniredis) cmdZadd(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, args := args[0], args[1:]
+ var (
+ nx = false
+ xx = false
+ ch = false
+ incr = false
+ elems = map[string]float64{}
+ )
+
+outer:
+ for len(args) > 0 {
+ switch strings.ToUpper(args[0]) {
+ case "NX":
+ nx = true
+ args = args[1:]
+ continue
+ case "XX":
+ xx = true
+ args = args[1:]
+ continue
+ case "CH":
+ ch = true
+ args = args[1:]
+ continue
+ case "INCR":
+ incr = true
+ args = args[1:]
+ continue
+ default:
+ break outer
+ }
+ }
+
+ if len(args) == 0 || len(args)%2 != 0 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ for len(args) > 0 {
+ score, err := strconv.ParseFloat(args[0], 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidFloat)
+ return
+ }
+ elems[args[1]] = score
+ args = args[2:]
+ }
+
+ if xx && nx {
+ setDirty(c)
+ c.WriteError(msgXXandNX)
+ return
+ }
+
+ if incr && len(elems) > 1 {
+ setDirty(c)
+ c.WriteError(msgSingleElementPair)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if db.exists(key) && db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ if incr {
+ for member, delta := range elems {
+ if nx && db.ssetExists(key, member) {
+ c.WriteNull()
+ return
+ }
+ if xx && !db.ssetExists(key, member) {
+ c.WriteNull()
+ return
+ }
+ newScore := db.ssetIncrby(key, member, delta)
+ c.WriteBulk(formatFloat(newScore))
+ }
+ return
+ }
+
+ res := 0
+ for member, score := range elems {
+ if nx && db.ssetExists(key, member) {
+ continue
+ }
+ if xx && !db.ssetExists(key, member) {
+ continue
+ }
+ old := db.ssetScore(key, member)
+ if db.ssetAdd(key, score, member) {
+ res++
+ } else {
+ if ch && old != score {
+ // if 'CH' is specified, only count changed keys
+ res++
+ }
+ }
+ }
+ c.WriteInt(res)
+ })
+}
+
+// ZCARD
+func (m *Miniredis) cmdZcard(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ c.WriteInt(db.ssetCard(key))
+ })
+}
+
+// ZCOUNT
+func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseFloatRange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+ max, maxIncl, err := parseFloatRange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetElements(key)
+ members = withSSRange(members, min, minIncl, max, maxIncl)
+ c.WriteInt(len(members))
+ })
+}
+
+// ZINCRBY
+func (m *Miniredis) cmdZincrby(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ delta, err := strconv.ParseFloat(args[1], 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidFloat)
+ return
+ }
+ member := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if db.exists(key) && db.t(key) != "zset" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ newScore := db.ssetIncrby(key, member, delta)
+ c.WriteBulk(formatFloat(newScore))
+ })
+}
+
+// ZINTERSTORE
+func (m *Miniredis) cmdZinterstore(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ destination := args[0]
+ numKeys, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ if len(args) < numKeys {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ if numKeys <= 0 {
+ setDirty(c)
+ c.WriteError("ERR at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE")
+ return
+ }
+ keys := args[:numKeys]
+ args = args[numKeys:]
+
+ withWeights := false
+ weights := []float64{}
+ aggregate := "sum"
+ for len(args) > 0 {
+ switch strings.ToLower(args[0]) {
+ case "weights":
+ if len(args) < numKeys+1 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ for i := 0; i < numKeys; i++ {
+ f, err := strconv.ParseFloat(args[i+1], 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError("ERR weight value is not a float")
+ return
+ }
+ weights = append(weights, f)
+ }
+ withWeights = true
+ args = args[numKeys+1:]
+ case "aggregate":
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ aggregate = strings.ToLower(args[1])
+ switch aggregate {
+ case "sum", "min", "max":
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ args = args[2:]
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+ db.del(destination, true)
+
+ // We collect everything and remove all keys which turned out not to be
+ // present in every set.
+ sset := map[string]float64{}
+ counts := map[string]int{}
+ for i, key := range keys {
+ if !db.exists(key) {
+ continue
+ }
+ if db.t(key) != "zset" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ for _, el := range db.ssetElements(key) {
+ score := el.score
+ if withWeights {
+ score *= weights[i]
+ }
+ counts[el.member]++
+ old, ok := sset[el.member]
+ if !ok {
+ sset[el.member] = score
+ continue
+ }
+ switch aggregate {
+ default:
+ panic("Invalid aggregate")
+ case "sum":
+ sset[el.member] += score
+ case "min":
+ if score < old {
+ sset[el.member] = score
+ }
+ case "max":
+ if score > old {
+ sset[el.member] = score
+ }
+ }
+ }
+ }
+ for key, count := range counts {
+ if count != numKeys {
+ delete(sset, key)
+ }
+ }
+ db.ssetSet(destination, sset)
+ c.WriteInt(len(sset))
+ })
+}
+
+// ZLEXCOUNT
+func (m *Miniredis) cmdZlexcount(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseLexrange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+ max, maxIncl, err := parseLexrange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ // Just key sort. If scores are not the same we don't care.
+ sort.Strings(members)
+ members = withLexRange(members, min, minIncl, max, maxIncl)
+
+ c.WriteInt(len(members))
+ })
+}
+
+// ZRANGE and ZREVRANGE
+func (m *Miniredis) makeCmdZrange(reverse bool) server.Cmd {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ start, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withScores := false
+ if len(args) > 4 {
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ if len(args) == 4 {
+ if strings.ToLower(args[3]) != "withscores" {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ withScores = true
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ if reverse {
+ reverseSlice(members)
+ }
+ rs, re := redisRange(len(members), start, end, false)
+ if withScores {
+ c.WriteLen((re - rs) * 2)
+ } else {
+ c.WriteLen(re - rs)
+ }
+ for _, el := range members[rs:re] {
+ c.WriteBulk(el)
+ if withScores {
+ c.WriteBulk(formatFloat(db.ssetScore(key, el)))
+ }
+ }
+ })
+ }
+}
+
+// ZRANGEBYLEX and ZREVRANGEBYLEX
+func (m *Miniredis) makeCmdZrangebylex(reverse bool) server.Cmd {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseLexrange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+ max, maxIncl, err := parseLexrange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+ args = args[3:]
+
+ withLimit := false
+ limitStart := 0
+ limitEnd := 0
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "limit" {
+ withLimit = true
+ args = args[1:]
+ if len(args) < 2 {
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ limitStart, err = strconv.Atoi(args[0])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ limitEnd, err = strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ continue
+ }
+ // Syntax error
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ // Just key sort. If scores are not the same we don't care.
+ sort.Strings(members)
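+ // The REV variant takes its range as max min; swap back before filtering.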
+ if reverse {
+ min, max = max, min
+ minIncl, maxIncl = maxIncl, minIncl
+ }
+ members = withLexRange(members, min, minIncl, max, maxIncl)
+ if reverse {
+ reverseSlice(members)
+ }
+
+ // Apply LIMIT ranges. That's <start> <elements>. Unlike RANGE.
+ if withLimit {
+ if limitStart < 0 {
+ members = nil
+ } else {
+ if limitStart < len(members) {
+ members = members[limitStart:]
+ } else {
+ // out of range
+ members = nil
+ }
+ if limitEnd >= 0 {
+ if len(members) > limitEnd {
+ members = members[:limitEnd]
+ }
+ }
+ }
+ }
+
+ c.WriteLen(len(members))
+ for _, el := range members {
+ c.WriteBulk(el)
+ }
+ })
+ }
+}
+
+// ZRANGEBYSCORE and ZREVRANGEBYSCORE
+func (m *Miniredis) makeCmdZrangebyscore(reverse bool) server.Cmd {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseFloatRange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+ max, maxIncl, err := parseFloatRange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+ args = args[3:]
+
+ withScores := false
+ withLimit := false
+ limitStart := 0
+ limitEnd := 0
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "limit" {
+ withLimit = true
+ args = args[1:]
+ if len(args) < 2 {
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ limitStart, err = strconv.Atoi(args[0])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ limitEnd, err = strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ continue
+ }
+ if strings.ToLower(args[0]) == "withscores" {
+ withScores = true
+ args = args[1:]
+ continue
+ }
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetElements(key)
+ if reverse {
+ min, max = max, min
+ minIncl, maxIncl = maxIncl, minIncl
+ }
+ members = withSSRange(members, min, minIncl, max, maxIncl)
+ if reverse {
+ reverseElems(members)
+ }
+
+ // Apply LIMIT ranges. That's <start> <elements>. Unlike RANGE.
+ if withLimit {
+ if limitStart < 0 {
+ members = ssElems{}
+ } else {
+ if limitStart < len(members) {
+ members = members[limitStart:]
+ } else {
+ // out of range
+ members = ssElems{}
+ }
+ if limitEnd >= 0 {
+ if len(members) > limitEnd {
+ members = members[:limitEnd]
+ }
+ }
+ }
+ }
+
+ if withScores {
+ c.WriteLen(len(members) * 2)
+ } else {
+ c.WriteLen(len(members))
+ }
+ for _, el := range members {
+ c.WriteBulk(el.member)
+ if withScores {
+ c.WriteBulk(formatFloat(el.score))
+ }
+ }
+ })
+ }
+}
+
+// ZRANK and ZREVRANK
+func (m *Miniredis) makeCmdZrank(reverse bool) server.Cmd {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, member := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteNull()
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ direction := asc
+ if reverse {
+ direction = desc
+ }
+ rank, ok := db.ssetRank(key, member, direction)
+ if !ok {
+ c.WriteNull()
+ return
+ }
+ c.WriteInt(rank)
+ })
+ }
+}
+
+// ZREM
+func (m *Miniredis) cmdZrem(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, members := args[0], args[1:]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ deleted := 0
+ for _, member := range members {
+ if db.ssetRem(key, member) {
+ deleted++
+ }
+ }
+ c.WriteInt(deleted)
+ })
+}
+
+// ZREMRANGEBYLEX
+func (m *Miniredis) cmdZremrangebylex(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseLexrange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+ max, maxIncl, err := parseLexrange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(err.Error())
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ // Just key sort. If scores are not the same we don't care.
+ sort.Strings(members)
+ members = withLexRange(members, min, minIncl, max, maxIncl)
+
+ for _, el := range members {
+ db.ssetRem(key, el)
+ }
+ c.WriteInt(len(members))
+ })
+}
+
+// ZREMRANGEBYRANK
+func (m *Miniredis) cmdZremrangebyrank(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ start, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ rs, re := redisRange(len(members), start, end, false)
+ for _, el := range members[rs:re] {
+ db.ssetRem(key, el)
+ }
+ c.WriteInt(re - rs)
+ })
+}
+
+// ZREMRANGEBYSCORE
+func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ min, minIncl, err := parseFloatRange(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+ max, maxIncl, err := parseFloatRange(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidMinMax)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetElements(key)
+ members = withSSRange(members, min, minIncl, max, maxIncl)
+
+ for _, el := range members {
+ db.ssetRem(key, el.member)
+ }
+ c.WriteInt(len(members))
+ })
+}
+
+// ZSCORE
+func (m *Miniredis) cmdZscore(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, member := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteNull()
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ if !db.ssetExists(key, member) {
+ c.WriteNull()
+ return
+ }
+
+ c.WriteBulk(formatFloat(db.ssetScore(key, member)))
+ })
+}
+
+// parseFloatRange handles ZRANGEBYSCORE floats. They are inclusive unless the
+// string starts with '('
+func parseFloatRange(s string) (float64, bool, error) {
+ if len(s) == 0 {
+ return 0, false, nil
+ }
+ inclusive := true
+ if s[0] == '(' {
+ s = s[1:]
+ inclusive = false
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ return f, inclusive, err
+}
+
+// parseLexrange handles ZRANGEBYLEX ranges. They start with '[', '(', or are
+// '+' or '-'.
+// Returns range, inclusive, error.
+// On '+' or '-' that's just returned.
+func parseLexrange(s string) (string, bool, error) {
+ if len(s) == 0 {
+ return "", false, errInvalidRangeItem
+ }
+ if s == "+" || s == "-" {
+ return s, false, nil
+ }
+ switch s[0] {
+ case '(':
+ return s[1:], false, nil
+ case '[':
+ return s[1:], true, nil
+ default:
+ return "", false, errInvalidRangeItem
+ }
+}
+
+// withSSRange limits a list of sorted set elements by the ZRANGEBYSCORE range
+// logic.
+func withSSRange(members ssElems, min float64, minIncl bool, max float64, maxIncl bool) ssElems {
+ gt := func(a, b float64) bool { return a > b }
+ gteq := func(a, b float64) bool { return a >= b }
+
+ mincmp := gt
+ if minIncl {
+ mincmp = gteq
+ }
+ for i, m := range members {
+ if mincmp(m.score, min) {
+ members = members[i:]
+ goto checkmax
+ }
+ }
+ // all elements were smaller
+ return nil
+
+checkmax:
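+ // Trim the tail: cut at the first element past the max bound.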
+ maxcmp := gteq
+ if maxIncl {
+ maxcmp = gt
+ }
+ for i, m := range members {
+ if maxcmp(m.score, max) {
+ members = members[:i]
+ break
+ }
+ }
+
+ return members
+}
+
+// withLexRange limits a list of sorted set elements.
+func withLexRange(members []string, min string, minIncl bool, max string, maxIncl bool) []string {
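+ // A max of "-" or a min of "+" can never match anything.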
+ if max == "-" || min == "+" {
+ return nil
+ }
+ if min != "-" {
+ if minIncl {
+ for i, m := range members {
+ if m >= min {
+ members = members[i:]
+ break
+ }
+ }
+ } else {
+ // Excluding min
+ for i, m := range members {
+ if m > min {
+ members = members[i:]
+ break
+ }
+ }
+ }
+ }
+ if max != "+" {
+ if maxIncl {
+ for i, m := range members {
+ if m > max {
+ members = members[:i]
+ break
+ }
+ }
+ } else {
+ // Excluding max
+ for i, m := range members {
+ if m >= max {
+ members = members[:i]
+ break
+ }
+ }
+ }
+ }
+ return members
+}
+
+// ZUNIONSTORE
+func (m *Miniredis) cmdZunionstore(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ destination := args[0]
+ numKeys, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ if len(args) < numKeys {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ if numKeys <= 0 {
+ setDirty(c)
+ c.WriteError("ERR at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE")
+ return
+ }
+ keys := args[:numKeys]
+ args = args[numKeys:]
+
+ withWeights := false
+ weights := []float64{}
+ aggregate := "sum"
+ for len(args) > 0 {
+ switch strings.ToLower(args[0]) {
+ case "weights":
+ if len(args) < numKeys+1 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ for i := 0; i < numKeys; i++ {
+ f, err := strconv.ParseFloat(args[i+1], 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError("ERR weight value is not a float")
+ return
+ }
+ weights = append(weights, f)
+ }
+ withWeights = true
+ args = args[numKeys+1:]
+ case "aggregate":
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ aggregate = strings.ToLower(args[1])
+ switch aggregate {
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ case "sum", "min", "max":
+ }
+ args = args[2:]
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
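+ // Only clear the destination up front when it is not also one of the source keys.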
+ deleteDest := true
+ for _, key := range keys {
+ if destination == key {
+ deleteDest = false
+ }
+ }
+ if deleteDest {
+ db.del(destination, true)
+ }
+
+ sset := sortedSet{}
+ for i, key := range keys {
+ if !db.exists(key) {
+ continue
+ }
+ if db.t(key) != "zset" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ for _, el := range db.ssetElements(key) {
+ score := el.score
+ if withWeights {
+ score *= weights[i]
+ }
+ old, ok := sset[el.member]
+ if !ok {
+ sset[el.member] = score
+ continue
+ }
+ switch aggregate {
+ default:
+ panic("Invalid aggregate")
+ case "sum":
+ sset[el.member] += score
+ case "min":
+ if score < old {
+ sset[el.member] = score
+ }
+ case "max":
+ if score > old {
+ sset[el.member] = score
+ }
+ }
+ }
+ }
+ db.ssetSet(destination, sset)
+ c.WriteInt(sset.card())
+ })
+}
+
+// ZSCAN
+func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ cursor, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidCursor)
+ return
+ }
+ args = args[2:]
+ // MATCH and COUNT options
+ var withMatch bool
+ var match string
+ for len(args) > 0 {
+ if strings.ToLower(args[0]) == "count" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ _, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ // We do nothing with count.
+ args = args[2:]
+ continue
+ }
+ if strings.ToLower(args[0]) == "match" {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ withMatch = true
+ match = args[1]
+ args = args[2:]
+ continue
+ }
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+ // We return _all_ (matched) keys every time.
+
+ if cursor != 0 {
+ // Invalid cursor.
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ c.WriteLen(0) // no elements
+ return
+ }
+ if db.exists(key) && db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ if withMatch {
+ members = matchKeys(members, match)
+ }
+
+ c.WriteLen(2)
+ c.WriteBulk("0") // no next cursor
+ // Like HSCAN, ZSCAN replies with member, score pairs.
+ c.WriteLen(len(members) * 2)
+ for _, k := range members {
+ c.WriteBulk(k)
+ c.WriteBulk(formatFloat(db.ssetScore(key, k)))
+ }
+ })
+}
+
+// ZPOPMAX and ZPOPMIN
+func (m *Miniredis) cmdZpopmax(reverse bool) server.Cmd {
+ return func(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ count := 1
+ var err error
+ if len(args) > 1 {
+ count, err = strconv.Atoi(args[1])
+
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ }
+
+ withScores := true
+ if len(args) > 2 {
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteLen(0)
+ return
+ }
+
+ if db.t(key) != "zset" {
+ c.WriteError(ErrWrongType.Error())
+ return
+ }
+
+ members := db.ssetMembers(key)
+ if reverse {
+ reverseSlice(members)
+ }
+ rs, re := redisRange(len(members), 0, count-1, false)
+ if withScores {
+ c.WriteLen((re - rs) * 2)
+ } else {
+ c.WriteLen(re - rs)
+ }
+ for _, el := range members[rs:re] {
+ c.WriteBulk(el)
+ if withScores {
+ c.WriteBulk(formatFloat(db.ssetScore(key, el)))
+ }
+ db.ssetRem(key, el)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/alicebob/miniredis/cmd_string.go b/vendor/github.com/alicebob/miniredis/cmd_string.go
new file mode 100644
index 000000000..930da9921
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/cmd_string.go
@@ -0,0 +1,1075 @@
+// Commands from https://redis.io/commands#string
+
+package miniredis
+
+import (
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+// commandsString handles all string value operations.
+func commandsString(m *Miniredis) {
+ m.srv.Register("APPEND", m.cmdAppend)
+ m.srv.Register("BITCOUNT", m.cmdBitcount)
+ m.srv.Register("BITOP", m.cmdBitop)
+ m.srv.Register("BITPOS", m.cmdBitpos)
+ m.srv.Register("DECRBY", m.cmdDecrby)
+ m.srv.Register("DECR", m.cmdDecr)
+ m.srv.Register("GETBIT", m.cmdGetbit)
+ m.srv.Register("GET", m.cmdGet)
+ m.srv.Register("GETRANGE", m.cmdGetrange)
+ m.srv.Register("GETSET", m.cmdGetset)
+ m.srv.Register("INCRBYFLOAT", m.cmdIncrbyfloat)
+ m.srv.Register("INCRBY", m.cmdIncrby)
+ m.srv.Register("INCR", m.cmdIncr)
+ m.srv.Register("MGET", m.cmdMget)
+ m.srv.Register("MSET", m.cmdMset)
+ m.srv.Register("MSETNX", m.cmdMsetnx)
+ m.srv.Register("PSETEX", m.cmdPsetex)
+ m.srv.Register("SETBIT", m.cmdSetbit)
+ m.srv.Register("SETEX", m.cmdSetex)
+ m.srv.Register("SET", m.cmdSet)
+ m.srv.Register("SETNX", m.cmdSetnx)
+ m.srv.Register("SETRANGE", m.cmdSetrange)
+ m.srv.Register("STRLEN", m.cmdStrlen)
+}
+
+// SET
+func (m *Miniredis) cmdSet(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ var (
+ nx = false // set iff not exists
+ xx = false // set iff exists
+ ttl time.Duration
+ )
+
+ key, value, args := args[0], args[1], args[2:]
+ for len(args) > 0 {
+ timeUnit := time.Second
+ switch strings.ToUpper(args[0]) {
+ case "NX":
+ nx = true
+ args = args[1:]
+ continue
+ case "XX":
+ xx = true
+ args = args[1:]
+ continue
+ case "PX":
+ timeUnit = time.Millisecond
+ fallthrough
+ case "EX":
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ expire, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ ttl = time.Duration(expire) * timeUnit
+ if ttl <= 0 {
+ setDirty(c)
+ c.WriteError(msgInvalidSETime)
+ return
+ }
+
+ args = args[2:]
+ continue
+ default:
+ setDirty(c)
+ c.WriteError(msgSyntaxError)
+ return
+ }
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if nx {
+ if db.exists(key) {
+ c.WriteNull()
+ return
+ }
+ }
+ if xx {
+ if !db.exists(key) {
+ c.WriteNull()
+ return
+ }
+ }
+
+ db.del(key, true) // be sure to remove existing values of other type keys.
+ // a vanilla SET clears the expire
+ db.stringSet(key, value)
+ if ttl != 0 {
+ db.ttl[key] = ttl
+ }
+ c.WriteOK()
+ })
+}
+
+// SETEX
+func (m *Miniredis) cmdSetex(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ ttl, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ if ttl <= 0 {
+ setDirty(c)
+ c.WriteError(msgInvalidSETEXTime)
+ return
+ }
+ value := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ db.del(key, true) // Clear any existing keys.
+ db.stringSet(key, value)
+ db.ttl[key] = time.Duration(ttl) * time.Second
+ c.WriteOK()
+ })
+}
+
+// PSETEX
+func (m *Miniredis) cmdPsetex(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ ttl, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ if ttl <= 0 {
+ setDirty(c)
+ c.WriteError(msgInvalidPSETEXTime)
+ return
+ }
+ value := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ db.del(key, true) // Clear any existing keys.
+ db.stringSet(key, value)
+ db.ttl[key] = time.Duration(ttl) * time.Millisecond
+ c.WriteOK()
+ })
+}
+
+// SETNX
+func (m *Miniredis) cmdSetnx(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, value := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if _, ok := db.keys[key]; ok {
+ c.WriteInt(0)
+ return
+ }
+
+ db.stringSet(key, value)
+ c.WriteInt(1)
+ })
+}
+
+// MSET
+func (m *Miniredis) cmdMset(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ if len(args)%2 != 0 {
+ setDirty(c)
+ // non-default error message
+ c.WriteError("ERR wrong number of arguments for MSET")
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ for len(args) > 0 {
+ key, value := args[0], args[1]
+ args = args[2:]
+
+ db.del(key, true) // clear TTL
+ db.stringSet(key, value)
+ }
+ c.WriteOK()
+ })
+}
+
+// MSETNX
+func (m *Miniredis) cmdMsetnx(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ if len(args)%2 != 0 {
+ setDirty(c)
+ // non-default error message (yes, with 'MSET').
+ c.WriteError("ERR wrong number of arguments for MSET")
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ keys := map[string]string{}
+ existing := false
+ for len(args) > 0 {
+ key := args[0]
+ value := args[1]
+ args = args[2:]
+ keys[key] = value
+ if _, ok := db.keys[key]; ok {
+ existing = true
+ }
+ }
+
+ res := 0
+ if !existing {
+ res = 1
+ for k, v := range keys {
+ // Nothing to delete. That's the whole point.
+ db.stringSet(k, v)
+ }
+ }
+ c.WriteInt(res)
+ })
+}
+
+// GET
+func (m *Miniredis) cmdGet(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteNull()
+ return
+ }
+ if db.t(key) != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteBulk(db.stringGet(key))
+ })
+}
+
+// GETSET
+func (m *Miniredis) cmdGetset(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, value := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ old, ok := db.stringKeys[key]
+ db.stringSet(key, value)
+ // a GETSET clears the ttl
+ delete(db.ttl, key)
+
+ if !ok {
+ c.WriteNull()
+ return
+ }
+ c.WriteBulk(old)
+ })
+}
+
+// MGET
+func (m *Miniredis) cmdMget(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ c.WriteLen(len(args))
+ for _, k := range args {
+ if t, ok := db.keys[k]; !ok || t != "string" {
+ c.WriteNull()
+ continue
+ }
+ v, ok := db.stringKeys[k]
+ if !ok {
+ // Should not happen, we just checked keys[]
+ c.WriteNull()
+ continue
+ }
+ c.WriteBulk(v)
+ }
+ })
+}
+
+// INCR
+func (m *Miniredis) cmdIncr(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ key := args[0]
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ v, err := db.stringIncr(key, +1)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ // Don't touch TTL
+ c.WriteInt(v)
+ })
+}
+
+// INCRBY
+func (m *Miniredis) cmdIncrby(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ delta, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v, err := db.stringIncr(key, delta)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ // Don't touch TTL
+ c.WriteInt(v)
+ })
+}
+
+// INCRBYFLOAT
+func (m *Miniredis) cmdIncrbyfloat(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ delta, err := strconv.ParseFloat(args[1], 64)
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidFloat)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v, err := db.stringIncrfloat(key, delta)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ // Don't touch TTL
+ c.WriteBulk(formatFloat(v))
+ })
+}
+
+// DECR
+func (m *Miniredis) cmdDecr(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ key := args[0]
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ v, err := db.stringIncr(key, -1)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ // Don't touch TTL
+ c.WriteInt(v)
+ })
+}
+
+// DECRBY
+func (m *Miniredis) cmdDecrby(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ delta, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v, err := db.stringIncr(key, -delta)
+ if err != nil {
+ c.WriteError(err.Error())
+ return
+ }
+ // Don't touch TTL
+ c.WriteInt(v)
+ })
+}
+
+// STRLEN
+func (m *Miniredis) cmdStrlen(c *server.Peer, cmd string, args []string) {
+ if len(args) != 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ c.WriteInt(len(db.stringKeys[key]))
+ })
+}
+
+// APPEND
+func (m *Miniredis) cmdAppend(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key, value := args[0], args[1]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ newValue := db.stringKeys[key] + value
+ db.stringSet(key, newValue)
+
+ c.WriteInt(len(newValue))
+ })
+}
+
+// GETRANGE
+func (m *Miniredis) cmdGetrange(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ start, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err := strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v := db.stringKeys[key]
+ c.WriteBulk(withRange(v, start, end))
+ })
+}
+
+// SETRANGE
+func (m *Miniredis) cmdSetrange(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ pos, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ if pos < 0 {
+ setDirty(c)
+ c.WriteError("ERR offset is out of range")
+ return
+ }
+ subst := args[2]
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ v := []byte(db.stringKeys[key])
+ if len(v) < pos+len(subst) {
+ newV := make([]byte, pos+len(subst))
+ copy(newV, v)
+ v = newV
+ }
+ copy(v[pos:pos+len(subst)], subst)
+ db.stringSet(key, string(v))
+ c.WriteInt(len(v))
+ })
+}
+
+// BITCOUNT
+func (m *Miniredis) cmdBitcount(c *server.Peer, cmd string, args []string) {
+ if len(args) < 1 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ var (
+ useRange = false
+ start, end = 0, 0
+ key = args[0]
+ )
+ args = args[1:]
+ if len(args) >= 2 {
+ useRange = true
+ var err error
+ start, err = strconv.Atoi(args[0])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ end, err = strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ args = args[2:]
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if !db.exists(key) {
+ c.WriteInt(0)
+ return
+ }
+ if db.t(key) != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+
+ // Real redis only checks after it knows the key is there and a string.
+ if len(args) != 0 {
+ c.WriteError(msgSyntaxError)
+ return
+ }
+
+ v := db.stringKeys[key]
+ if useRange {
+ v = withRange(v, start, end)
+ }
+
+ c.WriteInt(countBits([]byte(v)))
+ })
+}
+
+// BITOP
+func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) {
+ if len(args) < 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ var (
+ op = strings.ToUpper(args[0])
+ target = args[1]
+ input = args[2:]
+ )
+
+ // 'op' is tested when the transaction is executed.
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ switch op {
+ case "AND", "OR", "XOR":
+ first := input[0]
+ if t, ok := db.keys[first]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ res := []byte(db.stringKeys[first])
+ for _, vk := range input[1:] {
+ if t, ok := db.keys[vk]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ v := db.stringKeys[vk]
+ cb := map[string]func(byte, byte) byte{
+ "AND": func(a, b byte) byte { return a & b },
+ "OR": func(a, b byte) byte { return a | b },
+ "XOR": func(a, b byte) byte { return a ^ b },
+ }[op]
+ res = sliceBinOp(cb, res, []byte(v))
+ }
+ db.del(target, false) // Keep TTL
+ if len(res) == 0 {
+ db.del(target, true)
+ } else {
+ db.stringSet(target, string(res))
+ }
+ c.WriteInt(len(res))
+ case "NOT":
+ // NOT only takes a single argument.
+ if len(input) != 1 {
+ c.WriteError("ERR BITOP NOT must be called with a single source key.")
+ return
+ }
+ key := input[0]
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ value := []byte(db.stringKeys[key])
+ for i := range value {
+ value[i] = ^value[i]
+ }
+ db.del(target, false) // Keep TTL
+ if len(value) == 0 {
+ db.del(target, true)
+ } else {
+ db.stringSet(target, string(value))
+ }
+ c.WriteInt(len(value))
+ default:
+ c.WriteError(msgSyntaxError)
+ }
+ })
+}
+
+// BITPOS
+func (m *Miniredis) cmdBitpos(c *server.Peer, cmd string, args []string) {
+ if len(args) < 2 || len(args) > 4 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ bit, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ var start, end int
+ withEnd := false
+ if len(args) > 2 {
+ start, err = strconv.Atoi(args[2])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ }
+ if len(args) > 3 {
+ end, err = strconv.Atoi(args[3])
+ if err != nil {
+ setDirty(c)
+ c.WriteError(msgInvalidInt)
+ return
+ }
+ withEnd = true
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ value := db.stringKeys[key]
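+ // start and end are byte offsets, not bit offsets.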
+ if start != 0 {
+ if start > len(value) {
+ start = len(value)
+ }
+ }
+ if withEnd {
+ end++ // redis end semantics.
+ if end < 0 {
+ end = len(value) + end
+ }
+ if end > len(value) {
+ end = len(value)
+ }
+ } else {
+ end = len(value)
+ }
+ if start != 0 || withEnd {
+ if end < start {
+ value = ""
+ } else {
+ value = value[start:end]
+ }
+ }
+ pos := bitPos([]byte(value), bit == 1)
+ if pos >= 0 {
+ pos += start * 8
+ }
+ // Special case when looking for 0, but not when an explicit end was
+ // given.
+ if bit == 0 && pos == -1 && !withEnd {
+ pos = start*8 + len(value)*8
+ }
+ c.WriteInt(pos)
+ })
+}
+
+// GETBIT
+func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) {
+ if len(args) != 2 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ bit, err := strconv.Atoi(args[1])
+ if err != nil {
+ setDirty(c)
+ c.WriteError("ERR bit offset is not an integer or out of range")
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ value := db.stringKeys[key]
+
+ ourByteNr := bit / 8
+ var ourByte byte
+ if ourByteNr > len(value)-1 {
+ ourByte = '\x00'
+ } else {
+ ourByte = value[ourByteNr]
+ }
+ res := 0
+ if toBits(ourByte)[bit%8] {
+ res = 1
+ }
+ c.WriteInt(res)
+ })
+}
+
+// SETBIT
+func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) {
+ if len(args) != 3 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ key := args[0]
+ bit, err := strconv.Atoi(args[1])
+ if err != nil || bit < 0 {
+ setDirty(c)
+ c.WriteError("ERR bit offset is not an integer or out of range")
+ return
+ }
+ newBit, err := strconv.Atoi(args[2])
+ if err != nil || (newBit != 0 && newBit != 1) {
+ setDirty(c)
+ c.WriteError("ERR bit is not an integer or out of range")
+ return
+ }
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ db := m.db(ctx.selectedDB)
+
+ if t, ok := db.keys[key]; ok && t != "string" {
+ c.WriteError(msgWrongType)
+ return
+ }
+ value := []byte(db.stringKeys[key])
+
+ ourByteNr := bit / 8
+ ourBitNr := bit % 8
+ if ourByteNr > len(value)-1 {
+ // Too short. Expand.
+ newValue := make([]byte, ourByteNr+1)
+ copy(newValue, value)
+ value = newValue
+ }
+ old := 0
+ if toBits(value[ourByteNr])[ourBitNr] {
+ old = 1
+ }
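+ // Bit 0 is the most significant bit of the first byte (Redis semantics), hence the 7-ourBitNr shift.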
+ if newBit == 0 {
+ value[ourByteNr] &^= 1 << uint8(7-ourBitNr)
+ } else {
+ value[ourByteNr] |= 1 << uint8(7-ourBitNr)
+ }
+ db.stringSet(key, string(value))
+
+ c.WriteInt(old)
+ })
+}
+
+// Redis range. both start and end can be negative.
+func withRange(v string, start, end int) string {
+ s, e := redisRange(len(v), start, end, true /* string getrange semantics */)
+ return v[s:e]
+}
+
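+// countBits returns the number of set bits in v (population count).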
+func countBits(v []byte) int {
+ count := 0
+ for _, b := range []byte(v) {
+ for b > 0 {
+ count += int((b % uint8(2)))
+ b = b >> 1
+ }
+ }
+ return count
+}
+
+// sliceBinOp applies an operator to all slice elements, with Redis string
+// padding logic.
+func sliceBinOp(f func(a, b byte) byte, a, b []byte) []byte {
+ maxl := len(a)
+ if len(b) > maxl {
+ maxl = len(b)
+ }
+ lA := make([]byte, maxl)
+ copy(lA, a)
+ lB := make([]byte, maxl)
+ copy(lB, b)
+ res := make([]byte, maxl)
+ for i := range res {
+ res[i] = f(lA[i], lB[i])
+ }
+ return res
+}
+
+// Return the index of the first bit that is set/unset, or -1 if there is none.
+func bitPos(s []byte, bit bool) int {
+ for i, b := range s {
+ for j, set := range toBits(b) {
+ if set == bit {
+ return i*8 + j
+ }
+ }
+ }
+ return -1
+}
+
+// toBits converts a byte into 8 bools, most significant bit first.
+func toBits(s byte) [8]bool {
+ r := [8]bool{}
+ for i := range r {
+ if s&(uint8(1)<<uint8(7-i)) != 0 {
+ r[i] = true
+ }
+ }
+ return r
+}
+ // Abort! Abort!
+ stopTx(ctx)
+ c.WriteLen(0)
+ return
+ }
+ }
+
+ c.WriteLen(len(ctx.transaction))
+ for _, cb := range ctx.transaction {
+ cb(c, ctx)
+ }
+ // wake up anyone who waits on anything.
+ m.signal.Broadcast()
+
+ stopTx(ctx)
+}
+
+// DISCARD
+func (m *Miniredis) cmdDiscard(c *server.Peer, cmd string, args []string) {
+ if len(args) != 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ ctx := getCtx(c)
+ if !inTx(ctx) {
+ c.WriteError("ERR DISCARD without MULTI")
+ return
+ }
+
+ stopTx(ctx)
+ c.WriteOK()
+}
+
+// WATCH
+func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) {
+ if len(args) == 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ ctx := getCtx(c)
+ if inTx(ctx) {
+ c.WriteError("ERR WATCH in MULTI")
+ return
+ }
+
+ m.Lock()
+ defer m.Unlock()
+ db := m.db(ctx.selectedDB)
+
+ for _, key := range args {
+ watch(db, ctx, key)
+ }
+ c.WriteOK()
+}
+
+// UNWATCH
+func (m *Miniredis) cmdUnwatch(c *server.Peer, cmd string, args []string) {
+ if len(args) != 0 {
+ setDirty(c)
+ c.WriteError(errWrongNumber(cmd))
+ return
+ }
+ if !m.handleAuth(c) {
+ return
+ }
+
+ // Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me.
+ unwatch(getCtx(c))
+
+ withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+ // Do nothing if it's called in a transaction.
+ c.WriteOK()
+ })
+}
diff --git a/vendor/github.com/alicebob/miniredis/db.go b/vendor/github.com/alicebob/miniredis/db.go
new file mode 100644
index 000000000..5600afe85
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/db.go
@@ -0,0 +1,551 @@
+package miniredis
+
+import (
+ "sort"
+ "strconv"
+ "time"
+)
+
+func (db *RedisDB) exists(k string) bool {
+ _, ok := db.keys[k]
+ return ok
+}
+
+// t gives the type of a key, or ""
+func (db *RedisDB) t(k string) string {
+ return db.keys[k]
+}
+
+// allKeys returns all keys. Sorted.
+func (db *RedisDB) allKeys() []string {
+ res := make([]string, 0, len(db.keys))
+ for k := range db.keys {
+ res = append(res, k)
+ }
+ sort.Strings(res) // To make things deterministic.
+ return res
+}
+
+// flush removes all keys and values.
+func (db *RedisDB) flush() {
+ db.keys = map[string]string{}
+ db.stringKeys = map[string]string{}
+ db.hashKeys = map[string]hashKey{}
+ db.listKeys = map[string]listKey{}
+ db.setKeys = map[string]setKey{}
+ db.sortedsetKeys = map[string]sortedSet{}
+ db.ttl = map[string]time.Duration{}
+}
+
+// move something to another db. Will return ok. Or not.
+func (db *RedisDB) move(key string, to *RedisDB) bool {
+ if _, ok := to.keys[key]; ok {
+ return false
+ }
+
+ t, ok := db.keys[key]
+ if !ok {
+ return false
+ }
+ to.keys[key] = db.keys[key]
+ switch t {
+ case "string":
+ to.stringKeys[key] = db.stringKeys[key]
+ case "hash":
+ to.hashKeys[key] = db.hashKeys[key]
+ case "list":
+ to.listKeys[key] = db.listKeys[key]
+ case "set":
+ to.setKeys[key] = db.setKeys[key]
+ case "zset":
+ to.sortedsetKeys[key] = db.sortedsetKeys[key]
+ default:
+ panic("unhandled key type")
+ }
+ to.keyVersion[key]++
+ if v, ok := db.ttl[key]; ok {
+ to.ttl[key] = v
+ }
+ db.del(key, true)
+ return true
+}
+
+func (db *RedisDB) rename(from, to string) {
+ db.del(to, true)
+ switch db.t(from) {
+ case "string":
+ db.stringKeys[to] = db.stringKeys[from]
+ case "hash":
+ db.hashKeys[to] = db.hashKeys[from]
+ case "list":
+ db.listKeys[to] = db.listKeys[from]
+ case "set":
+ db.setKeys[to] = db.setKeys[from]
+ case "zset":
+ db.sortedsetKeys[to] = db.sortedsetKeys[from]
+ default:
+ panic("missing case")
+ }
+ db.keys[to] = db.keys[from]
+ db.keyVersion[to]++
+ db.ttl[to] = db.ttl[from]
+
+ db.del(from, true)
+}
+
+func (db *RedisDB) del(k string, delTTL bool) {
+ if !db.exists(k) {
+ return
+ }
+ t := db.t(k)
+ delete(db.keys, k)
+ db.keyVersion[k]++
+ if delTTL {
+ delete(db.ttl, k)
+ }
+ switch t {
+ case "string":
+ delete(db.stringKeys, k)
+ case "hash":
+ delete(db.hashKeys, k)
+ case "list":
+ delete(db.listKeys, k)
+ case "set":
+ delete(db.setKeys, k)
+ case "zset":
+ delete(db.sortedsetKeys, k)
+ default:
+ panic("Unknown key type: " + t)
+ }
+}
+
+// stringGet returns the string key or "" on error/nonexists.
+func (db *RedisDB) stringGet(k string) string {
+ if t, ok := db.keys[k]; !ok || t != "string" {
+ return ""
+ }
+ return db.stringKeys[k]
+}
+
+// stringSet force set()s a key. Does not touch expire.
+func (db *RedisDB) stringSet(k, v string) {
+ db.del(k, false)
+ db.keys[k] = "string"
+ db.stringKeys[k] = v
+ db.keyVersion[k]++
+}
+
+// change int key value
+func (db *RedisDB) stringIncr(k string, delta int) (int, error) {
+ v := 0
+ if sv, ok := db.stringKeys[k]; ok {
+ var err error
+ v, err = strconv.Atoi(sv)
+ if err != nil {
+ return 0, ErrIntValueError
+ }
+ }
+ v += delta
+ db.stringSet(k, strconv.Itoa(v))
+ return v, nil
+}
+
+// change float key value
+func (db *RedisDB) stringIncrfloat(k string, delta float64) (float64, error) {
+ v := 0.0
+ if sv, ok := db.stringKeys[k]; ok {
+ var err error
+ v, err = strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return 0, ErrFloatValueError
+ }
+ }
+ v += delta
+ db.stringSet(k, formatFloat(v))
+ return v, nil
+}
+
+// listLpush is 'left push', aka unshift. Returns the new length.
+func (db *RedisDB) listLpush(k, v string) int {
+ l, ok := db.listKeys[k]
+ if !ok {
+ db.keys[k] = "list"
+ }
+ l = append([]string{v}, l...)
+ db.listKeys[k] = l
+ db.keyVersion[k]++
+ return len(l)
+}
+
+// 'left pop', aka shift.
+func (db *RedisDB) listLpop(k string) string {
+ l := db.listKeys[k]
+ el := l[0]
+ l = l[1:]
+ if len(l) == 0 {
+ db.del(k, true)
+ } else {
+ db.listKeys[k] = l
+ }
+ db.keyVersion[k]++
+ return el
+}
+
+func (db *RedisDB) listPush(k string, v ...string) int {
+ l, ok := db.listKeys[k]
+ if !ok {
+ db.keys[k] = "list"
+ }
+ l = append(l, v...)
+ db.listKeys[k] = l
+ db.keyVersion[k]++
+ return len(l)
+}
+
+func (db *RedisDB) listPop(k string) string {
+ l := db.listKeys[k]
+ el := l[len(l)-1]
+ l = l[:len(l)-1]
+ if len(l) == 0 {
+ db.del(k, true)
+ } else {
+ db.listKeys[k] = l
+ db.keyVersion[k]++
+ }
+ return el
+}
+
+// setset replaces a whole set.
+func (db *RedisDB) setSet(k string, set setKey) {
+ db.keys[k] = "set"
+ db.setKeys[k] = set
+ db.keyVersion[k]++
+}
+
+// setadd adds members to a set. Returns nr of new keys.
+func (db *RedisDB) setAdd(k string, elems ...string) int {
+ s, ok := db.setKeys[k]
+ if !ok {
+ s = setKey{}
+ db.keys[k] = "set"
+ }
+ added := 0
+ for _, e := range elems {
+ if _, ok := s[e]; !ok {
+ added++
+ }
+ s[e] = struct{}{}
+ }
+ db.setKeys[k] = s
+ db.keyVersion[k]++
+ return added
+}
+
+// setrem removes members from a set. Returns nr of deleted keys.
+func (db *RedisDB) setRem(k string, fields ...string) int {
+ s, ok := db.setKeys[k]
+ if !ok {
+ return 0
+ }
+ removed := 0
+ for _, f := range fields {
+ if _, ok := s[f]; ok {
+ removed++
+ delete(s, f)
+ }
+ }
+ if len(s) == 0 {
+ db.del(k, true)
+ } else {
+ db.setKeys[k] = s
+ }
+ db.keyVersion[k]++
+ return removed
+}
+
+// All members of a set.
+func (db *RedisDB) setMembers(k string) []string {
+ set := db.setKeys[k]
+ members := make([]string, 0, len(set))
+ for k := range set {
+ members = append(members, k)
+ }
+ sort.Strings(members)
+ return members
+}
+
+// Is a SET value present?
+func (db *RedisDB) setIsMember(k, v string) bool {
+ set, ok := db.setKeys[k]
+ if !ok {
+ return false
+ }
+ _, ok = set[v]
+ return ok
+}
+
+// hashFields returns all (sorted) keys ('fields') for a hash key.
+func (db *RedisDB) hashFields(k string) []string {
+ v := db.hashKeys[k]
+ r := make([]string, 0, len(v))
+ for k := range v {
+ r = append(r, k)
+ }
+ sort.Strings(r)
+ return r
+}
+
+// hashGet a value
+func (db *RedisDB) hashGet(key, field string) string {
+ return db.hashKeys[key][field]
+}
+
+// hashSet sets a hash field and returns whether the field already existed.
+func (db *RedisDB) hashSet(k, f, v string) bool {
+ if t, ok := db.keys[k]; ok && t != "hash" {
+ db.del(k, true)
+ }
+ db.keys[k] = "hash"
+ if _, ok := db.hashKeys[k]; !ok {
+ db.hashKeys[k] = map[string]string{}
+ }
+ _, ok := db.hashKeys[k][f]
+ db.hashKeys[k][f] = v
+ db.keyVersion[k]++
+ return ok
+}
+
+// hashIncr changes int key value
+func (db *RedisDB) hashIncr(key, field string, delta int) (int, error) {
+ v := 0
+ if h, ok := db.hashKeys[key]; ok {
+ if f, ok := h[field]; ok {
+ var err error
+ v, err = strconv.Atoi(f)
+ if err != nil {
+ return 0, ErrIntValueError
+ }
+ }
+ }
+ v += delta
+ db.hashSet(key, field, strconv.Itoa(v))
+ return v, nil
+}
+
+// hashIncrfloat changes float key value
+func (db *RedisDB) hashIncrfloat(key, field string, delta float64) (float64, error) {
+ v := 0.0
+ if h, ok := db.hashKeys[key]; ok {
+ if f, ok := h[field]; ok {
+ var err error
+ v, err = strconv.ParseFloat(f, 64)
+ if err != nil {
+ return 0, ErrFloatValueError
+ }
+ }
+ }
+ v += delta
+ db.hashSet(key, field, formatFloat(v))
+ return v, nil
+}
+
+// sortedSet returns a sorted set as a plain map.
+func (db *RedisDB) sortedSet(key string) map[string]float64 {
+ ss := db.sortedsetKeys[key]
+ return map[string]float64(ss)
+}
+
+// ssetSet sets a complete sorted set.
+func (db *RedisDB) ssetSet(key string, sset sortedSet) {
+ db.keys[key] = "zset"
+ db.keyVersion[key]++
+ db.sortedsetKeys[key] = sset
+}
+
+// ssetAdd adds member to a sorted set. Returns whether this was a new member.
+func (db *RedisDB) ssetAdd(key string, score float64, member string) bool {
+ ss, ok := db.sortedsetKeys[key]
+ if !ok {
+ ss = newSortedSet()
+ db.keys[key] = "zset"
+ }
+ _, ok = ss[member]
+ ss[member] = score
+ db.sortedsetKeys[key] = ss
+ db.keyVersion[key]++
+ return !ok
+}
+
+// All members from a sorted set, ordered by score.
+func (db *RedisDB) ssetMembers(key string) []string {
+ ss, ok := db.sortedsetKeys[key]
+ if !ok {
+ return nil
+ }
+ elems := ss.byScore(asc)
+ members := make([]string, 0, len(elems))
+ for _, e := range elems {
+ members = append(members, e.member)
+ }
+ return members
+}
+
+// All members+scores from a sorted set, ordered by score.
+func (db *RedisDB) ssetElements(key string) ssElems {
+ ss, ok := db.sortedsetKeys[key]
+ if !ok {
+ return nil
+ }
+ return ss.byScore(asc)
+}
+
+// ssetCard is the sorted set cardinality.
+func (db *RedisDB) ssetCard(key string) int {
+ ss := db.sortedsetKeys[key]
+ return ss.card()
+}
+
+// ssetRank is the sorted set rank.
+func (db *RedisDB) ssetRank(key, member string, d direction) (int, bool) {
+ ss := db.sortedsetKeys[key]
+ return ss.rankByScore(member, d)
+}
+
+// ssetScore is sorted set score.
+func (db *RedisDB) ssetScore(key, member string) float64 {
+ ss := db.sortedsetKeys[key]
+ return ss[member]
+}
+
+// ssetRem removes a member from a sorted set, deleting the key if it becomes empty.
+func (db *RedisDB) ssetRem(key, member string) bool {
+ ss := db.sortedsetKeys[key]
+ _, ok := ss[member]
+ delete(ss, member)
+ if len(ss) == 0 {
+ // Delete key on removal of last member
+ db.del(key, true)
+ }
+ return ok
+}
+
+// ssetExists tells if a member exists in a sorted set.
+func (db *RedisDB) ssetExists(key, member string) bool {
+ ss := db.sortedsetKeys[key]
+ _, ok := ss[member]
+ return ok
+}
+
+// ssetIncrby changes float sorted set score.
+func (db *RedisDB) ssetIncrby(k, m string, delta float64) float64 {
+ ss, ok := db.sortedsetKeys[k]
+ if !ok {
+ ss = newSortedSet()
+ db.keys[k] = "zset"
+ db.sortedsetKeys[k] = ss
+ }
+
+ v, _ := ss.get(m)
+ v += delta
+ ss.set(v, m)
+ db.keyVersion[k]++
+ return v
+}
+
+// setDiff implements the logic behind SDIFF*
+func (db *RedisDB) setDiff(keys []string) (setKey, error) {
+ key := keys[0]
+ keys = keys[1:]
+ if db.exists(key) && db.t(key) != "set" {
+ return nil, ErrWrongType
+ }
+ s := setKey{}
+ for k := range db.setKeys[key] {
+ s[k] = struct{}{}
+ }
+ for _, sk := range keys {
+ if !db.exists(sk) {
+ continue
+ }
+ if db.t(sk) != "set" {
+ return nil, ErrWrongType
+ }
+ for e := range db.setKeys[sk] {
+ delete(s, e)
+ }
+ }
+ return s, nil
+}
+
+// setInter implements the logic behind SINTER*
+func (db *RedisDB) setInter(keys []string) (setKey, error) {
+ key := keys[0]
+ keys = keys[1:]
+ if !db.exists(key) {
+ return setKey{}, nil
+ }
+ if db.t(key) != "set" {
+ return nil, ErrWrongType
+ }
+ s := setKey{}
+ for k := range db.setKeys[key] {
+ s[k] = struct{}{}
+ }
+ for _, sk := range keys {
+ if !db.exists(sk) {
+ return setKey{}, nil
+ }
+ if db.t(sk) != "set" {
+ return nil, ErrWrongType
+ }
+ other := db.setKeys[sk]
+ for e := range s {
+ if _, ok := other[e]; ok {
+ continue
+ }
+ delete(s, e)
+ }
+ }
+ return s, nil
+}
+
+// setUnion implements the logic behind SUNION*
+func (db *RedisDB) setUnion(keys []string) (setKey, error) {
+ key := keys[0]
+ keys = keys[1:]
+ if db.exists(key) && db.t(key) != "set" {
+ return nil, ErrWrongType
+ }
+ s := setKey{}
+ for k := range db.setKeys[key] {
+ s[k] = struct{}{}
+ }
+ for _, sk := range keys {
+ if !db.exists(sk) {
+ continue
+ }
+ if db.t(sk) != "set" {
+ return nil, ErrWrongType
+ }
+ for e := range db.setKeys[sk] {
+ s[e] = struct{}{}
+ }
+ }
+ return s, nil
+}
+
+// fastForward advances the current timestamp by duration; it works as a time machine.
+func (db *RedisDB) fastForward(duration time.Duration) {
+ for _, key := range db.allKeys() {
+ if value, ok := db.ttl[key]; ok {
+ db.ttl[key] = value - duration
+ db.checkTTL(key)
+ }
+ }
+}
+
+func (db *RedisDB) checkTTL(key string) {
+ if v, ok := db.ttl[key]; ok && v <= 0 {
+ db.del(key, true)
+ }
+}
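The fastForward and checkTTL helpers above are what back the public FastForward API defined later in miniredis.go. As a hedged sketch (the key name and TTL below are made up for illustration), a cache test can expire keys deterministically without sleeping:

```go
package cache_test

import (
	"testing"
	"time"

	"github.com/alicebob/miniredis"
)

func TestKeyExpiry(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Hypothetical key and TTL, purely for illustration.
	if err := s.Set("session", "abc123"); err != nil {
		t.Fatal(err)
	}
	s.SetTTL("session", 10*time.Second)

	// FastForward decrements every TTL; anything <= 0 is expired.
	s.FastForward(11 * time.Second)
	if s.Exists("session") {
		t.Fatal("expected the key to be expired")
	}
}
```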
diff --git a/vendor/github.com/alicebob/miniredis/direct.go b/vendor/github.com/alicebob/miniredis/direct.go
new file mode 100644
index 000000000..ca41449fe
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/direct.go
@@ -0,0 +1,549 @@
+package miniredis
+
+// Commands to modify and query our databases directly.
+
+import (
+ "errors"
+ "time"
+)
+
+var (
+ // ErrKeyNotFound is returned when a key doesn't exist.
+ ErrKeyNotFound = errors.New(msgKeyNotFound)
+ // ErrWrongType when a key is not the right type.
+ ErrWrongType = errors.New(msgWrongType)
+	// ErrIntValueError can be returned by INCRBY
+	ErrIntValueError = errors.New(msgInvalidInt)
+	// ErrFloatValueError can be returned by INCRBYFLOAT
+	ErrFloatValueError = errors.New(msgInvalidFloat)
+)
+
+// Select sets the DB id for all direct commands.
+func (m *Miniredis) Select(i int) {
+ m.Lock()
+ defer m.Unlock()
+ m.selectedDB = i
+}
+
+// Keys returns all keys from the selected database, sorted.
+func (m *Miniredis) Keys() []string {
+ return m.DB(m.selectedDB).Keys()
+}
+
+// Keys returns all keys, sorted.
+func (db *RedisDB) Keys() []string {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.allKeys()
+}
+
+// FlushAll removes all keys from all databases.
+func (m *Miniredis) FlushAll() {
+ m.Lock()
+ defer m.Unlock()
+ m.flushAll()
+}
+
+func (m *Miniredis) flushAll() {
+ for _, db := range m.dbs {
+ db.flush()
+ }
+}
+
+// FlushDB removes all keys from the selected database.
+func (m *Miniredis) FlushDB() {
+ m.DB(m.selectedDB).FlushDB()
+}
+
+// FlushDB removes all keys.
+func (db *RedisDB) FlushDB() {
+ db.master.Lock()
+ defer db.master.Unlock()
+ db.flush()
+}
+
+// Get returns string keys added with SET.
+func (m *Miniredis) Get(k string) (string, error) {
+ return m.DB(m.selectedDB).Get(k)
+}
+
+// Get returns a string key.
+func (db *RedisDB) Get(k string) (string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return "", ErrKeyNotFound
+ }
+ if db.t(k) != "string" {
+ return "", ErrWrongType
+ }
+ return db.stringGet(k), nil
+}
+
+// Set sets a string key. Removes expire.
+func (m *Miniredis) Set(k, v string) error {
+ return m.DB(m.selectedDB).Set(k, v)
+}
+
+// Set sets a string key. Removes expire.
+// Unlike redis the key can't be an existing non-string key.
+func (db *RedisDB) Set(k, v string) error {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if db.exists(k) && db.t(k) != "string" {
+ return ErrWrongType
+ }
+ db.del(k, true) // Remove expire
+ db.stringSet(k, v)
+ return nil
+}
+
+// Incr changes an int string value by delta.
+func (m *Miniredis) Incr(k string, delta int) (int, error) {
+ return m.DB(m.selectedDB).Incr(k, delta)
+}
+
+// Incr changes an int string value by delta.
+func (db *RedisDB) Incr(k string, delta int) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if db.exists(k) && db.t(k) != "string" {
+ return 0, ErrWrongType
+ }
+
+ return db.stringIncr(k, delta)
+}
+
+// Incrfloat changes a float string value by delta.
+func (m *Miniredis) Incrfloat(k string, delta float64) (float64, error) {
+ return m.DB(m.selectedDB).Incrfloat(k, delta)
+}
+
+// Incrfloat changes a float string value by delta.
+func (db *RedisDB) Incrfloat(k string, delta float64) (float64, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if db.exists(k) && db.t(k) != "string" {
+ return 0, ErrWrongType
+ }
+
+ return db.stringIncrfloat(k, delta)
+}
+
+// List returns the list k, or an error if it's not there or something else.
+// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
+// range-ing.
+func (m *Miniredis) List(k string) ([]string, error) {
+ return m.DB(m.selectedDB).List(k)
+}
+
+// List returns the list k, or an error if it's not there or something else.
+// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
+// range-ing.
+func (db *RedisDB) List(k string) ([]string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if !db.exists(k) {
+ return nil, ErrKeyNotFound
+ }
+ if db.t(k) != "list" {
+ return nil, ErrWrongType
+ }
+ return db.listKeys[k], nil
+}
+
+// Lpush is an unshift. Returns the new length.
+func (m *Miniredis) Lpush(k, v string) (int, error) {
+ return m.DB(m.selectedDB).Lpush(k, v)
+}
+
+// Lpush is an unshift. Returns the new length.
+func (db *RedisDB) Lpush(k, v string) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if db.exists(k) && db.t(k) != "list" {
+ return 0, ErrWrongType
+ }
+ return db.listLpush(k, v), nil
+}
+
+// Lpop is a shift. Returns the popped element.
+func (m *Miniredis) Lpop(k string) (string, error) {
+ return m.DB(m.selectedDB).Lpop(k)
+}
+
+// Lpop is a shift. Returns the popped element.
+func (db *RedisDB) Lpop(k string) (string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if !db.exists(k) {
+ return "", ErrKeyNotFound
+ }
+ if db.t(k) != "list" {
+ return "", ErrWrongType
+ }
+ return db.listLpop(k), nil
+}
+
+// Push adds elements at the end. It is called RPUSH in Redis. Returns the new length.
+func (m *Miniredis) Push(k string, v ...string) (int, error) {
+ return m.DB(m.selectedDB).Push(k, v...)
+}
+
+// Push adds elements at the end. It is called RPUSH in Redis. Returns the new length.
+func (db *RedisDB) Push(k string, v ...string) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if db.exists(k) && db.t(k) != "list" {
+ return 0, ErrWrongType
+ }
+ return db.listPush(k, v...), nil
+}
+
+// Pop removes and returns the last element. Is called RPOP in Redis.
+func (m *Miniredis) Pop(k string) (string, error) {
+ return m.DB(m.selectedDB).Pop(k)
+}
+
+// Pop removes and returns the last element. Is called RPOP in Redis.
+func (db *RedisDB) Pop(k string) (string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+
+ if !db.exists(k) {
+ return "", ErrKeyNotFound
+ }
+ if db.t(k) != "list" {
+ return "", ErrWrongType
+ }
+
+ return db.listPop(k), nil
+}
+
+// SetAdd adds keys to a set. Returns the number of new keys.
+func (m *Miniredis) SetAdd(k string, elems ...string) (int, error) {
+ return m.DB(m.selectedDB).SetAdd(k, elems...)
+}
+
+// SetAdd adds keys to a set. Returns the number of new keys.
+func (db *RedisDB) SetAdd(k string, elems ...string) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if db.exists(k) && db.t(k) != "set" {
+ return 0, ErrWrongType
+ }
+ return db.setAdd(k, elems...), nil
+}
+
+// Members gives all set keys. Sorted.
+func (m *Miniredis) Members(k string) ([]string, error) {
+ return m.DB(m.selectedDB).Members(k)
+}
+
+// Members gives all set keys. Sorted.
+func (db *RedisDB) Members(k string) ([]string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return nil, ErrKeyNotFound
+ }
+ if db.t(k) != "set" {
+ return nil, ErrWrongType
+ }
+ return db.setMembers(k), nil
+}
+
+// IsMember tells if value is in the set.
+func (m *Miniredis) IsMember(k, v string) (bool, error) {
+ return m.DB(m.selectedDB).IsMember(k, v)
+}
+
+// IsMember tells if value is in the set.
+func (db *RedisDB) IsMember(k, v string) (bool, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return false, ErrKeyNotFound
+ }
+ if db.t(k) != "set" {
+ return false, ErrWrongType
+ }
+ return db.setIsMember(k, v), nil
+}
+
+// HKeys returns all (sorted) keys ('fields') for a hash key.
+func (m *Miniredis) HKeys(k string) ([]string, error) {
+ return m.DB(m.selectedDB).HKeys(k)
+}
+
+// HKeys returns all (sorted) keys ('fields') for a hash key.
+func (db *RedisDB) HKeys(key string) ([]string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(key) {
+ return nil, ErrKeyNotFound
+ }
+ if db.t(key) != "hash" {
+ return nil, ErrWrongType
+ }
+ return db.hashFields(key), nil
+}
+
+// Del deletes a key and any expiration value. Returns whether there was a key.
+func (m *Miniredis) Del(k string) bool {
+ return m.DB(m.selectedDB).Del(k)
+}
+
+// Del deletes a key and any expiration value. Returns whether there was a key.
+func (db *RedisDB) Del(k string) bool {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return false
+ }
+ db.del(k, true)
+ return true
+}
+
+// TTL is the remaining time to live, as set via EXPIRE, PEXPIRE, EXPIREAT,
+// or PEXPIREAT.
+// Returns 0 if no TTL is set.
+func (m *Miniredis) TTL(k string) time.Duration {
+ return m.DB(m.selectedDB).TTL(k)
+}
+
+// TTL is the remaining time to live, as set via EXPIRE, PEXPIRE, EXPIREAT,
+// or PEXPIREAT.
+// Returns 0 if no TTL is set.
+func (db *RedisDB) TTL(k string) time.Duration {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.ttl[k]
+}
+
+// SetTTL sets the TTL of a key.
+func (m *Miniredis) SetTTL(k string, ttl time.Duration) {
+ m.DB(m.selectedDB).SetTTL(k, ttl)
+}
+
+// SetTTL sets the time to live of a key.
+func (db *RedisDB) SetTTL(k string, ttl time.Duration) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ db.ttl[k] = ttl
+ db.keyVersion[k]++
+}
+
+// Type gives the type of a key, or ""
+func (m *Miniredis) Type(k string) string {
+ return m.DB(m.selectedDB).Type(k)
+}
+
+// Type gives the type of a key, or ""
+func (db *RedisDB) Type(k string) string {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.t(k)
+}
+
+// Exists tells whether a key exists.
+func (m *Miniredis) Exists(k string) bool {
+ return m.DB(m.selectedDB).Exists(k)
+}
+
+// Exists tells whether a key exists.
+func (db *RedisDB) Exists(k string) bool {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.exists(k)
+}
+
+// HGet returns hash keys added with HSET.
+// This will return an empty string if the key is not set. Redis would return
+// a nil.
+// Returns empty string when the key is of a different type.
+func (m *Miniredis) HGet(k, f string) string {
+ return m.DB(m.selectedDB).HGet(k, f)
+}
+
+// HGet returns hash keys added with HSET.
+// Returns empty string when the key is of a different type.
+func (db *RedisDB) HGet(k, f string) string {
+ db.master.Lock()
+ defer db.master.Unlock()
+ h, ok := db.hashKeys[k]
+ if !ok {
+ return ""
+ }
+ return h[f]
+}
+
+// HSet sets a hash key.
+// If there is another key by the same name it will be gone.
+func (m *Miniredis) HSet(k, f, v string) {
+ m.DB(m.selectedDB).HSet(k, f, v)
+}
+
+// HSet sets a hash key.
+// If there is another key by the same name it will be gone.
+func (db *RedisDB) HSet(k, f, v string) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ db.hashSet(k, f, v)
+}
+
+// HDel deletes a hash key.
+func (m *Miniredis) HDel(k, f string) {
+ m.DB(m.selectedDB).HDel(k, f)
+}
+
+// HDel deletes a hash key.
+func (db *RedisDB) HDel(k, f string) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ db.hdel(k, f)
+}
+
+func (db *RedisDB) hdel(k, f string) {
+ if _, ok := db.hashKeys[k]; !ok {
+ return
+ }
+ delete(db.hashKeys[k], f)
+ db.keyVersion[k]++
+}
+
+// HIncr increases a key/field by delta (int).
+func (m *Miniredis) HIncr(k, f string, delta int) (int, error) {
+ return m.DB(m.selectedDB).HIncr(k, f, delta)
+}
+
+// HIncr increases a key/field by delta (int).
+func (db *RedisDB) HIncr(k, f string, delta int) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.hashIncr(k, f, delta)
+}
+
+// HIncrfloat increases a key/field by delta (float).
+func (m *Miniredis) HIncrfloat(k, f string, delta float64) (float64, error) {
+ return m.DB(m.selectedDB).HIncrfloat(k, f, delta)
+}
+
+// HIncrfloat increases a key/field by delta (float).
+func (db *RedisDB) HIncrfloat(k, f string, delta float64) (float64, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ return db.hashIncrfloat(k, f, delta)
+}
+
+// SRem removes fields from a set. Returns number of deleted fields.
+func (m *Miniredis) SRem(k string, fields ...string) (int, error) {
+ return m.DB(m.selectedDB).SRem(k, fields...)
+}
+
+// SRem removes fields from a set. Returns number of deleted fields.
+func (db *RedisDB) SRem(k string, fields ...string) (int, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return 0, ErrKeyNotFound
+ }
+ if db.t(k) != "set" {
+ return 0, ErrWrongType
+ }
+ return db.setRem(k, fields...), nil
+}
+
+// ZAdd adds a score,member to a sorted set.
+func (m *Miniredis) ZAdd(k string, score float64, member string) (bool, error) {
+ return m.DB(m.selectedDB).ZAdd(k, score, member)
+}
+
+// ZAdd adds a score,member to a sorted set.
+func (db *RedisDB) ZAdd(k string, score float64, member string) (bool, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if db.exists(k) && db.t(k) != "zset" {
+ return false, ErrWrongType
+ }
+ return db.ssetAdd(k, score, member), nil
+}
+
+// ZMembers returns all members by score
+func (m *Miniredis) ZMembers(k string) ([]string, error) {
+ return m.DB(m.selectedDB).ZMembers(k)
+}
+
+// ZMembers returns all members by score
+func (db *RedisDB) ZMembers(k string) ([]string, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return nil, ErrKeyNotFound
+ }
+ if db.t(k) != "zset" {
+ return nil, ErrWrongType
+ }
+ return db.ssetMembers(k), nil
+}
+
+// SortedSet returns a raw string->float64 map.
+func (m *Miniredis) SortedSet(k string) (map[string]float64, error) {
+ return m.DB(m.selectedDB).SortedSet(k)
+}
+
+// SortedSet returns a raw string->float64 map.
+func (db *RedisDB) SortedSet(k string) (map[string]float64, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return nil, ErrKeyNotFound
+ }
+ if db.t(k) != "zset" {
+ return nil, ErrWrongType
+ }
+ return db.sortedSet(k), nil
+}
+
+// ZRem deletes a member. Returns whether the member was present.
+func (m *Miniredis) ZRem(k, member string) (bool, error) {
+ return m.DB(m.selectedDB).ZRem(k, member)
+}
+
+// ZRem deletes a member. Returns whether the member was present.
+func (db *RedisDB) ZRem(k, member string) (bool, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return false, ErrKeyNotFound
+ }
+ if db.t(k) != "zset" {
+ return false, ErrWrongType
+ }
+ return db.ssetRem(k, member), nil
+}
+
+// ZScore gives the score of a sorted set member.
+func (m *Miniredis) ZScore(k, member string) (float64, error) {
+ return m.DB(m.selectedDB).ZScore(k, member)
+}
+
+// ZScore gives the score of a sorted set member.
+func (db *RedisDB) ZScore(k, member string) (float64, error) {
+ db.master.Lock()
+ defer db.master.Unlock()
+ if !db.exists(k) {
+ return 0, ErrKeyNotFound
+ }
+ if db.t(k) != "zset" {
+ return 0, ErrWrongType
+ }
+ return db.ssetScore(k, member), nil
+}
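Taken together, these direct commands let a test seed and inspect server state without going through a Redis client at all. A minimal sketch, with made-up key names, of how that might look:

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis"
)

func main() {
	s, err := miniredis.Run()
	if err != nil {
		panic(err)
	}
	defer s.Close()

	// Hash, list and sorted-set keys below are illustrative only.
	s.HSet("user:1", "name", "alice")
	fmt.Println(s.HGet("user:1", "name")) // alice

	s.Lpush("queue", "job-1")
	items, _ := s.List("queue") // the same data LRANGE 0 -1 would return
	fmt.Println(items)          // [job-1]

	s.ZAdd("scores", 42, "alice")
	score, _ := s.ZScore("scores", "alice")
	fmt.Println(score) // 42
}
```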
diff --git a/vendor/github.com/alicebob/miniredis/keys.go b/vendor/github.com/alicebob/miniredis/keys.go
new file mode 100644
index 000000000..b7cd98fba
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/keys.go
@@ -0,0 +1,65 @@
+package miniredis
+
+// Translate the 'KEYS' argument ('foo*', 'f??', &c.) into a regexp.
+
+import (
+ "bytes"
+ "regexp"
+)
+
+// patternRE compiles a KEYS argument to a regexp. Returns nil if the given
+// pattern will never match anything.
+// The general strategy is to sandwich all non-meta characters between \Q...\E.
+func patternRE(k string) *regexp.Regexp {
+ re := bytes.Buffer{}
+ re.WriteString(`^\Q`)
+ for i := 0; i < len(k); i++ {
+ p := k[i]
+ switch p {
+ case '*':
+ re.WriteString(`\E.*\Q`)
+ case '?':
+ re.WriteString(`\E.\Q`)
+ case '[':
+ charClass := bytes.Buffer{}
+ i++
+ for ; i < len(k); i++ {
+ if k[i] == ']' {
+ break
+ }
+ if k[i] == '\\' {
+ if i == len(k)-1 {
+ // Ends with a '\'. U-huh.
+ return nil
+ }
+ charClass.WriteByte(k[i])
+ i++
+ charClass.WriteByte(k[i])
+ continue
+ }
+ charClass.WriteByte(k[i])
+ }
+ if charClass.Len() == 0 {
+ // '[]' is valid in Redis, but matches nothing.
+ return nil
+ }
+ re.WriteString(`\E[`)
+ re.Write(charClass.Bytes())
+ re.WriteString(`]\Q`)
+
+ case '\\':
+ if i == len(k)-1 {
+ // Ends with a '\'. U-huh.
+ return nil
+ }
+ // Forget the \, keep the next char.
+ i++
+ re.WriteByte(k[i])
+ continue
+ default:
+ re.WriteByte(p)
+ }
+ }
+ re.WriteString(`\E$`)
+ return regexp.MustCompile(re.String())
+}
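patternRE is unexported, so the following is only a hand-worked illustration of the translation it performs: for the glob pattern `f?o*`, literal runs are quoted with `\Q...\E`, `?` becomes `.`, `*` becomes `.*`, and the whole expression is anchored. The regexp below is a simplified equivalent written by hand, not the helper's exact output.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hand-built equivalent of the translation above for the pattern "f?o*".
	re := regexp.MustCompile(`^\Qf\E.\Qo\E.*$`)

	fmt.Println(re.MatchString("foo"))   // true:  '?' matches 'o', '*' matches ""
	fmt.Println(re.MatchString("frodo")) // true:  '?' matches 'r', '*' matches "do"
	fmt.Println(re.MatchString("fo"))    // false: nothing left for the literal 'o'
}
```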
diff --git a/vendor/github.com/alicebob/miniredis/lua.go b/vendor/github.com/alicebob/miniredis/lua.go
new file mode 100644
index 000000000..a338425bf
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/lua.go
@@ -0,0 +1,189 @@
+package miniredis
+
+import (
+ redigo "github.com/gomodule/redigo/redis"
+ "github.com/yuin/gopher-lua"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction {
+ mkCall := func(failFast bool) func(l *lua.LState) int {
+ return func(l *lua.LState) int {
+ top := l.GetTop()
+ if top == 0 {
+ l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1)
+ return 0
+ }
+ var args []interface{}
+ for i := 1; i <= top; i++ {
+ switch a := l.Get(i).(type) {
+ // case lua.LBool:
+ // args[i-2] = a
+ case lua.LNumber:
+ // value, _ := strconv.ParseFloat(lua.LVAsString(arg), 64)
+ args = append(args, float64(a))
+ case lua.LString:
+ args = append(args, string(a))
+ default:
+ l.Error(lua.LString("Lua redis() command arguments must be strings or integers"), 1)
+ return 0
+ }
+ }
+ cmd, ok := args[0].(string)
+ if !ok {
+ l.Error(lua.LString("Unknown Redis command called from Lua script"), 1)
+ return 0
+ }
+ res, err := conn.Do(cmd, args[1:]...)
+ if err != nil {
+ if failFast {
+ // call() mode
+ l.Error(lua.LString(err.Error()), 1)
+ return 0
+ }
+ // pcall() mode
+ l.Push(lua.LNil)
+ return 1
+ }
+
+ if res == nil {
+ l.Push(lua.LFalse)
+ } else {
+ switch r := res.(type) {
+ case int64:
+ l.Push(lua.LNumber(r))
+ case []uint8:
+ l.Push(lua.LString(string(r)))
+ case []interface{}:
+ l.Push(redisToLua(l, r))
+ case string:
+ l.Push(lua.LString(r))
+ default:
+ panic("type not handled")
+ }
+ }
+ return 1
+ }
+ }
+
+ return map[string]lua.LGFunction{
+ "call": mkCall(true),
+ "pcall": mkCall(false),
+ "error_reply": func(l *lua.LState) int {
+ msg := l.CheckString(1)
+ res := &lua.LTable{}
+ res.RawSetString("err", lua.LString(msg))
+ l.Push(res)
+ return 1
+ },
+ "status_reply": func(l *lua.LState) int {
+ msg := l.CheckString(1)
+ res := &lua.LTable{}
+ res.RawSetString("ok", lua.LString(msg))
+ l.Push(res)
+ return 1
+ },
+ "sha1hex": func(l *lua.LState) int {
+ top := l.GetTop()
+ if top != 1 {
+ l.Error(lua.LString("wrong number of arguments"), 1)
+ return 0
+ }
+ msg := lua.LVAsString(l.Get(1))
+ l.Push(lua.LString(sha1Hex(msg)))
+ return 1
+ },
+ "replicate_commands": func(l *lua.LState) int {
+ // ignored
+ return 1
+ },
+ }
+}
+
+func luaToRedis(l *lua.LState, c *server.Peer, value lua.LValue) {
+ if value == nil {
+ c.WriteNull()
+ return
+ }
+
+ switch t := value.(type) {
+ case *lua.LNilType:
+ c.WriteNull()
+ case lua.LBool:
+ if lua.LVAsBool(value) {
+ c.WriteInt(1)
+ } else {
+ c.WriteNull()
+ }
+ case lua.LNumber:
+ c.WriteInt(int(lua.LVAsNumber(value)))
+ case lua.LString:
+ s := lua.LVAsString(value)
+ if s == "OK" {
+ c.WriteInline(s)
+ } else {
+ c.WriteBulk(s)
+ }
+ case *lua.LTable:
+ // special case for tables with an 'err' or 'ok' field
+ // note: according to the docs this only counts when 'err' or 'ok' is
+ // the only field.
+ if s := t.RawGetString("err"); s.Type() != lua.LTNil {
+ c.WriteError(s.String())
+ return
+ }
+ if s := t.RawGetString("ok"); s.Type() != lua.LTNil {
+ c.WriteInline(s.String())
+ return
+ }
+
+ result := []lua.LValue{}
+ for j := 1; true; j++ {
+ val := l.GetTable(value, lua.LNumber(j))
+ if val == nil {
+ result = append(result, val)
+ continue
+ }
+
+ if val.Type() == lua.LTNil {
+ break
+ }
+
+ result = append(result, val)
+ }
+
+ c.WriteLen(len(result))
+ for _, r := range result {
+ luaToRedis(l, c, r)
+ }
+ default:
+ panic("....")
+ }
+}
+
+func redisToLua(l *lua.LState, res []interface{}) *lua.LTable {
+ rettb := l.NewTable()
+ for _, e := range res {
+ var v lua.LValue
+ if e == nil {
+ v = lua.LFalse
+ } else {
+ switch et := e.(type) {
+ case int64:
+ v = lua.LNumber(et)
+ case []uint8:
+ v = lua.LString(string(et))
+ case []interface{}:
+ v = redisToLua(l, et)
+ case string:
+ v = lua.LString(et)
+ default:
+ // TODO: oops?
+ v = lua.LString(e.(string))
+ }
+ }
+ l.RawSet(rettb, lua.LNumber(rettb.Len()+1), v)
+ }
+ return rettb
+}
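These hooks are what make server-side Lua (EVAL) work against miniredis via gopher-lua. Assuming the vendored version registers the scripting commands (commandsScripting is wired up in miniredis.go below), a hedged sketch of exercising the bridge from a redigo client might look like this; the script and key name are made up:

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis"
	redigo "github.com/gomodule/redigo/redis"
)

func main() {
	s, _ := miniredis.Run()
	defer s.Close()

	c, err := redigo.Dial("tcp", s.Addr())
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Illustrative script: redis.call is routed through mkLuaFuncs above.
	script := `return redis.call("SET", KEYS[1], ARGV[1])`
	reply, err := redigo.String(c.Do("EVAL", script, 1, "greeting", "hello"))
	fmt.Println(reply, err) // expected: OK <nil>
}
```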
diff --git a/vendor/github.com/alicebob/miniredis/miniredis.go b/vendor/github.com/alicebob/miniredis/miniredis.go
new file mode 100644
index 000000000..0688bdfef
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/miniredis.go
@@ -0,0 +1,373 @@
+// Package miniredis is a pure Go Redis test server, for use in Go unittests.
+// There are no dependencies on system binaries, and every server you start
+// will be empty.
+//
+// Start a server with `s, err := miniredis.Run()`.
+// Stop it with `defer s.Close()`.
+//
+// Point your Redis client to `s.Addr()` or `s.Host(), s.Port()`.
+//
+// Set keys directly via s.Set(...) and similar commands, or use a Redis client.
+//
+// For direct use you can select a Redis database with either `s.Select(12);
+// s.Get("foo")` or `s.DB(12).Get("foo")`.
+//
+package miniredis
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ redigo "github.com/gomodule/redigo/redis"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+type hashKey map[string]string
+type listKey []string
+type setKey map[string]struct{}
+
+// RedisDB holds a single (numbered) Redis database.
+type RedisDB struct {
+ master *sync.Mutex // pointer to the lock in Miniredis
+ id int // db id
+ keys map[string]string // Master map of keys with their type
+ stringKeys map[string]string // GET/SET &c. keys
+ hashKeys map[string]hashKey // MGET/MSET &c. keys
+ listKeys map[string]listKey // LPUSH &c. keys
+ setKeys map[string]setKey // SADD &c. keys
+ sortedsetKeys map[string]sortedSet // ZADD &c. keys
+ ttl map[string]time.Duration // effective TTL values
+ keyVersion map[string]uint // used to watch values
+}
+
+// Miniredis is a Redis server implementation.
+type Miniredis struct {
+ sync.Mutex
+ srv *server.Server
+ port int
+ password string
+ dbs map[int]*RedisDB
+ selectedDB int // DB id used in the direct Get(), Set() &c.
+ scripts map[string]string // sha1 -> lua src
+ signal *sync.Cond
+ now time.Time // used to make a duration from EXPIREAT. time.Now() if not set.
+}
+
+type txCmd func(*server.Peer, *connCtx)
+
+// database id + key combo
+type dbKey struct {
+ db int
+ key string
+}
+
+// connCtx has all state for a single connection.
+type connCtx struct {
+ selectedDB int // selected DB
+ authenticated bool // auth enabled and a valid AUTH seen
+ transaction []txCmd // transaction callbacks. Or nil.
+ dirtyTransaction bool // any error during QUEUEing.
+ watch map[dbKey]uint // WATCHed keys.
+}
+
+// NewMiniRedis makes a new, non-started, Miniredis object.
+func NewMiniRedis() *Miniredis {
+ m := Miniredis{
+ dbs: map[int]*RedisDB{},
+ scripts: map[string]string{},
+ }
+ m.signal = sync.NewCond(&m)
+ return &m
+}
+
+func newRedisDB(id int, l *sync.Mutex) RedisDB {
+ return RedisDB{
+ id: id,
+ master: l,
+ keys: map[string]string{},
+ stringKeys: map[string]string{},
+ hashKeys: map[string]hashKey{},
+ listKeys: map[string]listKey{},
+ setKeys: map[string]setKey{},
+ sortedsetKeys: map[string]sortedSet{},
+ ttl: map[string]time.Duration{},
+ keyVersion: map[string]uint{},
+ }
+}
+
+// Run creates and Start()s a Miniredis.
+func Run() (*Miniredis, error) {
+ m := NewMiniRedis()
+ return m, m.Start()
+}
+
+// Start starts a server. It listens on a random port on localhost. See also
+// Addr().
+func (m *Miniredis) Start() error {
+ s, err := server.NewServer(fmt.Sprintf("127.0.0.1:%d", m.port))
+ if err != nil {
+ return err
+ }
+ return m.start(s)
+}
+
+// StartAddr runs miniredis with a given addr. Examples: "127.0.0.1:6379",
+// ":6379", or "127.0.0.1:0"
+func (m *Miniredis) StartAddr(addr string) error {
+ s, err := server.NewServer(addr)
+ if err != nil {
+ return err
+ }
+ return m.start(s)
+}
+
+func (m *Miniredis) start(s *server.Server) error {
+ m.Lock()
+ defer m.Unlock()
+ m.srv = s
+ m.port = s.Addr().Port
+
+ commandsConnection(m)
+ commandsGeneric(m)
+ commandsServer(m)
+ commandsString(m)
+ commandsHash(m)
+ commandsList(m)
+ commandsSet(m)
+ commandsSortedSet(m)
+ commandsTransaction(m)
+ commandsScripting(m)
+
+ return nil
+}
+
+// Restart restarts a Close()d server on the same port. Values will be
+// preserved.
+func (m *Miniredis) Restart() error {
+ return m.Start()
+}
+
+// Close shuts down a Miniredis.
+func (m *Miniredis) Close() {
+ m.Lock()
+ defer m.Unlock()
+ if m.srv == nil {
+ return
+ }
+ m.srv.Close()
+ m.srv = nil
+}
+
+// RequireAuth makes every connection need to AUTH first. Disable again by
+// setting an empty string.
+func (m *Miniredis) RequireAuth(pw string) {
+ m.Lock()
+ defer m.Unlock()
+ m.password = pw
+}
+
+// DB returns a DB by ID.
+func (m *Miniredis) DB(i int) *RedisDB {
+ m.Lock()
+ defer m.Unlock()
+ return m.db(i)
+}
+
+// get DB. No locks!
+func (m *Miniredis) db(i int) *RedisDB {
+ if db, ok := m.dbs[i]; ok {
+ return db
+ }
+ db := newRedisDB(i, &m.Mutex) // the DB has our lock.
+ m.dbs[i] = &db
+ return &db
+}
+
+// Addr returns '127.0.0.1:12345'. Can be given to a Dial(). See also Host()
+// and Port(), which return the same things.
+func (m *Miniredis) Addr() string {
+ m.Lock()
+ defer m.Unlock()
+ return m.srv.Addr().String()
+}
+
+// Host returns the host part of Addr().
+func (m *Miniredis) Host() string {
+ m.Lock()
+ defer m.Unlock()
+ return m.srv.Addr().IP.String()
+}
+
+// Port returns the (random) port part of Addr().
+func (m *Miniredis) Port() string {
+ m.Lock()
+ defer m.Unlock()
+ return strconv.Itoa(m.srv.Addr().Port)
+}
+
+// CommandCount returns the number of processed commands.
+func (m *Miniredis) CommandCount() int {
+ m.Lock()
+ defer m.Unlock()
+ return int(m.srv.TotalCommands())
+}
+
+// CurrentConnectionCount returns the number of currently connected clients.
+func (m *Miniredis) CurrentConnectionCount() int {
+ m.Lock()
+ defer m.Unlock()
+ return m.srv.ClientsLen()
+}
+
+// TotalConnectionCount returns the number of client connections since server start.
+func (m *Miniredis) TotalConnectionCount() int {
+ m.Lock()
+ defer m.Unlock()
+ return int(m.srv.TotalConnections())
+}
+
+// FastForward decreases all TTLs by the given duration. All TTLs <= 0 will be
+// expired.
+func (m *Miniredis) FastForward(duration time.Duration) {
+ m.Lock()
+ defer m.Unlock()
+ for _, db := range m.dbs {
+ db.fastForward(duration)
+ }
+}
+
+// redigo returns a redigo.Conn, connected using net.Pipe
+func (m *Miniredis) redigo() redigo.Conn {
+ c1, c2 := net.Pipe()
+ m.srv.ServeConn(c1)
+ c := redigo.NewConn(c2, 0, 0)
+ if m.password != "" {
+ if _, err := c.Do("AUTH", m.password); err != nil {
+ // ?
+ }
+ }
+ return c
+}
+
+// Dump returns a text version of the selected DB, usable for debugging.
+func (m *Miniredis) Dump() string {
+ m.Lock()
+ defer m.Unlock()
+
+ var (
+ maxLen = 60
+ indent = " "
+ db = m.db(m.selectedDB)
+ r = ""
+ v = func(s string) string {
+ suffix := ""
+ if len(s) > maxLen {
+ suffix = fmt.Sprintf("...(%d)", len(s))
+ s = s[:maxLen-len(suffix)]
+ }
+ return fmt.Sprintf("%q%s", s, suffix)
+ }
+ )
+ for _, k := range db.allKeys() {
+ r += fmt.Sprintf("- %s\n", k)
+ t := db.t(k)
+ switch t {
+ case "string":
+ r += fmt.Sprintf("%s%s\n", indent, v(db.stringKeys[k]))
+ case "hash":
+ for _, hk := range db.hashFields(k) {
+ r += fmt.Sprintf("%s%s: %s\n", indent, hk, v(db.hashGet(k, hk)))
+ }
+ case "list":
+ for _, lk := range db.listKeys[k] {
+ r += fmt.Sprintf("%s%s\n", indent, v(lk))
+ }
+ case "set":
+ for _, mk := range db.setMembers(k) {
+ r += fmt.Sprintf("%s%s\n", indent, v(mk))
+ }
+ case "zset":
+ for _, el := range db.ssetElements(k) {
+ r += fmt.Sprintf("%s%f: %s\n", indent, el.score, v(el.member))
+ }
+ default:
+ r += fmt.Sprintf("%s(a %s, fixme!)\n", indent, t)
+ }
+ }
+ return r
+}
+
+// SetTime sets the time against which EXPIREAT values are compared. EXPIREAT
+// will use time.Now() if this is not set.
+func (m *Miniredis) SetTime(t time.Time) {
+ m.Lock()
+ defer m.Unlock()
+ m.now = t
+}
+
+// handleAuth returns false if the connection has no access. It sends the reply itself.
+func (m *Miniredis) handleAuth(c *server.Peer) bool {
+ m.Lock()
+ defer m.Unlock()
+ if m.password == "" {
+ return true
+ }
+ if !getCtx(c).authenticated {
+ c.WriteError("NOAUTH Authentication required.")
+ return false
+ }
+ return true
+}
+
+func getCtx(c *server.Peer) *connCtx {
+ if c.Ctx == nil {
+ c.Ctx = &connCtx{}
+ }
+ return c.Ctx.(*connCtx)
+}
+
+func startTx(ctx *connCtx) {
+ ctx.transaction = []txCmd{}
+ ctx.dirtyTransaction = false
+}
+
+func stopTx(ctx *connCtx) {
+ ctx.transaction = nil
+ unwatch(ctx)
+}
+
+func inTx(ctx *connCtx) bool {
+ return ctx.transaction != nil
+}
+
+func addTxCmd(ctx *connCtx, cb txCmd) {
+ ctx.transaction = append(ctx.transaction, cb)
+}
+
+func watch(db *RedisDB, ctx *connCtx, key string) {
+ if ctx.watch == nil {
+ ctx.watch = map[dbKey]uint{}
+ }
+ ctx.watch[dbKey{db: db.id, key: key}] = db.keyVersion[key] // Can be 0.
+}
+
+func unwatch(ctx *connCtx) {
+ ctx.watch = nil
+}
+
+// setDirty can be called even when not in a tx; it is a no-op then.
+func setDirty(c *server.Peer) {
+ if c.Ctx == nil {
+ // No transaction. Not relevant.
+ return
+ }
+ getCtx(c).dirtyTransaction = true
+}
+
+func setAuthenticated(c *server.Peer) {
+ getCtx(c).authenticated = true
+}
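Following the package comment above, the usual pattern in a unit test is to start a throwaway server and point a real client at Addr(). A minimal sketch using redigo (any Redis client would do; the key and value are illustrative):

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis"
	redigo "github.com/gomodule/redigo/redis"
)

func main() {
	s, err := miniredis.Run()
	if err != nil {
		panic(err)
	}
	defer s.Close()

	c, err := redigo.Dial("tcp", s.Addr())
	if err != nil {
		panic(err)
	}
	defer c.Close()

	if _, err := c.Do("SET", "foo", "bar"); err != nil {
		panic(err)
	}
	v, _ := redigo.String(c.Do("GET", "foo"))
	fmt.Println(v) // bar
}
```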
diff --git a/vendor/github.com/alicebob/miniredis/redis.go b/vendor/github.com/alicebob/miniredis/redis.go
new file mode 100644
index 000000000..49ff7bc3c
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/redis.go
@@ -0,0 +1,208 @@
+package miniredis
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/alicebob/miniredis/server"
+)
+
+const (
+ msgWrongType = "WRONGTYPE Operation against a key holding the wrong kind of value"
+ msgInvalidInt = "ERR value is not an integer or out of range"
+ msgInvalidFloat = "ERR value is not a valid float"
+ msgInvalidMinMax = "ERR min or max is not a float"
+ msgInvalidRangeItem = "ERR min or max not valid string range item"
+ msgInvalidTimeout = "ERR timeout is not an integer or out of range"
+ msgSyntaxError = "ERR syntax error"
+ msgKeyNotFound = "ERR no such key"
+ msgOutOfRange = "ERR index out of range"
+ msgInvalidCursor = "ERR invalid cursor"
+ msgXXandNX = "ERR XX and NX options at the same time are not compatible"
+ msgNegTimeout = "ERR timeout is negative"
+ msgInvalidSETime = "ERR invalid expire time in set"
+ msgInvalidSETEXTime = "ERR invalid expire time in setex"
+ msgInvalidPSETEXTime = "ERR invalid expire time in psetex"
+ msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args"
+ msgNegativeKeysNumber = "ERR Number of keys can't be negative"
+ msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP."
+ msgSingleElementPair = "ERR INCR option supports a single increment-element pair"
+ msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL."
+)
+
+func errWrongNumber(cmd string) string {
+ return fmt.Sprintf("ERR wrong number of arguments for '%s' command", strings.ToLower(cmd))
+}
+
+func errLuaParseError(err error) string {
+ return fmt.Sprintf("ERR Error compiling script (new function): %s", err.Error())
+}
+
+// withTx wraps the non-argument-checking part of command handling code in
+// transaction logic.
+func withTx(
+ m *Miniredis,
+ c *server.Peer,
+ cb txCmd,
+) {
+ ctx := getCtx(c)
+ if inTx(ctx) {
+ addTxCmd(ctx, cb)
+ c.WriteInline("QUEUED")
+ return
+ }
+ m.Lock()
+ cb(c, ctx)
+ // done, wake up anyone who waits on anything.
+ m.signal.Broadcast()
+ m.Unlock()
+}
+
+// blockCmd is executed by blocking(); it returns whether it is done.
+type blockCmd func(*server.Peer, *connCtx) bool
+
+// blocking keeps trying a command until the callback returns true. Calls
+// onTimeout after the timeout (or when we call this in a transaction).
+func blocking(
+ m *Miniredis,
+ c *server.Peer,
+ timeout time.Duration,
+ cb blockCmd,
+ onTimeout func(*server.Peer),
+) {
+ var (
+ ctx = getCtx(c)
+ dl *time.Timer
+ dlc <-chan time.Time
+ )
+ if inTx(ctx) {
+ addTxCmd(ctx, func(c *server.Peer, ctx *connCtx) {
+ if !cb(c, ctx) {
+ onTimeout(c)
+ }
+ })
+ c.WriteInline("QUEUED")
+ return
+ }
+ if timeout != 0 {
+ dl = time.NewTimer(timeout)
+ defer dl.Stop()
+ dlc = dl.C
+ }
+
+ m.Lock()
+ defer m.Unlock()
+ for {
+ done := cb(c, ctx)
+ if done {
+ return
+ }
+		// there is no cond.WaitTimeout(), hence the goroutine to wait
+ // for a timeout
+ var (
+ wg sync.WaitGroup
+ wakeup = make(chan struct{}, 1)
+ )
+ wg.Add(1)
+ go func() {
+ m.signal.Wait()
+ wakeup <- struct{}{}
+ wg.Done()
+ }()
+ select {
+ case <-wakeup:
+ case <-dlc:
+ onTimeout(c)
+ m.signal.Broadcast() // to kill the wakeup go routine
+ wg.Wait()
+ return
+ }
+ wg.Wait()
+ }
+}
+
+// formatFloat formats a float the way redis does (sort-of)
+func formatFloat(v float64) string {
+	// Format with %f and strip trailing 0s. This is the closest we get to
+	// how Redis does it :(
+ // .12 is the magic number where most output is the same as Redis.
+ if math.IsInf(v, +1) {
+ return "inf"
+ }
+ if math.IsInf(v, -1) {
+ return "-inf"
+ }
+ sv := fmt.Sprintf("%.12f", v)
+ for strings.Contains(sv, ".") {
+ if sv[len(sv)-1] != '0' {
+ break
+ }
+ // Remove trailing 0s.
+ sv = sv[:len(sv)-1]
+ // Ends with a '.'.
+ if sv[len(sv)-1] == '.' {
+ sv = sv[:len(sv)-1]
+ break
+ }
+ }
+ return sv
+}
+
+// redisRange gives Go offsets for something l long with start/end in
+// Redis semantics. Both start and end can be negative.
+// Used for string range and list range things.
+// The results can be used as: v[start:end]
+// Note that GETRANGE (on a string key) never returns an empty string when end
+// is a large negative number.
+func redisRange(l, start, end int, stringSymantics bool) (int, int) {
+ if start < 0 {
+ start = l + start
+ if start < 0 {
+ start = 0
+ }
+ }
+ if start > l {
+ start = l
+ }
+
+ if end < 0 {
+ end = l + end
+ if end < 0 {
+ end = -1
+ if stringSymantics {
+ end = 0
+ }
+ }
+ }
+ end++ // end argument is inclusive in Redis.
+ if end > l {
+ end = l
+ }
+
+ if end < start {
+ return 0, 0
+ }
+ return start, end
+}
+
+// matchKeys filters only matching keys.
+// Will return an empty list on invalid match expression.
+func matchKeys(keys []string, match string) []string {
+ re := patternRE(match)
+ if re == nil {
+ // Special case, the given pattern won't match anything / is
+ // invalid.
+ return nil
+ }
+ res := []string{}
+ for _, k := range keys {
+ if !re.MatchString(k) {
+ continue
+ }
+ res = append(res, k)
+ }
+ return res
+}
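redisRange is the piece that maps Redis' inclusive, possibly negative LRANGE/GETRANGE offsets onto Go slice bounds. A few hand-worked list-semantics examples (length 5) showing the `v[start:end]` slice each range resolves to:

```go
package main

import "fmt"

func main() {
	v := []string{"a", "b", "c", "d", "e"}

	// LRANGE key 0 -1  -> start=0, end=5 (the whole list)
	fmt.Println(v[0:5]) // [a b c d e]

	// LRANGE key 1 3   -> start=1, end=4 (elements 1..3, inclusive)
	fmt.Println(v[1:4]) // [b c d]

	// LRANGE key -2 -1 -> start=3, end=5 (the last two elements)
	fmt.Println(v[3:5]) // [d e]

	// LRANGE key 3 1   -> start=0, end=0 (end < start, so an empty range)
	fmt.Println(v[0:0]) // []
}
```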
diff --git a/vendor/github.com/alicebob/miniredis/server/Makefile b/vendor/github.com/alicebob/miniredis/server/Makefile
new file mode 100644
index 000000000..c82e336f9
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/server/Makefile
@@ -0,0 +1,9 @@
+.PHONY: all build test
+
+all: build test
+
+build:
+ go build
+
+test:
+ go test
diff --git a/vendor/github.com/alicebob/miniredis/server/proto.go b/vendor/github.com/alicebob/miniredis/server/proto.go
new file mode 100644
index 000000000..27e62d4f0
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/server/proto.go
@@ -0,0 +1,84 @@
+package server
+
+import (
+ "bufio"
+ "errors"
+ "strconv"
+)
+
+// ErrProtocol is the general error for unexpected input
+var ErrProtocol = errors.New("invalid request")
+
+// client always sends arrays with bulk strings
+func readArray(rd *bufio.Reader) ([]string, error) {
+ line, err := rd.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if len(line) < 3 {
+ return nil, ErrProtocol
+ }
+
+ switch line[0] {
+ default:
+ return nil, ErrProtocol
+ case '*':
+ l, err := strconv.Atoi(line[1 : len(line)-2])
+ if err != nil {
+ return nil, err
+ }
+ // l can be -1
+ var fields []string
+ for ; l > 0; l-- {
+ s, err := readString(rd)
+ if err != nil {
+ return nil, err
+ }
+ fields = append(fields, s)
+ }
+ return fields, nil
+ }
+}
+
+func readString(rd *bufio.Reader) (string, error) {
+ line, err := rd.ReadString('\n')
+ if err != nil {
+ return "", err
+ }
+ if len(line) < 3 {
+ return "", ErrProtocol
+ }
+
+ switch line[0] {
+ default:
+ return "", ErrProtocol
+ case '+', '-', ':':
+ // +: simple string
+ // -: errors
+ // :: integer
+ // Simple line based replies.
+ return string(line[1 : len(line)-2]), nil
+ case '$':
+ // bulk strings are: `$5\r\nhello\r\n`
+ length, err := strconv.Atoi(line[1 : len(line)-2])
+ if err != nil {
+ return "", err
+ }
+ if length < 0 {
+ // -1 is a nil response
+ return "", nil
+ }
+ var (
+ buf = make([]byte, length+2)
+ pos = 0
+ )
+ for pos < length+2 {
+ n, err := rd.Read(buf[pos:])
+ if err != nil {
+ return "", err
+ }
+ pos += n
+ }
+ return string(buf[:length]), nil
+ }
+}
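readArray and readString parse the RESP request framing: a command arrives as a `*`-prefixed array of `$`-prefixed bulk strings. A hedged sketch of an in-package test (readArray is unexported, so this only compiles alongside proto.go) showing the bytes `SET foo bar` puts on the wire and what the parser recovers:

```go
package server

import (
	"bufio"
	"fmt"
	"strings"
	"testing"
)

func TestReadArraySketch(t *testing.T) {
	// Standard RESP encoding of the command "SET foo bar".
	raw := "*3\r\n" +
		"$3\r\nSET\r\n" +
		"$3\r\nfoo\r\n" +
		"$3\r\nbar\r\n"

	fields, err := readArray(bufio.NewReader(strings.NewReader(raw)))
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(fields) // [SET foo bar]
}
```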
diff --git a/vendor/github.com/alicebob/miniredis/server/server.go b/vendor/github.com/alicebob/miniredis/server/server.go
new file mode 100644
index 000000000..1796453dd
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/server/server.go
@@ -0,0 +1,242 @@
+package server
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+func errUnknownCommand(cmd string, args []string) string {
+ s := fmt.Sprintf("ERR unknown command `%s`, with args beginning with: ", cmd)
+ if len(args) > 20 {
+ args = args[:20]
+ }
+ for _, a := range args {
+ s += fmt.Sprintf("`%s`, ", a)
+ }
+ return s
+}
+
+// Cmd is what Register expects
+type Cmd func(c *Peer, cmd string, args []string)
+
+// Server is a simple redis server
+type Server struct {
+ l net.Listener
+ cmds map[string]Cmd
+ peers map[net.Conn]struct{}
+ mu sync.Mutex
+ wg sync.WaitGroup
+ infoConns int
+ infoCmds int
+}
+
+// NewServer makes a server listening on addr. Close with .Close().
+func NewServer(addr string) (*Server, error) {
+ s := Server{
+ cmds: map[string]Cmd{},
+ peers: map[net.Conn]struct{}{},
+ }
+
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ s.l = l
+
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.serve(l)
+ }()
+ return &s, nil
+}
+
+func (s *Server) serve(l net.Listener) {
+ for {
+ conn, err := l.Accept()
+ if err != nil {
+ return
+ }
+ s.ServeConn(conn)
+ }
+}
+
+// ServeConn handles a net.Conn. Nice with net.Pipe()
+func (s *Server) ServeConn(conn net.Conn) {
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ defer conn.Close()
+ s.mu.Lock()
+ s.peers[conn] = struct{}{}
+ s.infoConns++
+ s.mu.Unlock()
+
+ s.servePeer(conn)
+
+ s.mu.Lock()
+ delete(s.peers, conn)
+ s.mu.Unlock()
+ }()
+}
+
+// Addr returns the server's listen address as a *net.TCPAddr.
+func (s *Server) Addr() *net.TCPAddr {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.l == nil {
+ return nil
+ }
+ return s.l.Addr().(*net.TCPAddr)
+}
+
+// Close a server started with NewServer. It will wait until all clients are
+// closed.
+func (s *Server) Close() {
+ s.mu.Lock()
+ if s.l != nil {
+ s.l.Close()
+ }
+ s.l = nil
+ for c := range s.peers {
+ c.Close()
+ }
+ s.mu.Unlock()
+ s.wg.Wait()
+}
+
+// Register a command. It can't have been registered before. Safe to call on a
+// running server.
+func (s *Server) Register(cmd string, f Cmd) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ cmd = strings.ToUpper(cmd)
+ if _, ok := s.cmds[cmd]; ok {
+ return fmt.Errorf("command already registered: %s", cmd)
+ }
+ s.cmds[cmd] = f
+ return nil
+}
+
+func (s *Server) servePeer(c net.Conn) {
+ r := bufio.NewReader(c)
+ cl := &Peer{
+ w: bufio.NewWriter(c),
+ }
+ for {
+ args, err := readArray(r)
+ if err != nil {
+ return
+ }
+ s.dispatch(cl, args)
+ cl.w.Flush()
+ if cl.closed {
+ c.Close()
+ return
+ }
+ }
+}
+
+func (s *Server) dispatch(c *Peer, args []string) {
+ cmd, args := args[0], args[1:]
+ cmdUp := strings.ToUpper(cmd)
+ s.mu.Lock()
+ cb, ok := s.cmds[cmdUp]
+ s.mu.Unlock()
+ if !ok {
+ c.WriteError(errUnknownCommand(cmd, args))
+ return
+ }
+
+ s.mu.Lock()
+ s.infoCmds++
+ s.mu.Unlock()
+ cb(c, cmdUp, args)
+}
+
+// TotalCommands is the total number of (known) commands processed since the server started.
+func (s *Server) TotalCommands() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.infoCmds
+}
+
+// ClientsLen gives the number of connected clients right now
+func (s *Server) ClientsLen() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return len(s.peers)
+}
+
+// TotalConnections gives the number of clients connected since the server
+// started, including the currently connected ones
+func (s *Server) TotalConnections() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.infoConns
+}
+
+// Peer is a client connected to the server
+type Peer struct {
+ w *bufio.Writer
+ closed bool
+ Ctx interface{} // anything goes, server won't touch this
+}
+
+// Flush the write buffer. Called automatically after every redis command
+func (c *Peer) Flush() {
+ c.w.Flush()
+}
+
+// Close the client connection after the current command is done.
+func (c *Peer) Close() {
+ c.closed = true
+}
+
+// WriteError writes a redis 'Error'
+func (c *Peer) WriteError(e string) {
+ fmt.Fprintf(c.w, "-%s\r\n", toInline(e))
+}
+
+// WriteInline writes a redis inline string
+func (c *Peer) WriteInline(s string) {
+ fmt.Fprintf(c.w, "+%s\r\n", toInline(s))
+}
+
+// WriteOK writes the inline string `OK`
+func (c *Peer) WriteOK() {
+ c.WriteInline("OK")
+}
+
+// WriteBulk writes a bulk string
+func (c *Peer) WriteBulk(s string) {
+ fmt.Fprintf(c.w, "$%d\r\n%s\r\n", len(s), s)
+}
+
+// WriteNull writes a redis Null element
+func (c *Peer) WriteNull() {
+ fmt.Fprintf(c.w, "$-1\r\n")
+}
+
+// WriteLen starts an array with the given length
+func (c *Peer) WriteLen(n int) {
+ fmt.Fprintf(c.w, "*%d\r\n", n)
+}
+
+// WriteInt writes an integer
+func (c *Peer) WriteInt(i int) {
+ fmt.Fprintf(c.w, ":%d\r\n", i)
+}
+
+func toInline(s string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ return r
+ }, s)
+}
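The server package is usable on its own: NewServer starts a listener, Register wires in command handlers, and the Peer write helpers emit RESP replies. A small hedged sketch of a toy server with a single made-up command (real miniredis registers the full Redis command set instead):

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis/server"
)

func main() {
	srv, err := server.NewServer("127.0.0.1:0") // random free port
	if err != nil {
		panic(err)
	}
	defer srv.Close()

	// "HELLO" here is purely illustrative.
	err = srv.Register("HELLO", func(c *server.Peer, cmd string, args []string) {
		if len(args) != 1 {
			c.WriteError("ERR wrong number of arguments for 'hello' command")
			return
		}
		c.WriteBulk("hello, " + args[0])
	})
	if err != nil {
		panic(err)
	}

	fmt.Println("listening on", srv.Addr()) // point any RESP client here
}
```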
diff --git a/vendor/github.com/alicebob/miniredis/sorted_set.go b/vendor/github.com/alicebob/miniredis/sorted_set.go
new file mode 100644
index 000000000..9b1894d8a
--- /dev/null
+++ b/vendor/github.com/alicebob/miniredis/sorted_set.go
@@ -0,0 +1,97 @@
+package miniredis
+
+// The most KISS way to implement a sorted set. Luckily we don't care about
+// performance that much.
+
+import (
+ "sort"
+)
+
+type direction int
+
+const (
+ asc direction = iota
+ desc
+)
+
+type sortedSet map[string]float64
+
+type ssElem struct {
+ score float64
+ member string
+}
+type ssElems []ssElem
+
+type byScore ssElems
+
+func (sse byScore) Len() int { return len(sse) }
+func (sse byScore) Swap(i, j int) { sse[i], sse[j] = sse[j], sse[i] }
+func (sse byScore) Less(i, j int) bool {
+ if sse[i].score != sse[j].score {
+ return sse[i].score < sse[j].score
+ }
+ return sse[i].member < sse[j].member
+}
+
+func newSortedSet() sortedSet {
+ return sortedSet{}
+}
+
+func (ss *sortedSet) card() int {
+ return len(*ss)
+}
+
+func (ss *sortedSet) set(score float64, member string) {
+ (*ss)[member] = score
+}
+
+func (ss *sortedSet) get(member string) (float64, bool) {
+ v, ok := (*ss)[member]
+ return v, ok
+}
+
+// elems gives the list of ssElem, ready to sort.
+func (ss *sortedSet) elems() ssElems {
+ elems := make(ssElems, 0, len(*ss))
+ for e, s := range *ss {
+ elems = append(elems, ssElem{s, e})
+ }
+ return elems
+}
+
+func (ss *sortedSet) byScore(d direction) ssElems {
+ elems := ss.elems()
+ sort.Sort(byScore(elems))
+ if d == desc {
+ reverseElems(elems)
+ }
+ return ssElems(elems)
+}
+
+// rankByScore gives the (0-based) index of member, or returns false.
+func (ss *sortedSet) rankByScore(member string, d direction) (int, bool) {
+ if _, ok := (*ss)[member]; !ok {
+ return 0, false
+ }
+ for i, e := range ss.byScore(d) {
+ if e.member == member {
+ return i, true
+ }
+ }
+ // Can't happen
+ return 0, false
+}
+
+func reverseSlice(o []string) {
+ for i := range make([]struct{}, len(o)/2) {
+ other := len(o) - 1 - i
+ o[i], o[other] = o[other], o[i]
+ }
+}
+
+func reverseElems(o ssElems) {
+ for i := range make([]struct{}, len(o)/2) {
+ other := len(o) - 1 - i
+ o[i], o[other] = o[other], o[i]
+ }
+}
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 000000000..339177be6
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 000000000..1602287d7
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 000000000..d7d14f8eb
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
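+//
+// A minimal usage sketch (illustrative only; NewTargeted, Insert, and Query
+// are defined below):
+//
+//	s := quantile.NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
+//	for _, v := range observations {
+//		s.Insert(v)
+//	}
+//	p99 := s.Query(0.99)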
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore
new file mode 100644
index 000000000..3bcd8cbaf
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/.gitignore
@@ -0,0 +1,5 @@
+*.prof
+*.test
+*.swp
+/bin/
+cover.out
diff --git a/vendor/github.com/coreos/bbolt/.travis.yml b/vendor/github.com/coreos/bbolt/.travis.yml
new file mode 100644
index 000000000..a60300c55
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: go.etcd.io/bbolt
+
+sudo: false
+
+go:
+- 1.11
+
+before_install:
+- go get -v honnef.co/go/tools/...
+- go get -v github.com/kisielk/errcheck
+
+script:
+- make fmt
+- make test
+- make race
+# - make errcheck
diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE
new file mode 100644
index 000000000..004e77fe5
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile
new file mode 100644
index 000000000..2968aaa61
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/Makefile
@@ -0,0 +1,38 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+ @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
+ @echo "array freelist test"
+ @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+fmt:
+ !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
+
+# go get honnef.co/go/tools/simple
+gosimple:
+ gosimple ./...
+
+# go get honnef.co/go/tools/unused
+unused:
+ unused ./...
+
+# go get github.com/kisielk/errcheck
+errcheck:
+ @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
+
+test:
+ TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
+
+ @echo "array freelist test"
+
+ @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
+
+.PHONY: race fmt errcheck test gosimple unused
diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md
new file mode 100644
index 000000000..e9989efc5
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/README.md
@@ -0,0 +1,954 @@
+bbolt
+=====
+
+[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
+[](https://codecov.io/gh/etcd-io/bbolt)
+[](https://travis-ci.com/etcd-io/bbolt)
+[](https://godoc.org/github.com/etcd-io/bbolt)
+[](https://github.com/etcd-io/bbolt/releases)
+[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
+
+bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
+store. The purpose of this fork is to provide the Go community with an active
+maintenance and development target for Bolt; the goal is improved reliability
+and stability. bbolt includes bug fixes, performance enhancements, and features
+not found in Bolt while preserving backwards compatibility with the Bolt API.
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[gh_ben]: https://github.com/benbjohnson
+[bolt]: https://github.com/boltdb/bolt
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: http://symas.com/mdb/
+
+## Project Status
+
+Bolt is stable, the API is fixed, and the file format is fixed. Full unit
+test coverage and randomized black box testing are used to ensure database
+consistency and thread safety. Bolt is currently used in high-load production
+environments serving databases as large as 1TB. Many companies such as
+Shopify and Heroku use Bolt-backed services every day.
+
+## Project versioning
+
+bbolt uses [semantic versioning](http://semver.org).
+The API should not change between patch and minor releases.
+New minor versions may add additional features to the API.
+
+## Table of Contents
+
+ - [Getting Started](#getting-started)
+ - [Installing](#installing)
+ - [Opening a database](#opening-a-database)
+ - [Transactions](#transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Batch read-write transactions](#batch-read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ - [Using buckets](#using-buckets)
+ - [Using key/value pairs](#using-keyvalue-pairs)
+ - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+ - [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Range scans](#range-scans)
+ - [ForEach()](#foreach)
+ - [Nested buckets](#nested-buckets)
+ - [Database backups](#database-backups)
+ - [Statistics](#statistics)
+ - [Read-Only Mode](#read-only-mode)
+ - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+ - [Resources](#resources)
+ - [Comparison with other databases](#comparison-with-other-databases)
+ - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+ - [LevelDB, RocksDB](#leveldb-rocksdb)
+ - [LMDB](#lmdb)
+ - [Caveats & Limitations](#caveats--limitations)
+ - [Reading the Source](#reading-the-source)
+ - [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using Bolt, install Go and run `go get`:
+
+```sh
+$ go get go.etcd.io/bbolt/...
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Importing bbolt
+
+To use bbolt as an embedded key-value store, import as:
+
+```go
+import bolt "go.etcd.io/bbolt"
+
+db, err := bolt.Open(path, 0666, nil)
+if err != nil {
+ return err
+}
+defer db.Close()
+```
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is stored as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+ "log"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+func main() {
+ // Open the my.db data file in your current directory.
+ // It will be created if it doesn't exist.
+ db, err := bolt.Open("my.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ ...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for the disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+ // Find last key in bucket, decode as bigendian uint64, increment
+ // by one, encode back to []byte, and add new key.
+ ...
+ id = newValue
+ return nil
+})
+if err != nil {
+ return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+ return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+ return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+ return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
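+
+As a minimal sketch, a manual read-only transaction looks the same, passing
+`false`:
+
+```go
+tx, err := db.Begin(false)
+if err != nil {
+	return err
+}
+defer tx.Rollback()
+
+// Read data via tx.Bucket(...) here; writes are not allowed in this transaction.
+```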
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucket([]byte("MyBucket"))
+ if err != nil {
+ return fmt.Errorf("create bucket: %s", err)
+ }
+ return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
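+
+A minimal sketch of that startup pattern (the bucket name is illustrative):
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	// Guarantee the top-level bucket exists before other transactions use it.
+	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
+	return err
+})
+```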
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ err := b.Put([]byte("answer"), []byte("42"))
+ return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ v := b.Get([]byte("answer"))
+ fmt.Printf("The answer is: %s\n", v)
+ return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key, which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
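+
+For example, a minimal sketch of copying a value out of the transaction:
+
+```go
+var answer []byte
+db.View(func(tx *bolt.Tx) error {
+	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
+	if v != nil {
+		// Copy the bytes so they remain valid after the transaction closes.
+		answer = make([]byte, len(v))
+		copy(answer, v)
+	}
+	return nil
+})
+```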
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+ return s.db.Update(func(tx *bolt.Tx) error {
+ // Retrieve the users bucket.
+ // This should be created when the DB is first opened.
+ b := tx.Bucket([]byte("users"))
+
+ // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable.
+ // That can't happen in an Update() call so I ignore the error check.
+ id, _ := b.NextSequence()
+ u.ID = int(id)
+
+ // Marshal user data into bytes.
+ buf, err := json.Marshal(u)
+ if err != nil {
+ return err
+ }
+
+ // Persist bytes to users bucket.
+ return b.Put(itob(u.ID), buf)
+ })
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(v))
+ return b
+}
+
+type User struct {
+ ID int
+ ...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ c := b.Cursor()
+
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First() Move to the first key.
+Last() Move to the last key.
+Seek() Move to a specific key.
+Next() Move to the next key.
+Prev() Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor then `Next()` will return a
+`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
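+
+A minimal sketch of handling both cases while iterating:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if v == nil {
+			// A nil value means k names a nested bucket, not a regular key.
+			fmt.Printf("sub-bucket: %s\n", k)
+			continue
+		}
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+	return nil
+})
+```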
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+ prefix := []byte("1234")
+ for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume our events bucket exists and has RFC3339 encoded time keys.
+ c := tx.Bucket([]byte("Events")).Cursor()
+
+ // Our time range spans the 90's decade.
+ min := []byte("1990-01-01T00:00:00Z")
+ max := []byte("2000-01-01T00:00:00Z")
+
+ // Iterate over the 90's.
+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ fmt.Printf("%s: %s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ b.ForEach(func(k, v []byte) error {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ return nil
+ })
+ return nil
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. Within each account bucket you could then have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+ // Start the transaction.
+ tx, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Retrieve the root bucket for the account.
+ // Assume this has already been created when the account was set up.
+	root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
+
+ // Setup the users bucket.
+ bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+ if err != nil {
+ return err
+ }
+
+ // Generate an ID for the new user.
+ userID, err := bkt.NextSequence()
+ if err != nil {
+ return err
+ }
+ u.ID = userID
+
+ // Marshal and save the encoded user.
+ if buf, err := json.Marshal(u); err != nil {
+ return err
+ } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+ return err
+ }
+
+ // Commit the transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+```
+
+
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+ err := db.View(func(tx *bolt.Tx) error {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+ _, err := tx.WriteTo(w)
+ return err
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to back up to another file you can use the `Tx.CopyFile()` helper
+function.
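+
+A minimal sketch of a file backup (the destination path is illustrative):
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	// Write a consistent copy of the database to backup.db.
+	return tx.CopyFile("backup.db", 0600)
+})
+```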
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+ // Grab the initial stats.
+ prev := db.Stats()
+
+ for {
+ // Wait for 10s.
+ time.Sleep(10 * time.Second)
+
+ // Grab the current stats and diff them.
+ stats := db.Stats()
+ diff := stats.Sub(&prev)
+
+ // Encode stats to JSON and print to STDERR.
+ json.NewEncoder(os.Stderr).Encode(diff)
+
+ // Save stats for the next loop.
+ prev = stats
+ }
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+ db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return &BoltDB{db}
+}
+
+type BoltDB struct {
+ db *bolt.DB
+ ...
+}
+
+func (b *BoltDB) Path() string {
+ return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+ b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
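+
+For example, a minimal sketch of such a method (the bucket and key names are
+illustrative):
+
+```go
+func (b *BoltDB) SaveNote(key, value []byte) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bkt, err := tx.CreateBucketIfNotExists([]byte("notes"))
+		if err != nil {
+			return err
+		}
+		return bkt.Put(key, value)
+	})
+}
+```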
+
+To initialize this struct from the native language, use the snippets below.
+(Both platforms now sync their local storage to the cloud; these snippets
+disable that functionality for the database file.)
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
+ path = getNoBackupFilesDir().getAbsolutePath();
+} else{
+ path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+ NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+ NSUserDomainMask,
+ YES) objectAtIndex:0];
+ GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+ [self addSkipBackupAttributeToItemAtPath:demo.path];
+ //Some DB Logic would go here
+ [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+ NSURL* URL= [NSURL fileURLWithPath: filePathString];
+ assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+ NSError *error = nil;
+ BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+ forKey: NSURLIsExcludedFromBackupKey error: &error];
+ if(!success){
+ NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+ }
+ return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/values pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+ also fast but random writes can be slow. You can use `DB.Batch()` or add a
+ write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+ SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+ old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+ transaction has been committed or rolled back then the memory they point to
+ can be reused by a new page or can be unmapped from virtual memory and you'll
+ see an `unexpected fault address` panic when accessing it.
+
+* Bolt uses an exclusive write lock on the database file so it cannot be
+ shared by multiple processes.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+ buckets that have random inserts will cause your database to have very poor
+ page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+ once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+ page will not split until the transaction is committed. Randomly inserting
+ more than 100,000 key/value pairs into a single new bucket in a single
+ transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+ caching of the data. Typically, the OS will cache as much of the file as it
+ can in memory and will release memory as needed to other processes. This means
+ that Bolt can show very high memory usage when working with large databases.
+ However, this is expected and the OS will release memory as needed. Bolt can
+ handle databases much larger than the available physical RAM, provided its
+ memory-map fits in the process virtual address space. It may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+ will be endian specific. This means that you cannot copy a Bolt file from a
+ little endian machine to a big endian machine and have it work. For most
+ users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+ and return free pages back to the disk. Instead, Bolt maintains a free list
+ of unused pages within its data file. These free pages can be reused by later
+ transactions. This works well for many use cases as databases generally tend
+ to grow. However, it's important to note that deleting large chunks of data
+ will not allow you to reclaim that space on disk.
+
+ For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+ creating the database if it doesn't exist, obtaining an exclusive lock on the
+ file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+ value of the `writable` argument. This requires briefly obtaining the "meta"
+ lock to keep track of open transactions. Only one read-write transaction can
+ exist at a time so the "rwlock" is acquired during the life of a read-write
+ transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+ arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+ materializes the underlying page and the page's parent pages into memory as
+ "nodes". These nodes are where mutations occur during read-write transactions.
+ These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+ to move to the page & position of a key/value pair. During a read-only
+ transaction, the key and value data is returned as a direct reference to the
+ underlying mmap file so there's no allocation overhead. For read-write
+ transactions, this data may reference the mmap file or one of the in-memory
+ node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+ or in-memory nodes. It can seek to a specific key, move to the first or last
+ value, or it can move forward or backward. The cursor handles the movement up
+ and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+ into pages to be written to disk. Writing to disk then occurs in two phases.
+ First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+ new meta page with an incremented transaction ID is written and another
+ `fsync()` occurs. This two phase write ensures that partially written data
+ pages are ignored in the event of a crash since the meta page pointing to them
+ is never written. Partially written meta pages are invalidated because they
+ are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+ simple tx and key scans.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, offers a JSON over HTTP API, and supports ISO 8601 duration notation and dependent jobs.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+ backed by boltdb.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
new file mode 100644
index 000000000..4d35ee7cf
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_386.go
@@ -0,0 +1,10 @@
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
new file mode 100644
index 000000000..60a52dad5
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go
@@ -0,0 +1,10 @@
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go
new file mode 100644
index 000000000..105d27ddb
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_arm.go
@@ -0,0 +1,28 @@
+package bbolt
+
+import "unsafe"
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned bool
+
+func init() {
+ // Simple check to see whether this arch handles unaligned load/stores
+ // correctly.
+
+ // ARM9 and older devices require load/stores to be from/to aligned
+ // addresses. If not, the lower 2 bits are cleared and that address is
+ // read in a jumbled up order.
+
+ // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+
+ raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
+ val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
+
+ brokenUnaligned = val != 0x11222211
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go
new file mode 100644
index 000000000..f5aa2a5ee
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go
@@ -0,0 +1,12 @@
+// +build arm64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go
new file mode 100644
index 000000000..7707bcacf
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_linux.go
@@ -0,0 +1,10 @@
+package bbolt
+
+import (
+ "syscall"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return syscall.Fdatasync(int(db.file.Fd()))
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go
new file mode 100644
index 000000000..baeb289fd
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_mips64x.go
@@ -0,0 +1,12 @@
+// +build mips64 mips64le
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x8000000000 // 512GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go
new file mode 100644
index 000000000..2d9b1a91f
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_mipsx.go
@@ -0,0 +1,12 @@
+// +build mips mipsle
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x40000000 // 1GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
new file mode 100644
index 000000000..d7f50358e
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
@@ -0,0 +1,27 @@
+package bbolt
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ msAsync = 1 << iota // perform asynchronous writes
+ msSync // perform synchronous writes
+ msInvalidate // invalidate cached data
+)
+
+func msync(db *DB) error {
+ _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func fdatasync(db *DB) error {
+ if db.data != nil {
+ return msync(db)
+ }
+ return db.file.Sync()
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go
new file mode 100644
index 000000000..69804714a
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc.go
@@ -0,0 +1,12 @@
+// +build ppc
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
new file mode 100644
index 000000000..356590857
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
@@ -0,0 +1,12 @@
+// +build ppc64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
new file mode 100644
index 000000000..422c7c69d
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
@@ -0,0 +1,12 @@
+// +build ppc64le
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_riscv64.go b/vendor/github.com/coreos/bbolt/bolt_riscv64.go
new file mode 100644
index 000000000..07b4b47cd
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_riscv64.go
@@ -0,0 +1,12 @@
+// +build riscv64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = true
diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go
new file mode 100644
index 000000000..6d3fcb825
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go
@@ -0,0 +1,12 @@
+// +build s390x
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go
new file mode 100644
index 000000000..5f2bb5145
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_unix.go
@@ -0,0 +1,93 @@
+// +build !windows,!plan9,!solaris
+
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ flag := syscall.LOCK_NB
+ if exclusive {
+ flag |= syscall.LOCK_EX
+ } else {
+ flag |= syscall.LOCK_SH
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ err := syscall.Flock(int(fd), flag)
+ if err == nil {
+ return nil
+ } else if err != syscall.EWOULDBLOCK {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = madvise(b, syscall.MADV_RANDOM)
+ if err != nil && err != syscall.ENOSYS {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := syscall.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
+
+// NOTE: This function is copied from stdlib because it is not available on darwin.
+func madvise(b []byte, advice int) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
new file mode 100644
index 000000000..babad6578
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
@@ -0,0 +1,88 @@
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go
new file mode 100644
index 000000000..fca178bd2
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_windows.go
@@ -0,0 +1,141 @@
+package bbolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+ procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+ flagLockExclusive = 2
+ flagLockFailImmediately = 1
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+ errLockViolation syscall.Errno = 0x21
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ var flag uint32 = flagLockFailImmediately
+ if exclusive {
+ flag |= flagLockExclusive
+ }
+ for {
+ // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+ // -1..0 as the lock on the database file.
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+
+ if err == nil {
+ return nil
+ } else if err != errLockViolation {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+ return err
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+ if !db.readOnly {
+ // Truncate the database to the size of the mmap.
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("truncate: %s", err)
+ }
+ }
+
+ // Open a file mapping handle.
+ sizelo := uint32(sz >> 32)
+ sizehi := uint32(sz) & 0xffffffff
+ h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
+ if h == 0 {
+ return os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Create the memory map.
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+ if addr == 0 {
+ return os.NewSyscallError("MapViewOfFile", errno)
+ }
+
+ // Close mapping handle.
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+ return os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Convert to a byte array.
+ db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
+ db.datasz = sz
+
+ return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go
+func munmap(db *DB) error {
+ if db.data == nil {
+ return nil
+ }
+
+ addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+ if err := syscall.UnmapViewOfFile(addr); err != nil {
+ return os.NewSyscallError("UnmapViewOfFile", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go
new file mode 100644
index 000000000..9587afefe
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/boltsync_unix.go
@@ -0,0 +1,8 @@
+// +build !windows,!plan9,!linux,!openbsd
+
+package bbolt
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go
new file mode 100644
index 000000000..84bfd4d6a
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bucket.go
@@ -0,0 +1,775 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+)
+
+const (
+ // MaxKeySize is the maximum length of a key, in bytes.
+ MaxKeySize = 32768
+
+ // MaxValueSize is the maximum length of a value, in bytes.
+ MaxValueSize = (1 << 31) - 2
+)
+
+const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
+
+const (
+ minFillPercent = 0.1
+ maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
+// Bucket represents a collection of key/value pairs inside the database.
+type Bucket struct {
+ *bucket
+ tx *Tx // the associated transaction
+ buckets map[string]*Bucket // subbucket cache
+ page *page // inline page reference
+ rootNode *node // materialized node for the root page.
+ nodes map[pgid]*node // node cache
+
+ // Sets the threshold for filling nodes when they split. By default,
+ // the bucket will fill to 50% but it can be useful to increase this
+ // amount if you know that your write workloads are mostly append-only.
+ //
+ // This is not persisted across transactions so it must be set in every Tx.
+ FillPercent float64
+}
+
+// bucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type bucket struct {
+ root pgid // page id of the bucket's root-level page
+ sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+ var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
+ if tx.writable {
+ b.buckets = make(map[string]*Bucket)
+ b.nodes = make(map[pgid]*node)
+ }
+ return b
+}
+
+// Tx returns the tx of the bucket.
+func (b *Bucket) Tx() *Tx {
+ return b.tx
+}
+
+// Root returns the root of the bucket.
+func (b *Bucket) Root() pgid {
+ return b.root
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+ return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+ // Update transaction statistics.
+ b.tx.stats.CursorCount++
+
+ // Allocate and return a cursor.
+ return &Cursor{
+ bucket: b,
+ stack: make([]elemRef, 0),
+ }
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+ if b.buckets != nil {
+ if child := b.buckets[string(name)]; child != nil {
+ return child
+ }
+ }
+
+ // Move cursor to key.
+ c := b.Cursor()
+ k, v, flags := c.seek(name)
+
+ // Return nil if the key doesn't exist or it is not a bucket.
+ if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+ return nil
+ }
+
+ // Otherwise create a bucket and cache it.
+ var child = b.openBucket(v)
+ if b.buckets != nil {
+ b.buckets[string(name)] = child
+ }
+
+ return child
+}
+
+// Helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket
+func (b *Bucket) openBucket(value []byte) *Bucket {
+ var child = newBucket(b.tx)
+
+ // If unaligned load/stores are broken on this arch and value is
+ // unaligned simply clone to an aligned byte array.
+ unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
+
+ if unaligned {
+ value = cloneBytes(value)
+ }
+
+ // If this is a writable transaction then we need to copy the bucket entry.
+ // Read-only transactions can point directly at the mmap entry.
+ if b.tx.writable && !unaligned {
+ child.bucket = &bucket{}
+ *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+ } else {
+ child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ }
+
+ // Save a reference to the inline page if the bucket is inline.
+ if child.root == 0 {
+ child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ }
+
+ return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ if b.tx.db == nil {
+ return nil, ErrTxClosed
+ } else if !b.tx.writable {
+ return nil, ErrTxNotWritable
+ } else if len(key) == 0 {
+ return nil, ErrBucketNameRequired
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key.
+ if bytes.Equal(key, k) {
+ if (flags & bucketLeafFlag) != 0 {
+ return nil, ErrBucketExists
+ }
+ return nil, ErrIncompatibleValue
+ }
+
+ // Create empty, inline bucket.
+ var bucket = Bucket{
+ bucket: &bucket{},
+ rootNode: &node{isLeaf: true},
+ FillPercent: DefaultFillPercent,
+ }
+ var value = bucket.write()
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, bucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ child, err := b.CreateBucket(key)
+ if err == ErrBucketExists {
+ return b.Bucket(key), nil
+ } else if err != nil {
+ return nil, err
+ }
+ return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(key, k) {
+ return ErrBucketNotFound
+ } else if (flags & bucketLeafFlag) == 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(key)
+ err := child.ForEach(func(k, v []byte) error {
+ if v == nil {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(key))
+
+ // Release all bucket pages to freelist.
+ child.nodes = nil
+ child.rootNode = nil
+ child.free()
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return nil
+ }
+
+ // If our target node isn't the same key as what's passed in then return nil.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+ return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
+ return ErrKeyRequired
+ } else if len(key) > MaxKeySize {
+ return ErrKeyTooLarge
+ } else if int64(len(value)) > MaxValueSize {
+ return ErrValueTooLarge
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, 0)
+
+ return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return nil if the key doesn't exist.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+
+ // Return an error if there is an existing bucket value.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Set the sequence.
+ b.bucket.sequence = v
+ return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+ if b.tx.db == nil {
+ return 0, ErrTxClosed
+ } else if !b.Writable() {
+ return 0, ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Increment and return the sequence.
+ b.bucket.sequence++
+ return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ if err := fn(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+ var s, subStats BucketStats
+ pageSize := b.tx.db.pageSize
+ s.BucketN += 1
+ if b.root == 0 {
+ s.InlineBucketN += 1
+ }
+ b.forEachPage(func(p *page, depth int) {
+ if (p.flags & leafPageFlag) != 0 {
+ s.KeyN += int(p.count)
+
+ // used totals the used bytes for the page
+ used := pageHeaderSize
+
+ if p.count != 0 {
+ // If page has any elements, add all element headers.
+ used += leafPageElementSize * int(p.count-1)
+
+ // Add all element key, value sizes.
+ // The computation takes advantage of the fact that the position
+ // of the last element's key/value equals to the total of the sizes
+ // of all previous elements' keys and values.
+ // It also includes the last element's header.
+ lastElement := p.leafPageElement(p.count - 1)
+ used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+ }
+
+ if b.root == 0 {
+ // For inlined bucket just update the inline stats
+ s.InlineBucketInuse += used
+ } else {
+ // For non-inlined bucket update all the leaf stats
+ s.LeafPageN++
+ s.LeafInuse += used
+ s.LeafOverflowN += int(p.overflow)
+
+ // Collect stats from sub-buckets.
+ // Do that by iterating over all element headers
+ // looking for the ones with the bucketLeafFlag.
+ for i := uint16(0); i < p.count; i++ {
+ e := p.leafPageElement(i)
+ if (e.flags & bucketLeafFlag) != 0 {
+ // For any bucket element, open the element value
+ // and recursively call Stats on the contained bucket.
+ subStats.Add(b.openBucket(e.value()).Stats())
+ }
+ }
+ }
+ } else if (p.flags & branchPageFlag) != 0 {
+ s.BranchPageN++
+ lastElement := p.branchPageElement(p.count - 1)
+
+ // used totals the used bytes for the page
+ // Add header and all element headers.
+ used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+ // Add size of all keys and values.
+ // Again, use the fact that last element's position equals to
+ // the total of key, value sizes of all previous elements.
+ used += int(lastElement.pos + lastElement.ksize)
+ s.BranchInuse += used
+ s.BranchOverflowN += int(p.overflow)
+ }
+
+ // Keep track of maximum page depth.
+ if depth+1 > s.Depth {
+ s.Depth = (depth + 1)
+ }
+ })
+
+ // Alloc stats can be computed from page counts and pageSize.
+ s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+ s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+ // Add the max depth of sub-buckets to get total nested depth.
+ s.Depth += subStats.Depth
+ // Add the stats for all sub-buckets
+ s.Add(subStats)
+ return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+ // If we have an inline page then just use that.
+ if b.page != nil {
+ fn(b.page, 0)
+ return
+ }
+
+ // Otherwise traverse the page hierarchy.
+ b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+ // If we have an inline page or root node then just use that.
+ if b.page != nil {
+ fn(b.page, nil, 0)
+ return
+ }
+ b._forEachPageNode(b.root, 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
+ var p, n = b.pageNode(pgid)
+
+ // Execute function.
+ fn(p, n, depth)
+
+ // Recursively loop over children.
+ if p != nil {
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ b._forEachPageNode(elem.pgid, depth+1, fn)
+ }
+ }
+ } else {
+ if !n.isLeaf {
+ for _, inode := range n.inodes {
+ b._forEachPageNode(inode.pgid, depth+1, fn)
+ }
+ }
+ }
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+ // Spill all child buckets first.
+ for name, child := range b.buckets {
+ // If the child bucket is small enough and it has no child buckets then
+ // write it inline into the parent bucket's page. Otherwise spill it
+ // like a normal bucket and make the parent value a pointer to the page.
+ var value []byte
+ if child.inlineable() {
+ child.free()
+ value = child.write()
+ } else {
+ if err := child.spill(); err != nil {
+ return err
+ }
+
+ // Update the child bucket header in this bucket.
+ value = make([]byte, unsafe.Sizeof(bucket{}))
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *child.bucket
+ }
+
+ // Skip writing the bucket if there are no materialized nodes.
+ if child.rootNode == nil {
+ continue
+ }
+
+ // Update parent node.
+ var c = b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+ if !bytes.Equal([]byte(name), k) {
+ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+ }
+ if flags&bucketLeafFlag == 0 {
+ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+ }
+ c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ }
+
+ // Ignore if there's not a materialized root node.
+ if b.rootNode == nil {
+ return nil
+ }
+
+ // Spill nodes.
+ if err := b.rootNode.spill(); err != nil {
+ return err
+ }
+ b.rootNode = b.rootNode.root()
+
+ // Update the root node for this bucket.
+ if b.rootNode.pgid >= b.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+ }
+ b.root = b.rootNode.pgid
+
+ return nil
+}
+
+// inlineable returns true if a bucket is small enough to be written inline
+// and if it contains no subbuckets. Otherwise returns false.
+func (b *Bucket) inlineable() bool {
+ var n = b.rootNode
+
+ // Bucket must only contain a single leaf node.
+ if n == nil || !n.isLeaf {
+ return false
+ }
+
+ // Bucket is not inlineable if it contains subbuckets or if it goes beyond
+ // our threshold for inline bucket size.
+ var size = pageHeaderSize
+ for _, inode := range n.inodes {
+ size += leafPageElementSize + len(inode.key) + len(inode.value)
+
+ if inode.flags&bucketLeafFlag != 0 {
+ return false
+ } else if size > b.maxInlineBucketSize() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the maximum total size of a bucket to make it a candidate for inlining.
+func (b *Bucket) maxInlineBucketSize() int {
+ return b.tx.db.pageSize / 4
+}
+
+// write allocates and writes a bucket to a byte slice.
+func (b *Bucket) write() []byte {
+ // Allocate the appropriate size.
+ var n = b.rootNode
+ var value = make([]byte, bucketHeaderSize+n.size())
+
+ // Write a bucket header.
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *b.bucket
+
+ // Convert byte slice to a fake page and write the root node.
+ var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ n.write(p)
+
+ return value
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+ for _, n := range b.nodes {
+ n.rebalance()
+ }
+ for _, child := range b.buckets {
+ child.rebalance()
+ }
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+ _assert(b.nodes != nil, "nodes map expected")
+
+ // Retrieve node if it's already been created.
+ if n := b.nodes[pgid]; n != nil {
+ return n
+ }
+
+ // Otherwise create a node and cache it.
+ n := &node{bucket: b, parent: parent}
+ if parent == nil {
+ b.rootNode = n
+ } else {
+ parent.children = append(parent.children, n)
+ }
+
+ // Use the inline page if this is an inline bucket.
+ var p = b.page
+ if p == nil {
+ p = b.tx.page(pgid)
+ }
+
+ // Read the page into the node and cache it.
+ n.read(p)
+ b.nodes[pgid] = n
+
+ // Update statistics.
+ b.tx.stats.NodeCount++
+
+ return n
+}
+
+// free recursively frees all pages in the bucket.
+func (b *Bucket) free() {
+ if b.root == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.free(tx.meta.txid, p)
+ } else {
+ n.free()
+ }
+ })
+ b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of keys/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go
new file mode 100644
index 000000000..3000aced6
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/cursor.go
@@ -0,0 +1,396 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+//
+// Changing data while traversing with a cursor may cause it to be invalidated
+// and return unexpected keys and/or values. You must reposition your cursor
+// after mutating data.
+type Cursor struct {
+ bucket *Bucket
+ stack []elemRef
+}
+
+// Bucket returns the bucket that this cursor was created from.
+func (c *Cursor) Bucket() *Bucket {
+ return c.bucket
+}
+
+// First moves the cursor to the first item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) First() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ c.first()
+
+ // If we land on an empty page then move to the next value.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ c.next()
+ }
+
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+
+}
+
+// Last moves the cursor to the last item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Last() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ ref := elemRef{page: p, node: n}
+ ref.index = ref.count() - 1
+ c.stack = append(c.stack, ref)
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Next moves the cursor to the next item in the bucket and returns its key and value.
+// If the cursor is at the end of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Next() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.next()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Prev() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Attempt to move back one element until we're successful.
+ // Move up the stack as we hit the beginning of each page in our stack.
+ for i := len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index > 0 {
+ elem.index--
+ break
+ }
+ c.stack = c.stack[:i]
+ }
+
+ // If we've hit the end then return nil.
+ if len(c.stack) == 0 {
+ return nil, nil
+ }
+
+ // Move down the stack to find the last element of the last leaf under this branch.
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil key is returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ k, v, flags := c.seek(seek)
+
+ // If we ended up after the last element of a page then move to the next one.
+ if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+ k, v, flags = c.next()
+ }
+
+ if k == nil {
+ return nil, nil
+ } else if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Delete removes the current key/value under the cursor from the bucket.
+// Delete fails if current key/value is a bucket or if the transaction is not writable.
+func (c *Cursor) Delete() error {
+ if c.bucket.tx.db == nil {
+ return ErrTxClosed
+ } else if !c.bucket.Writable() {
+ return ErrTxNotWritable
+ }
+
+ key, _, flags := c.keyValue()
+ // Return an error if current value is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+ c.node().del(key)
+
+ return nil
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Start from root page/node and traverse to correct page.
+ c.stack = c.stack[:0]
+ c.search(seek, c.bucket.root)
+
+ // If this is a bucket then return a nil value.
+ return c.keyValue()
+}
+
+// first moves the cursor to the first leaf element under the last page in the stack.
+func (c *Cursor) first() {
+ for {
+ // Exit when we hit a leaf page.
+ var ref = &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the first element to the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ }
+}
+
+// last moves the cursor to the last leaf element under the last page in the stack.
+func (c *Cursor) last() {
+ for {
+ // Exit when we hit a leaf page.
+ ref := &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the last element in the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+
+ var nextRef = elemRef{page: p, node: n}
+ nextRef.index = nextRef.count() - 1
+ c.stack = append(c.stack, nextRef)
+ }
+}
+
+// next moves to the next leaf element and returns the key and value.
+// If the cursor is at the last leaf element then it stays there and returns nil.
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ for {
+ // Attempt to move over one element until we're successful.
+ // Move up the stack as we hit the end of each page in our stack.
+ var i int
+ for i = len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index < elem.count()-1 {
+ elem.index++
+ break
+ }
+ }
+
+ // If we've hit the root page then stop and return. This will leave the
+ // cursor on the last element of the last page.
+ if i == -1 {
+ return nil, nil, 0
+ }
+
+ // Otherwise start from where we left off in the stack and find the
+ // first element of the first leaf page.
+ c.stack = c.stack[:i+1]
+ c.first()
+
+ // If this is an empty page then restart and move back up the stack.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ continue
+ }
+
+ return c.keyValue()
+ }
+}
+
+// search recursively performs a binary search against a given page/node until it finds a given key.
+func (c *Cursor) search(key []byte, pgid pgid) {
+ p, n := c.bucket.pageNode(pgid)
+ if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
+ panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+ }
+ e := elemRef{page: p, node: n}
+ c.stack = append(c.stack, e)
+
+ // If we're on a leaf page/node then find the specific node.
+ if e.isLeaf() {
+ c.nsearch(key)
+ return
+ }
+
+ if n != nil {
+ c.searchNode(key, n)
+ return
+ }
+ c.searchPage(key, p)
+}
+
+func (c *Cursor) searchNode(key []byte, n *node) {
+ var exact bool
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(n.inodes[i].key, key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, n.inodes[index].pgid)
+}
+
+func (c *Cursor) searchPage(key []byte, p *page) {
+ // Binary search for the correct range.
+ inodes := p.branchPageElements()
+
+ var exact bool
+ index := sort.Search(int(p.count), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(inodes[i].key(), key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, inodes[index].pgid)
+}
+
+// nsearch searches the leaf node on the top of the stack for a key.
+func (c *Cursor) nsearch(key []byte) {
+ e := &c.stack[len(c.stack)-1]
+ p, n := e.page, e.node
+
+ // If we have a node then search its inodes.
+ if n != nil {
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, key) != -1
+ })
+ e.index = index
+ return
+ }
+
+ // If we have a page then search its leaf elements.
+ inodes := p.leafPageElements()
+ index := sort.Search(int(p.count), func(i int) bool {
+ return bytes.Compare(inodes[i].key(), key) != -1
+ })
+ e.index = index
+}
+
+// keyValue returns the key and value of the current leaf element.
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
+ ref := &c.stack[len(c.stack)-1]
+
+ // If the cursor is pointing to the end of page/node then return nil.
+ if ref.count() == 0 || ref.index >= ref.count() {
+ return nil, nil, 0
+ }
+
+ // Retrieve value from node.
+ if ref.node != nil {
+ inode := &ref.node.inodes[ref.index]
+ return inode.key, inode.value, inode.flags
+ }
+
+ // Or retrieve value from page.
+ elem := ref.page.leafPageElement(uint16(ref.index))
+ return elem.key(), elem.value(), elem.flags
+}
+
+// node returns the node that the cursor is currently positioned on.
+func (c *Cursor) node() *node {
+ _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+
+ // If the top of the stack is a leaf node then just return it.
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ return ref.node
+ }
+
+ // Start from root and traverse down the hierarchy.
+ var n = c.stack[0].node
+ if n == nil {
+ n = c.bucket.node(c.stack[0].page.id, nil)
+ }
+ for _, ref := range c.stack[:len(c.stack)-1] {
+ _assert(!n.isLeaf, "expected branch node")
+ n = n.childAt(int(ref.index))
+ }
+ _assert(n.isLeaf, "expected leaf node")
+ return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+ page *page
+ node *node
+ index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+ if r.node != nil {
+ return r.node.isLeaf
+ }
+ return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+ if r.node != nil {
+ return len(r.node.inodes)
+ }
+ return int(r.page.count)
+}
diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/coreos/bbolt/db.go
new file mode 100644
index 000000000..870c8b1cc
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/db.go
@@ -0,0 +1,1174 @@
+package bbolt
+
+import (
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "log"
+ "os"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+const pgidNoFreelist pgid = 0xffffffffffffffff
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+ DefaultMaxBatchSize int = 1000
+ DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+const (
+ // FreelistArrayType indicates backend freelist type is array
+ FreelistArrayType = FreelistType("array")
+ // FreelistMapType indicates backend freelist type is hashmap
+ FreelistMapType = FreelistType("hashmap")
+)
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+ // When enabled, the database will perform a Check() after every commit.
+ // A panic is issued if the database is in an inconsistent state. This
+ // flag has a large performance impact so it should only be used for
+ // debugging purposes.
+ StrictMode bool
+
+ // Setting the NoSync flag will cause the database to skip fsync()
+ // calls after each commit. This can be useful when bulk loading data
+ // into a database and you can restart the bulk load in the event of
+ // a system failure or database corruption. Do not set this flag for
+ // normal use.
+ //
+ // If the package global IgnoreNoSync constant is true, this value is
+ // ignored. See the comment on that constant for more details.
+ //
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
+ // When true, skips syncing freelist to disk. This improves the database
+ // write performance under normal operation, but requires a full database
+ // re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options. Array, which is simple but suffers
+ // dramatic performance degradation if the database is large and fragmentation in the freelist is common.
+ // The alternative is hashmap, which is faster in almost all circumstances
+ // but does not guarantee that it offers the smallest page id available. In normal cases it is safe.
+ // The default type is array.
+ FreelistType FreelistType
+
+ // When true, skips the truncate call when growing the database.
+ // Setting this to true is only safe on non-ext3/ext4 systems.
+ // Skipping truncation avoids preallocation of hard drive space and
+ // bypasses a truncate() and fsync() syscall on remapping.
+ //
+ // https://github.com/boltdb/bolt/issues/284
+ NoGrowSync bool
+
+ // If you want to read the entire database fast, you can set MmapFlag to
+ // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+ MmapFlags int
+
+ // MaxBatchSize is the maximum size of a batch. Default value is
+ // copied from DefaultMaxBatchSize in Open.
+ //
+ // If <=0, disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchSize int
+
+ // MaxBatchDelay is the maximum delay before a batch starts.
+ // Default value is copied from DefaultMaxBatchDelay in Open.
+ //
+ // If <=0, effectively disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchDelay time.Duration
+
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
+ path string
+ openFile func(string, int, os.FileMode) (*os.File, error)
+ file *os.File
+ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[maxMapSize]byte
+ datasz int
+ filesz int // current on disk file size
+ meta0 *meta
+ meta1 *meta
+ pageSize int
+ opened bool
+ rwtx *Tx
+ txs []*Tx
+ stats Stats
+
+ freelist *freelist
+ freelistLoad sync.Once
+
+ pagePool sync.Pool
+
+ batchMu sync.Mutex
+ batch *batch
+
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
+
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
+
+ // Read only mode.
+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+ readOnly bool
+}
+
+// Path returns the path to currently open database file.
+func (db *DB) Path() string {
+ return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+ return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+ return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path.
+// If the file does not exist then it will be created automatically.
+// Passing in nil options will cause Bolt to open the database with the default options.
+func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ db := &DB{
+ opened: true,
+ }
+ // Set default options if no options are provided.
+ if options == nil {
+ options = DefaultOptions
+ }
+ db.NoSync = options.NoSync
+ db.NoGrowSync = options.NoGrowSync
+ db.MmapFlags = options.MmapFlags
+ db.NoFreelistSync = options.NoFreelistSync
+ db.FreelistType = options.FreelistType
+
+ // Set default values for later DB operations.
+ db.MaxBatchSize = DefaultMaxBatchSize
+ db.MaxBatchDelay = DefaultMaxBatchDelay
+ db.AllocSize = DefaultAllocSize
+
+ flag := os.O_RDWR
+ if options.ReadOnly {
+ flag = os.O_RDONLY
+ db.readOnly = true
+ }
+
+ db.openFile = options.OpenFile
+ if db.openFile == nil {
+ db.openFile = os.OpenFile
+ }
+
+ // Open data file and separate sync handler for metadata writes.
+ db.path = path
+ var err error
+ if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Lock file so that other processes using Bolt in read-write mode cannot
+ // use the database at the same time. This would cause corruption since
+ // the two processes would write meta pages and free pages separately.
+ // The database file is locked exclusively (only one process can grab the lock)
+ // if !options.ReadOnly.
+ // The database file is locked using the shared lock (more than one process may
+ // hold a lock at the same time) otherwise (options.ReadOnly is set).
+ if err := flock(db, !db.readOnly, options.Timeout); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Default values for test hooks
+ db.ops.writeAt = db.file.WriteAt
+
+ if db.pageSize = options.PageSize; db.pageSize == 0 {
+ // Set the default page size to the OS page size.
+ db.pageSize = defaultPageSize
+ }
+
+ // Initialize the database if it doesn't exist.
+ if info, err := db.file.Stat(); err != nil {
+ _ = db.close()
+ return nil, err
+ } else if info.Size() == 0 {
+ // Initialize new files with meta pages.
+ if err := db.init(); err != nil {
+ // clean up file descriptor on initialization fail
+ _ = db.close()
+ return nil, err
+ }
+ } else {
+ // Read the first meta page to determine the page size.
+ var buf [0x1000]byte
+ // If we can't read the page size, but can read a page, assume
+ // it's the same as the OS or one given -- since that's how the
+ // page size was chosen in the first place.
+ //
+ // If the first page is invalid and this OS uses a different
+ // page size than what the database was created with then we
+ // are out of luck and cannot access the database.
+ //
+ // TODO: scan for next page
+ if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
+ if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
+ db.pageSize = int(m.pageSize)
+ }
+ } else {
+ _ = db.close()
+ return nil, ErrInvalid
+ }
+ }
+
+ // Initialize page pool.
+ db.pagePool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, db.pageSize)
+ },
+ }
+
+ // Memory map the data file.
+ if err := db.mmap(options.InitialMmapSize); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ if db.readOnly {
+ return db, nil
+ }
+
+ db.loadFreelist()
+
+ // Flush freelist when transitioning from no sync to sync so
+ // NoFreelistSync unaware boltdb can open the db later.
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() {
+ tx, err := db.Begin(true)
+ if tx != nil {
+ err = tx.Commit()
+ }
+ if err != nil {
+ _ = db.close()
+ return nil, err
+ }
+ }
+
+ // Mark the database as opened and return.
+ return db, nil
+}
+
+// loadFreelist reads the freelist if it is synced, or reconstructs it
+// by scanning the DB if it is not synced. It assumes there are no
+// concurrent accesses being made to the freelist.
+func (db *DB) loadFreelist() {
+ db.freelistLoad.Do(func() {
+ db.freelist = newFreelist(db.FreelistType)
+ if !db.hasSyncedFreelist() {
+ // Reconstruct free list by scanning the DB.
+ db.freelist.readIDs(db.freepages())
+ } else {
+ // Read free list from freelist page.
+ db.freelist.read(db.page(db.meta().freelist))
+ }
+ db.stats.FreePageN = db.freelist.free_count()
+ })
+}
+
+func (db *DB) hasSyncedFreelist() bool {
+ return db.meta().freelist != pgidNoFreelist
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ info, err := db.file.Stat()
+ if err != nil {
+ return fmt.Errorf("mmap stat error: %s", err)
+ } else if int(info.Size()) < db.pageSize*2 {
+ return fmt.Errorf("file size too small")
+ }
+
+ // Ensure the size is at least the minimum size.
+ var size = int(info.Size())
+ if size < minsz {
+ size = minsz
+ }
+ size, err = db.mmapSize(size)
+ if err != nil {
+ return err
+ }
+
+ // Dereference all mmap references before unmapping.
+ if db.rwtx != nil {
+ db.rwtx.root.dereference()
+ }
+
+ // Unmap existing data before continuing.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Memory-map the data file as a byte slice.
+ if err := mmap(db, size); err != nil {
+ return err
+ }
+
+ // Save references to the meta pages.
+ db.meta0 = db.page(0).meta()
+ db.meta1 = db.page(1).meta()
+
+ // Validate the meta pages. We only return an error if both meta pages fail
+ // validation, since meta0 failing validation means that it wasn't saved
+ // properly -- but we can recover using meta1. And vice-versa.
+ err0 := db.meta0.validate()
+ err1 := db.meta1.validate()
+ if err0 != nil && err1 != nil {
+ return err0
+ }
+
+ return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+ if err := munmap(db); err != nil {
+ return fmt.Errorf("unmap error: " + err.Error())
+ }
+ return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in MBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// It will block waiting for any open transactions to finish
+// before closing the database and returning.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ log.Printf("bolt.Close(): funlock error: %s", err)
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ return fmt.Errorf("db file close: %s", err)
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of write transaction.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
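+//
+// A minimal sketch of a manually managed read-only transaction (the bucket
+// and key names below are illustrative, not part of this package):
+//
+//	tx, err := db.Begin(false)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//
+//	if b := tx.Bucket([]byte("widgets")); b != nil {
+//		_ = b.Get([]byte("foo")) // only valid until the transaction closes
+//	}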
+func (db *DB) Begin(writable bool) (*Tx, error) {
+ if writable {
+ return db.beginRWTx()
+ }
+ return db.beginTx()
+}
+
+func (db *DB) beginTx() (*Tx, error) {
+ // Lock the meta pages while we initialize the transaction. We obtain
+ // the meta lock before the mmap lock because that's the order that the
+ // write transaction will obtain them.
+ db.metalock.Lock()
+
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it will
+ // obtain a write lock so all transactions must finish before it can be
+ // remapped.
+ db.mmaplock.RLock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{}
+ t.init(db)
+
+ // Keep track of transaction until it closes.
+ db.txs = append(db.txs, t)
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Update the transaction stats.
+ db.statlock.Lock()
+ db.stats.TxN++
+ db.stats.OpenTxN = n
+ db.statlock.Unlock()
+
+ return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+ // If the database was opened with Options.ReadOnly, return an error.
+ if db.readOnly {
+ return nil, ErrDatabaseReadOnly
+ }
+
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.rwlock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{writable: true}
+ t.init(db)
+ db.rwtx = t
+ db.freePages()
+ return t, nil
+}
+
+// freePages releases any pages associated with closed read-only transactions.
+func (db *DB) freePages() {
+ // Free all pending pages prior to earliest open transaction.
+ sort.Sort(txsById(db.txs))
+ minid := txid(0xFFFFFFFFFFFFFFFF)
+ if len(db.txs) > 0 {
+ minid = db.txs[0].meta.txid
+ }
+ if minid > 0 {
+ db.freelist.release(minid - 1)
+ }
+ // Release unused txid extents.
+ for _, t := range db.txs {
+ db.freelist.releaseRange(minid, t.meta.txid-1)
+ minid = t.meta.txid + 1
+ }
+ db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
+ // Any page both allocated and freed in an extent is safe to release.
+}
+
+type txsById []*Tx
+
+func (t txsById) Len() int { return len(t) }
+func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+ // Release the read lock on the mmap.
+ db.mmaplock.RUnlock()
+
+ // Use the meta lock to restrict access to the DB object.
+ db.metalock.Lock()
+
+ // Remove the transaction.
+ for i, t := range db.txs {
+ if t == tx {
+ last := len(db.txs) - 1
+ db.txs[i] = db.txs[last]
+ db.txs[last] = nil
+ db.txs = db.txs[:last]
+ break
+ }
+ }
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Merge statistics.
+ db.statlock.Lock()
+ db.stats.OpenTxN = n
+ db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
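+//
+// A minimal usage sketch (the bucket and key/value names below are
+// illustrative, not part of this package):
+//
+//	err := db.Update(func(tx *Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err
+//		}
+//		return b.Put([]byte("foo"), []byte("bar"))
+//	})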
+func (db *DB) Update(fn func(*Tx) error) error {
+ t, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually commit.
+ t.managed = true
+
+ // If an error is returned from the function then rollback and return error.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Commit()
+}
+
+// View executes a function within the context of a managed read-only transaction.
+// Any error that is returned from the function is returned from the View() method.
+//
+// Attempting to manually rollback within the function will cause a panic.
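+//
+// A minimal usage sketch (the bucket and key names below are illustrative,
+// not part of this package):
+//
+//	err := db.View(func(tx *Tx) error {
+//		b := tx.Bucket([]byte("widgets"))
+//		if b == nil {
+//			return ErrBucketNotFound
+//		}
+//		_ = b.Get([]byte("foo")) // only valid until the transaction ends
+//		return nil
+//	})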
+func (db *DB) View(fn func(*Tx) error) error {
+ t, err := db.Begin(false)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually rollback.
+ t.managed = true
+
+ // If an error is returned from the function then pass it through.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Rollback()
+}
+
+// Batch calls fn as part of a batch. It behaves similar to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns an error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen by the
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
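+//
+// A minimal sketch of calling Batch from several goroutines (the bucket and
+// key names are illustrative, and goroutine synchronization is omitted for
+// brevity):
+//
+//	for i := 0; i < 10; i++ {
+//		go func(i int) {
+//			_ = db.Batch(func(tx *Tx) error {
+//				b, err := tx.CreateBucketIfNotExists([]byte("events"))
+//				if err != nil {
+//					return err
+//				}
+//				return b.Put([]byte(fmt.Sprintf("key-%d", i)), []byte("v"))
+//			})
+//		}(i)
+//	}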
+func (db *DB) Batch(fn func(*Tx) error) error {
+ errCh := make(chan error, 1)
+
+ db.batchMu.Lock()
+ if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
+ // There is no existing batch, or the existing batch is full; start a new one.
+ db.batch = &batch{
+ db: db,
+ }
+ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+ }
+ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+ if len(db.batch.calls) >= db.MaxBatchSize {
+ // wake up batch, it's ready to run
+ go db.batch.trigger()
+ }
+ db.batchMu.Unlock()
+
+ err := <-errCh
+ if err == trySolo {
+ err = db.Update(fn)
+ }
+ return err
+}
+
+type call struct {
+ fn func(*Tx) error
+ err chan<- error
+}
+
+type batch struct {
+ db *DB
+ timer *time.Timer
+ start sync.Once
+ calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+ b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+ b.db.batchMu.Lock()
+ b.timer.Stop()
+ // Make sure no new work is added to this batch, but don't break
+ // other batches.
+ if b.db.batch == b {
+ b.db.batch = nil
+ }
+ b.db.batchMu.Unlock()
+
+retry:
+ for len(b.calls) > 0 {
+ var failIdx = -1
+ err := b.db.Update(func(tx *Tx) error {
+ for i, c := range b.calls {
+ if err := safelyCall(c.fn, tx); err != nil {
+ failIdx = i
+ return err
+ }
+ }
+ return nil
+ })
+
+ if failIdx >= 0 {
+ // take the failing transaction out of the batch. it's
+ // safe to shorten b.calls here because db.batch no longer
+ // points to us, and we hold the mutex anyway.
+ c := b.calls[failIdx]
+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+ // tell the submitter to re-run it solo, and continue with the rest of the batch
+ c.err <- trySolo
+ continue retry
+ }
+
+ // pass success, or bolt internal errors, to all callers
+ for _, c := range b.calls {
+ c.err <- err
+ }
+ break retry
+ }
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+ reason interface{}
+}
+
+func (p panicked) Error() string {
+ if err, ok := p.reason.(error); ok {
+ return err.Error()
+ }
+ return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = panicked{p}
+ }
+ }()
+ return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation. However, if you use NoSync
+// it allows you to force the database file to sync against the disk.
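+//
+// For example (a sketch), a bulk loader running with db.NoSync = true might
+// periodically call:
+//
+//	if err := db.Sync(); err != nil {
+//		return err
+//	}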
+func (db *DB) Sync() error { return fdatasync(db) }
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
+ return db.stats
+}
+
+// Info is for internal access to the raw data bytes from the C cursor; use
+// carefully, or not at all.
+func (db *DB) Info() *Info {
+ return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+ pos := id * pgid(db.pageSize)
+ return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+ return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+ // We have to return the meta with the highest txid which doesn't fail
+ // validation. Otherwise, we can cause errors when in fact the database is
+ // in a consistent state. metaA is the one with the higher txid.
+ metaA := db.meta0
+ metaB := db.meta1
+ if db.meta1.txid > db.meta0.txid {
+ metaA = db.meta1
+ metaB = db.meta0
+ }
+
+ // Use higher meta page if valid. Otherwise fallback to previous, if valid.
+ if err := metaA.validate(); err == nil {
+ return metaA
+ } else if err := metaB.validate(); err == nil {
+ return metaB
+ }
+
+ // This should never be reached, because both meta1 and meta0 were validated
+ // on mmap() and we do fsync() on every write.
+ panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(txid txid, count int) (*page, error) {
+ // Allocate a temporary buffer for the page.
+ var buf []byte
+ if count == 1 {
+ buf = db.pagePool.Get().([]byte)
+ } else {
+ buf = make([]byte, count*db.pageSize)
+ }
+ p := (*page)(unsafe.Pointer(&buf[0]))
+ p.overflow = uint32(count - 1)
+
+ // Use pages from the freelist if they are available.
+ if p.id = db.freelist.allocate(txid, count); p.id != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.id = db.rwtx.meta.pgid
+ var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ db.rwtx.meta.pgid += pgid(count)
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+func (db *DB) freepages() []pgid {
+ tx, err := db.beginTx()
+ defer func() {
+ err = tx.Rollback()
+ if err != nil {
+ panic("freepages: failed to rollback tx")
+ }
+ }()
+ if err != nil {
+ panic("freepages: failed to open read only tx")
+ }
+
+ reachable := make(map[pgid]*page)
+ nofreed := make(map[pgid]bool)
+ ech := make(chan error)
+ go func() {
+ for e := range ech {
+ panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+ }
+ }()
+ tx.checkBucket(&tx.root, reachable, nofreed, ech)
+ close(ech)
+
+ var fids []pgid
+ for i := pgid(2); i < db.meta().pgid; i++ {
+ if _, ok := reachable[i]; !ok {
+ fids = append(fids, i)
+ }
+ }
+ return fids
+}
+
+// Options represents the options that can be set when opening a database.
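+//
+// A minimal sketch of passing Options to Open (the path and file mode are
+// illustrative):
+//
+//	db, err := Open("my.db", 0600, &Options{
+//		Timeout:      1 * time.Second,
+//		FreelistType: FreelistMapType,
+//	})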
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Do not sync freelist to disk. This improves the database write performance
+ // under normal operation, but requires a full database re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options: array, which is
+ // simple but suffers dramatic performance degradation if the database is large and
+ // fragmentation in the freelist is common; and hashmap, which is faster in almost all
+ // circumstances but does not guarantee that it offers the smallest page id available.
+ // In normal cases it is safe. The default type is array.
+ FreelistType FreelistType
+
+ // ReadOnly opens the database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB)
+ // to grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transaction
+ // if the InitialMmapSize is large enough to hold database mmap
+ // size. (See DB.Begin for more information)
+ //
+ // If <=0, the initial map size is 0.
+ // If InitialMmapSize is smaller than the previous database size,
+ // it has no effect.
+ InitialMmapSize int
+
+ // PageSize overrides the default OS page size.
+ PageSize int
+
+ // NoSync sets the initial value of DB.NoSync. Normally this can just be
+ // set directly on the DB itself when returned from Open(), but this option
+ // is useful in APIs which expose Options but not the underlying DB.
+ NoSync bool
+
+ // OpenFile is used to open files. It defaults to os.OpenFile. This option
+ // is useful for writing hermetic tests.
+ OpenFile func(string, int, os.FileMode) (*os.File, error)
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+ FreelistType: FreelistArrayType,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+
+ TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
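+//
+// For example (a sketch):
+//
+//	prev := db.Stats()
+//	// ... perform work against the database ...
+//	cur := db.Stats()
+//	diff := cur.Sub(&prev)
+//	_ = diff.TxN // read transactions started within that span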
+func (s *Stats) Sub(other *Stats) Stats {
+ if other == nil {
+ return *s
+ }
+ var diff Stats
+ diff.FreePageN = s.FreePageN
+ diff.PendingPageN = s.PendingPageN
+ diff.FreeAlloc = s.FreeAlloc
+ diff.FreelistInuse = s.FreelistInuse
+ diff.TxN = s.TxN - other.TxN
+ diff.TxStats = s.TxStats.Sub(&other.TxStats)
+ return diff
+}
+
+type Info struct {
+ Data uintptr
+ PageSize int
+}
+
+type meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root bucket
+ freelist pgid
+ pgid pgid
+ txid txid
+ checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+ if m.magic != magic {
+ return ErrInvalid
+ } else if m.version != version {
+ return ErrVersionMismatch
+ } else if m.checksum != 0 && m.checksum != m.sum64() {
+ return ErrChecksum
+ }
+ return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+ *dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+ if m.root.root >= m.pgid {
+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
+ // TODO: reject pgidNoFreeList if !NoFreelistSync
+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ }
+
+ // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ p.id = pgid(m.txid % 2)
+ p.flags |= metaPageFlag
+
+ // Calculate the checksum.
+ m.checksum = m.sum64()
+
+ m.copy(p.meta())
+}
+
+// generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+ var h = fnv.New64a()
+ _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go
new file mode 100644
index 000000000..95f25f01c
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bbolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
+
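+A minimal usage sketch (the file path, bucket, and key names below are
+illustrative, not part of the API):
+
+	db, err := bbolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bbolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err
+		}
+		return b.Put([]byte("foo"), []byte("bar"))
+	})
+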
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database. However, this also means that keys
+and values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
+
+
+*/
+package bbolt
diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go
new file mode 100644
index 000000000..48758ca57
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/errors.go
@@ -0,0 +1,71 @@
+package bbolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+ // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+ // is opened or after it is closed.
+ ErrDatabaseNotOpen = errors.New("database not open")
+
+ // ErrDatabaseOpen is returned when opening a database that is
+ // already open.
+ ErrDatabaseOpen = errors.New("database already open")
+
+ // ErrInvalid is returned when both meta pages on a database are invalid.
+ // This typically occurs when a file is not a bolt database.
+ ErrInvalid = errors.New("invalid database")
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ ErrVersionMismatch = errors.New("version mismatch")
+
+ // ErrChecksum is returned when either meta page checksum does not match.
+ ErrChecksum = errors.New("checksum error")
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ ErrTxNotWritable = errors.New("tx not writable")
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ ErrBucketNotFound = errors.New("bucket not found")
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ ErrBucketExists = errors.New("bucket already exists")
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ ErrBucketNameRequired = errors.New("bucket name required")
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ ErrKeyRequired = errors.New("key required")
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go
new file mode 100644
index 000000000..587b8cc02
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/freelist.go
@@ -0,0 +1,392 @@
+package bbolt
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// txPending holds a list of pgids and corresponding allocation txns
+// that are pending to be freed.
+type txPending struct {
+ ids []pgid
+ alloctx []txid // txids allocating the ids
+ lastReleaseBegin txid // beginning txid of last matching releaseRange
+}
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[pgid]struct{}
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ freelistType FreelistType // freelist type
+ ids []pgid // all free and available free page ids.
+ allocs map[pgid]txid // mapping of txid that allocated a pgid.
+ pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+ freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
+ forwardMap map[pgid]uint64 // key is start pgid, value is its span size
+ backwardMap map[pgid]uint64 // key is end pgid, value is its span size
+ allocate func(txid txid, n int) pgid // the freelist allocate func
+ free_count func() int // returns the number of free pages
+ mergeSpans func(ids pgids) // the mergeSpans func
+ getFreePageIDs func() []pgid // returns the free page ids
+ readIDs func(pgids []pgid) // reads a list of page ids and initializes the freelist
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist(freelistType FreelistType) *freelist {
+ f := &freelist{
+ freelistType: freelistType,
+ allocs: make(map[pgid]txid),
+ pending: make(map[txid]*txPending),
+ cache: make(map[pgid]bool),
+ freemaps: make(map[uint64]pidSet),
+ forwardMap: make(map[pgid]uint64),
+ backwardMap: make(map[pgid]uint64),
+ }
+
+ if freelistType == FreelistMapType {
+ f.allocate = f.hashmapAllocate
+ f.free_count = f.hashmapFreeCount
+ f.mergeSpans = f.hashmapMergeSpans
+ f.getFreePageIDs = f.hashmapGetFreePageIDs
+ f.readIDs = f.hashmapReadIDs
+ } else {
+ f.allocate = f.arrayAllocate
+ f.free_count = f.arrayFreeCount
+ f.mergeSpans = f.arrayMergeSpans
+ f.getFreePageIDs = f.arrayGetFreePageIDs
+ f.readIDs = f.arrayReadIDs
+ }
+
+ return f
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+ n := f.count()
+ if n >= 0xFFFF {
+ // The first element will be used to store the count. See freelist.write.
+ n++
+ }
+ return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+ return f.free_count() + f.pending_count()
+}
+
+// arrayFreeCount returns count of free pages (array version)
+func (f *freelist) arrayFreeCount() int {
+ return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+ var count int
+ for _, txp := range f.pending {
+ count += len(txp.ids)
+ }
+ return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+ m := make(pgids, 0, f.pending_count())
+ for _, txp := range f.pending {
+ m = append(m, txp.ids...)
+ }
+ sort.Sort(m)
+ mergepgids(dst, f.getFreePageIDs(), m)
+}
+
+// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) arrayAllocate(txid txid, n int) pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+ f.allocs[initial] = txid
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+ if p.id <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+ }
+
+ // Free page and all its overflow pages.
+ txp := f.pending[txid]
+ if txp == nil {
+ txp = &txPending{}
+ f.pending[txid] = txp
+ }
+ allocTxid, ok := f.allocs[p.id]
+ if ok {
+ delete(f.allocs, p.id)
+ } else if (p.flags & freelistPageFlag) != 0 {
+ // Freelist is always allocated by prior tx.
+ allocTxid = txid - 1
+ }
+
+ for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ // Verify that page is not already free.
+ if f.cache[id] {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+ // Add to the freelist and cache.
+ txp.ids = append(txp.ids, id)
+ txp.alloctx = append(txp.alloctx, allocTxid)
+ f.cache[id] = true
+ }
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+ m := make(pgids, 0)
+ for tid, txp := range f.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+ m = append(m, txp.ids...)
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+func (f *freelist) releaseRange(begin, end txid) {
+ if begin > end {
+ return
+ }
+ var m pgids
+ for tid, txp := range f.pending {
+ if tid < begin || tid > end {
+ continue
+ }
+ // Don't recompute freed pages if ranges haven't updated.
+ if txp.lastReleaseBegin == begin {
+ continue
+ }
+ for i := 0; i < len(txp.ids); i++ {
+ if atx := txp.alloctx[i]; atx < begin || atx > end {
+ continue
+ }
+ m = append(m, txp.ids[i])
+ txp.ids[i] = txp.ids[len(txp.ids)-1]
+ txp.ids = txp.ids[:len(txp.ids)-1]
+ txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+ txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+ i--
+ }
+ txp.lastReleaseBegin = begin
+ if len(txp.ids) == 0 {
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+ // Remove page ids from cache.
+ txp := f.pending[txid]
+ if txp == nil {
+ return
+ }
+ var m pgids
+ for i, pgid := range txp.ids {
+ delete(f.cache, pgid)
+ tx := txp.alloctx[i]
+ if tx == 0 {
+ continue
+ }
+ if tx != txid {
+ // Pending free aborted; restore page back to alloc list.
+ f.allocs[pgid] = tx
+ } else {
+ // Freed page was allocated by this txn; OK to throw away.
+ m = append(m, pgid)
+ }
+ }
+ // Remove pages from pending list and mark as free if allocated by txid.
+ delete(f.pending, txid)
+ f.mergeSpans(m)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+ return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+ if (p.flags & freelistPageFlag) == 0 {
+ panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
+ }
+ // If the page.count is at the max uint16 value (64k) then it's considered
+ // an overflow and the size of the freelist is stored as the first element.
+ idx, count := 0, int(p.count)
+ if count == 0xFFFF {
+ idx = 1
+ count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+ }
+
+ // Copy the list of page ids from the freelist.
+ if count == 0 {
+ f.ids = nil
+ } else {
+ ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+
+ // copy the ids, so we don't modify on the freelist page directly
+ idsCopy := make([]pgid, count)
+ copy(idsCopy, ids)
+ // Make sure they're sorted.
+ sort.Sort(pgids(idsCopy))
+
+ f.readIDs(idsCopy)
+ }
+}
+
+// arrayReadIDs initializes the freelist from a given list of ids.
+func (f *freelist) arrayReadIDs(ids []pgid) {
+ f.ids = ids
+ f.reindex()
+}
+
+func (f *freelist) arrayGetFreePageIDs() []pgid {
+ return f.ids
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.flags |= freelistPageFlag
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ lenids := f.count()
+ if lenids == 0 {
+ p.count = uint16(lenids)
+ } else if lenids < 0xFFFF {
+ p.count = uint16(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+ } else {
+ p.count = 0xFFFF
+ ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+ }
+
+ return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+ f.read(p)
+
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range f.getFreePageIDs() {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// noSyncReload reads the freelist from pgids and filters out pending items.
+func (f *freelist) noSyncReload(pgids []pgid) {
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range pgids {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+ ids := f.getFreePageIDs()
+ f.cache = make(map[pgid]bool, len(ids))
+ for _, id := range ids {
+ f.cache[id] = true
+ }
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ f.cache[pendingID] = true
+ }
+ }
+}
+
+// arrayMergeSpans tries to merge a list of pages (represented by pgids) with existing spans, using the array backend
+func (f *freelist) arrayMergeSpans(ids pgids) {
+ sort.Sort(ids)
+ f.ids = pgids(f.ids).merge(ids)
+}
diff --git a/vendor/github.com/coreos/bbolt/freelist_hmap.go b/vendor/github.com/coreos/bbolt/freelist_hmap.go
new file mode 100644
index 000000000..6a03a6c3c
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/freelist_hmap.go
@@ -0,0 +1,178 @@
+package bbolt
+
+import "sort"
+
+// hashmapFreeCount returns count of free pages (hashmap version)
+func (f *freelist) hashmapFreeCount() int {
+ // use the forwardmap to get the total count
+ count := 0
+ for _, size := range f.forwardMap {
+ count += int(size)
+ }
+ return count
+}
+
+// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
+func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
+ if n == 0 {
+ return 0
+ }
+
+ // if we have an exact size match, take the short path
+ if bm, ok := f.freemaps[uint64(n)]; ok {
+ for pid := range bm {
+ // remove the span
+ f.delSpan(pid, uint64(n))
+
+ f.allocs[pid] = txid
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ // look up the map to find a larger span
+ for size, bm := range f.freemaps {
+ if size < uint64(n) {
+ continue
+ }
+
+ for pid := range bm {
+ // remove the initial
+ f.delSpan(pid, uint64(size))
+
+ f.allocs[pid] = txid
+
+ remain := size - uint64(n)
+
+ // add remain span
+ f.addSpan(pid+pgid(n), remain)
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ return 0
+}
+
+// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
+func (f *freelist) hashmapReadIDs(pgids []pgid) {
+ f.init(pgids)
+
+ // Rebuild the page cache.
+ f.reindex()
+}
+
+// hashmapGetFreePageIDs returns the sorted free page ids
+func (f *freelist) hashmapGetFreePageIDs() []pgid {
+ count := f.free_count()
+ if count == 0 {
+ return nil
+ }
+
+ m := make([]pgid, 0, count)
+ for start, size := range f.forwardMap {
+ for i := 0; i < int(size); i++ {
+ m = append(m, start+pgid(i))
+ }
+ }
+ sort.Sort(pgids(m))
+
+ return m
+}
+
+// hashmapMergeSpans tries to merge a list of pages (represented by pgids) with existing spans
+func (f *freelist) hashmapMergeSpans(ids pgids) {
+ for _, id := range ids {
+ // try to see if we can merge and update
+ f.mergeWithExistingSpan(id)
+ }
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
+func (f *freelist) mergeWithExistingSpan(pid pgid) {
+ prev := pid - 1
+ next := pid + 1
+
+ preSize, mergeWithPrev := f.backwardMap[prev]
+ nextSize, mergeWithNext := f.forwardMap[next]
+ newStart := pid
+ newSize := uint64(1)
+
+ if mergeWithPrev {
+ //merge with previous span
+ start := prev + 1 - pgid(preSize)
+ f.delSpan(start, preSize)
+
+ newStart -= pgid(preSize)
+ newSize += preSize
+ }
+
+ if mergeWithNext {
+ // merge with next span
+ f.delSpan(next, nextSize)
+ newSize += nextSize
+ }
+
+ f.addSpan(newStart, newSize)
+}
+
+func (f *freelist) addSpan(start pgid, size uint64) {
+ f.backwardMap[start-1+pgid(size)] = size
+ f.forwardMap[start] = size
+ if _, ok := f.freemaps[size]; !ok {
+ f.freemaps[size] = make(map[pgid]struct{})
+ }
+
+ f.freemaps[size][start] = struct{}{}
+}
+
+func (f *freelist) delSpan(start pgid, size uint64) {
+ delete(f.forwardMap, start)
+ delete(f.backwardMap, start+pgid(size-1))
+ delete(f.freemaps[size], start)
+ if len(f.freemaps[size]) == 0 {
+ delete(f.freemaps, size)
+ }
+}
+
+// init initializes the freelist from a sorted list of pgids (used by the hashmap version).
+// pgids must be sorted.
+func (f *freelist) init(pgids []pgid) {
+ if len(pgids) == 0 {
+ return
+ }
+
+ size := uint64(1)
+ start := pgids[0]
+
+ if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+ panic("pgids not sorted")
+ }
+
+ f.freemaps = make(map[uint64]pidSet)
+ f.forwardMap = make(map[pgid]uint64)
+ f.backwardMap = make(map[pgid]uint64)
+
+ for i := 1; i < len(pgids); i++ {
+ // continuous page
+ if pgids[i] == pgids[i-1]+1 {
+ size++
+ } else {
+ f.addSpan(start, size)
+
+ size = 1
+ start = pgids[i]
+ }
+ }
+
+ // init the tail
+ if size != 0 && start != 0 {
+ f.addSpan(start, size)
+ }
+}
diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go
new file mode 100644
index 000000000..6c3fa553e
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/node.go
@@ -0,0 +1,604 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+ bucket *Bucket
+ isLeaf bool
+ unbalanced bool
+ spilled bool
+ key []byte
+ pgid pgid
+ parent *node
+ children nodes
+ inodes inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+ if n.parent == nil {
+ return n
+ }
+ return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+ if n.isLeaf {
+ return 1
+ }
+ return 2
+}
+
+// size returns the size of the node after serialization.
+func (n *node) size() int {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + len(item.key) + len(item.value)
+ }
+ return sz
+}
+
+// sizeLessThan returns true if the node is less than a given size.
+// This is an optimization to avoid calculating a large node when we only need
+// to know if it fits inside a certain page size.
+func (n *node) sizeLessThan(v int) bool {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + len(item.key) + len(item.value)
+ if sz >= v {
+ return false
+ }
+ }
+ return true
+}
+
+// pageElementSize returns the size of each page element based on the type of node.
+func (n *node) pageElementSize() int {
+ if n.isLeaf {
+ return leafPageElementSize
+ }
+ return branchPageElementSize
+}
+
+// childAt returns the child node at a given index.
+func (n *node) childAt(index int) *node {
+ if n.isLeaf {
+ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+ }
+ return n.bucket.node(n.inodes[index].pgid, n)
+}
+
+// childIndex returns the index of a given child node.
+func (n *node) childIndex(child *node) int {
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
+ return index
+}
+
+// numChildren returns the number of children.
+func (n *node) numChildren() int {
+ return len(n.inodes)
+}
+
+// nextSibling returns the next node with the same parent.
+func (n *node) nextSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index >= n.parent.numChildren()-1 {
+ return nil
+ }
+ return n.parent.childAt(index + 1)
+}
+
+// prevSibling returns the previous node with the same parent.
+func (n *node) prevSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index == 0 {
+ return nil
+ }
+ return n.parent.childAt(index - 1)
+}
+
+// put inserts a key/value.
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+ if pgid >= n.bucket.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+ } else if len(oldKey) <= 0 {
+ panic("put: zero-length old key")
+ } else if len(newKey) <= 0 {
+ panic("put: zero-length new key")
+ }
+
+ // Find insertion index.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+
+ // Add capacity and shift nodes if we don't have an exact match and need to insert.
+ exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
+ if !exact {
+ n.inodes = append(n.inodes, inode{})
+ copy(n.inodes[index+1:], n.inodes[index:])
+ }
+
+ inode := &n.inodes[index]
+ inode.flags = flags
+ inode.key = newKey
+ inode.value = value
+ inode.pgid = pgid
+ _assert(len(inode.key) > 0, "put: zero-length inode key")
+}
+
+// del removes a key from the node.
+func (n *node) del(key []byte) {
+ // Find index of key.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
+
+ // Exit if the key isn't found.
+ if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
+ return
+ }
+
+ // Delete inode from the node.
+ n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
+
+ // Mark the node as needing rebalancing.
+ n.unbalanced = true
+}
+
+// read initializes the node from a page.
+func (n *node) read(p *page) {
+ n.pgid = p.id
+ n.isLeaf = ((p.flags & leafPageFlag) != 0)
+ n.inodes = make(inodes, int(p.count))
+
+ for i := 0; i < int(p.count); i++ {
+ inode := &n.inodes[i]
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ inode.flags = elem.flags
+ inode.key = elem.key()
+ inode.value = elem.value()
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ inode.pgid = elem.pgid
+ inode.key = elem.key()
+ }
+ _assert(len(inode.key) > 0, "read: zero-length inode key")
+ }
+
+ // Save first key so we can find the node in the parent when we spill.
+ if len(n.inodes) > 0 {
+ n.key = n.inodes[0].key
+ _assert(len(n.key) > 0, "read: zero-length node key")
+ } else {
+ n.key = nil
+ }
+}
+
+// write writes the items onto one or more pages.
+func (n *node) write(p *page) {
+ // Initialize page.
+ if n.isLeaf {
+ p.flags |= leafPageFlag
+ } else {
+ p.flags |= branchPageFlag
+ }
+
+ if len(n.inodes) >= 0xFFFF {
+ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+ }
+ p.count = uint16(len(n.inodes))
+
+ // Stop here if there are no items to write.
+ if p.count == 0 {
+ return
+ }
+
+ // Loop over each item and write it to the page.
+ b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+ for i, item := range n.inodes {
+ _assert(len(item.key) > 0, "write: zero-length inode key")
+
+ // Write the page element.
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.flags = item.flags
+ elem.ksize = uint32(len(item.key))
+ elem.vsize = uint32(len(item.value))
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.ksize = uint32(len(item.key))
+ elem.pgid = item.pgid
+ _assert(elem.pgid != p.id, "write: circular dependency occurred")
+ }
+
+ // If the length of key+value is larger than the max allocation size
+ // then we need to reallocate the byte array pointer.
+ //
+ // See: https://github.com/boltdb/bolt/pull/335
+ klen, vlen := len(item.key), len(item.value)
+ if len(b) < klen+vlen {
+ b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+ }
+
+ // Write data for the element to the end of the page.
+ copy(b[0:], item.key)
+ b = b[klen:]
+ copy(b[0:], item.value)
+ b = b[vlen:]
+ }
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = i
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if i >= minKeysPerPage && sz+elsize > threshold {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
+ var nodes = n.split(tx.db.pageSize)
+ for _, node := range nodes {
+ // Add node's page to the freelist if it's not new.
+ if node.pgid > 0 {
+ tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+ node.pgid = 0
+ }
+
+ // Allocate contiguous space for the node.
+ p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
+ if err != nil {
+ return err
+ }
+
+ // Write the node.
+ if p.id >= tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ }
+ node.pgid = p.id
+ node.write(p)
+ node.spilled = true
+
+ // Insert into parent inodes.
+ if node.parent != nil {
+ var key = node.key
+ if key == nil {
+ key = node.inodes[0].key
+ }
+
+ node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+ node.key = node.inodes[0].key
+ _assert(len(node.key) > 0, "spill: zero-length node key")
+ }
+
+ // Update the statistics.
+ tx.stats.Spill++
+ }
+
+ // If the root node split and created a new root then we need to spill that
+ // as well. We'll clear out the children to make sure it doesn't try to respill.
+ if n.parent != nil && n.parent.pgid == 0 {
+ n.children = nil
+ return n.parent.spill()
+ }
+
+ return nil
+}
+
+// rebalance attempts to combine the node with sibling nodes if the node fill
+// size is below a threshold or if there are not enough keys.
+func (n *node) rebalance() {
+ if !n.unbalanced {
+ return
+ }
+ n.unbalanced = false
+
+ // Update statistics.
+ n.bucket.tx.stats.Rebalance++
+
+ // Ignore if node is above threshold (25%) and has enough keys.
+ var threshold = n.bucket.tx.db.pageSize / 4
+ if n.size() > threshold && len(n.inodes) > n.minKeys() {
+ return
+ }
+
+ // Root node has special handling.
+ if n.parent == nil {
+ // If root node is a branch and only has one node then collapse it.
+ if !n.isLeaf && len(n.inodes) == 1 {
+ // Move root's child up.
+ child := n.bucket.node(n.inodes[0].pgid, n)
+ n.isLeaf = child.isLeaf
+ n.inodes = child.inodes[:]
+ n.children = child.children
+
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent = n
+ }
+ }
+
+ // Remove old child.
+ child.parent = nil
+ delete(n.bucket.nodes, child.pgid)
+ child.free()
+ }
+
+ return
+ }
+
+ // If node has no keys then just remove it.
+ if n.numChildren() == 0 {
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ n.parent.rebalance()
+ return
+ }
+
+ _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+
+ // Destination node is right sibling if idx == 0, otherwise left sibling.
+ var target *node
+ var useNextSibling = (n.parent.childIndex(n) == 0)
+ if useNextSibling {
+ target = n.nextSibling()
+ } else {
+ target = n.prevSibling()
+ }
+
+ // If both this node and the target node are too small then merge them.
+ if useNextSibling {
+ // Reparent all child nodes being moved.
+ for _, inode := range target.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = n
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes from target and remove target.
+ n.inodes = append(n.inodes, target.inodes...)
+ n.parent.del(target.key)
+ n.parent.removeChild(target)
+ delete(n.bucket.nodes, target.pgid)
+ target.free()
+ } else {
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = target
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes to target and remove node.
+ target.inodes = append(target.inodes, n.inodes...)
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ }
+
+ // Either this node or the target node was deleted from the parent so rebalance it.
+ n.parent.rebalance()
+}
+
+// removeChild removes a node from the list of in-memory children.
+// This does not affect the inodes.
+func (n *node) removeChild(target *node) {
+ for i, child := range n.children {
+ if child == target {
+ n.children = append(n.children[:i], n.children[i+1:]...)
+ return
+ }
+ }
+}
+
+// dereference causes the node to copy all its inode key/value references to heap memory.
+// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+func (n *node) dereference() {
+ if n.key != nil {
+ key := make([]byte, len(n.key))
+ copy(key, n.key)
+ n.key = key
+ _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+ }
+
+ for i := range n.inodes {
+ inode := &n.inodes[i]
+
+ key := make([]byte, len(inode.key))
+ copy(key, inode.key)
+ inode.key = key
+ _assert(len(inode.key) > 0, "dereference: zero-length inode key")
+
+ value := make([]byte, len(inode.value))
+ copy(value, inode.value)
+ inode.value = value
+ }
+
+ // Recursively dereference children.
+ for _, child := range n.children {
+ child.dereference()
+ }
+
+ // Update statistics.
+ n.bucket.tx.stats.NodeDeref++
+}
+
+// free adds the node's underlying page to the freelist.
+func (n *node) free() {
+ if n.pgid != 0 {
+ n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
+ n.pgid = 0
+ }
+}
+
+// dump writes the contents of the node to STDERR for debugging purposes.
+/*
+func (n *node) dump() {
+ // Write node header.
+ var typ = "branch"
+ if n.isLeaf {
+ typ = "leaf"
+ }
+ warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
+
+ // Write out abbreviated version of each item.
+ for _, item := range n.inodes {
+ if n.isLeaf {
+ if item.flags&bucketLeafFlag != 0 {
+ bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
+ warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
+ } else {
+ warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
+ }
+ } else {
+ warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
+ }
+ }
+ warn("")
+}
+*/
+
+type nodes []*node
+
+func (s nodes) Len() int { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+
+// inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type inode struct {
+ flags uint32
+ pgid pgid
+ key []byte
+ value []byte
+}
+
+type inodes []inode
diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/coreos/bbolt/page.go
new file mode 100644
index 000000000..bca9615f0
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/page.go
@@ -0,0 +1,197 @@
+package bbolt
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "unsafe"
+)
+
+const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+
+const minKeysPerPage = 2
+
+const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
+const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+
+const (
+ branchPageFlag = 0x01
+ leafPageFlag = 0x02
+ metaPageFlag = 0x04
+ freelistPageFlag = 0x10
+)
+
+const (
+ bucketLeafFlag = 0x01
+)
+
+type pgid uint64
+
+type page struct {
+ id pgid
+ flags uint16
+ count uint16
+ overflow uint32
+ ptr uintptr
+}
+
+// typ returns a human readable page type string used for debugging.
+func (p *page) typ() string {
+ if (p.flags & branchPageFlag) != 0 {
+ return "branch"
+ } else if (p.flags & leafPageFlag) != 0 {
+ return "leaf"
+ } else if (p.flags & metaPageFlag) != 0 {
+ return "meta"
+ } else if (p.flags & freelistPageFlag) != 0 {
+ return "freelist"
+ }
+ return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+// meta returns a pointer to the metadata section of the page.
+func (p *page) meta() *meta {
+ return (*meta)(unsafe.Pointer(&p.ptr))
+}
+
+// leafPageElement retrieves the leaf node by index
+func (p *page) leafPageElement(index uint16) *leafPageElement {
+ n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
+ return n
+}
+
+// leafPageElements retrieves a list of leaf nodes.
+func (p *page) leafPageElements() []leafPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// branchPageElement retrieves the branch node by index
+func (p *page) branchPageElement(index uint16) *branchPageElement {
+ return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+}
+
+// branchPageElements retrieves a list of branch nodes.
+func (p *page) branchPageElements() []branchPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// dump writes n bytes of the page to STDERR as hex output.
+func (p *page) hexdump(n int) {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+ fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+type pages []*page
+
+func (s pages) Len() int { return len(s) }
+func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+ pos uint32
+ ksize uint32
+ pgid pgid
+}
+
+// key returns a byte slice of the node key.
+func (n *branchPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+ flags uint32
+ pos uint32
+ ksize uint32
+ vsize uint32
+}
+
+// key returns a byte slice of the node key.
+func (n *leafPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+}
+
+// value returns a byte slice of the node value.
+func (n *leafPageElement) value() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+}
+
+// PageInfo represents human readable information about a page.
+type PageInfo struct {
+ ID int
+ Type string
+ Count int
+ OverflowCount int
+}
+
+type pgids []pgid
+
+func (s pgids) Len() int { return len(s) }
+func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// merge returns the sorted union of a and b.
+func (a pgids) merge(b pgids) pgids {
+ // Return the opposite slice if one is nil.
+ if len(a) == 0 {
+ return b
+ }
+ if len(b) == 0 {
+ return a
+ }
+ merged := make(pgids, len(a)+len(b))
+ mergepgids(merged, a, b)
+ return merged
+}
+
+// mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func mergepgids(dst, a, b pgids) {
+ if len(dst) < len(a)+len(b) {
+ panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+ }
+ // Copy in the opposite slice if one is nil.
+ if len(a) == 0 {
+ copy(dst, b)
+ return
+ }
+ if len(b) == 0 {
+ copy(dst, a)
+ return
+ }
+
+ // Merged will hold all elements from both lists.
+ merged := dst[:0]
+
+ // Assign lead to the slice with a lower starting value, follow to the higher value.
+ lead, follow := a, b
+ if b[0] < a[0] {
+ lead, follow = b, a
+ }
+
+ // Continue while there are elements in the lead.
+ for len(lead) > 0 {
+ // Merge largest prefix of lead that is ahead of follow[0].
+ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+ merged = append(merged, lead[:n]...)
+ if n >= len(lead) {
+ break
+ }
+
+ // Swap lead and follow.
+ lead, follow = follow, lead[n:]
+ }
+
+ // Append what's left in follow.
+ _ = append(merged, follow...)
+}
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go
new file mode 100644
index 000000000..2df7688c2
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/tx.go
@@ -0,0 +1,726 @@
+package bbolt
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+// txid represents the internal transaction identifier.
+type txid uint64
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or rollback transactions when you are done with
+// them. Pages can not be reclaimed by the writer until no more transactions
+// are using them. A long running read transaction can cause the database to
+// quickly grow.
+type Tx struct {
+ writable bool
+ managed bool
+ db *DB
+ meta *meta
+ root Bucket
+ pages map[pgid]*page
+ stats TxStats
+ commitHandlers []func()
+
+ // WriteFlag specifies the flag for write-related methods like WriteTo().
+ // Tx opens the database file with the specified flag to copy the data.
+ //
+ // By default, the flag is unset, which works well for mostly in-memory
+ // workloads. For databases that are much larger than available RAM,
+ // set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+ WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+ tx.db = db
+ tx.pages = nil
+
+ // Copy the meta page since it can be changed by the writer.
+ tx.meta = &meta{}
+ db.meta().copy(tx.meta)
+
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.bucket = &bucket{}
+ *tx.root.bucket = tx.meta.root
+
+ // Increment the transaction id and add a page cache for writable transactions.
+ if tx.writable {
+ tx.pages = make(map[pgid]*page)
+ tx.meta.txid += txid(1)
+ }
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+ return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+ return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+ return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+ return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+ return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+ return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+ return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+ return tx.root.ForEach(func(k, v []byte) error {
+ return fn(k, tx.root.Bucket(k))
+ })
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+func (tx *Tx) OnCommit(fn func()) {
+ tx.commitHandlers = append(tx.commitHandlers, fn)
+}
+
+// Commit writes all changes to disk and updates the meta page.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() error {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ } else if !tx.writable {
+ return ErrTxNotWritable
+ }
+
+ // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+ // Rebalance nodes which have had deletions.
+ var startTime = time.Now()
+ tx.root.rebalance()
+ if tx.stats.Rebalance > 0 {
+ tx.stats.RebalanceTime += time.Since(startTime)
+ }
+
+ // spill data onto dirty pages.
+ startTime = time.Now()
+ if err := tx.root.spill(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.SpillTime += time.Since(startTime)
+
+ // Free the old root bucket.
+ tx.meta.root.root = tx.root.root
+
+ // Free the old freelist because commit writes out a fresh freelist.
+ if tx.meta.freelist != pgidNoFreelist {
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+ }
+
+ if !tx.db.NoFreelistSync {
+ err := tx.commitFreelist()
+ if err != nil {
+ return err
+ }
+ } else {
+ tx.meta.freelist = pgidNoFreelist
+ }
+
+ // Write dirty pages to disk.
+ startTime = time.Now()
+ if err := tx.write(); err != nil {
+ tx.rollback()
+ return err
+ }
+
+ // If strict mode is enabled then perform a consistency check.
+ // Only the first consistency error is reported in the panic.
+ if tx.db.StrictMode {
+ ch := tx.Check()
+ var errs []string
+ for {
+ err, ok := <-ch
+ if !ok {
+ break
+ }
+ errs = append(errs, err.Error())
+ }
+ if len(errs) > 0 {
+ panic("check fail: " + strings.Join(errs, "\n"))
+ }
+ }
+
+ // Write meta to disk.
+ if err := tx.writeMeta(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.WriteTime += time.Since(startTime)
+
+ // Finalize the transaction.
+ tx.close()
+
+ // Execute commit handlers now that the locks have been removed.
+ for _, fn := range tx.commitHandlers {
+ fn()
+ }
+
+ return nil
+}
+
+func (tx *Tx) commitFreelist() error {
+ // Allocate new pages for the new free list. This will overestimate
+ // the size of the freelist but not underestimate the size (which would be bad).
+ opgid := tx.meta.pgid
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+ if err := tx.db.freelist.write(p); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.meta.freelist = p.id
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.pgid > opgid {
+ if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ tx.rollback()
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ }
+ tx.nonPhysicalRollback()
+ return nil
+}
+
+// nonPhysicalRollback is called when the user calls Rollback directly; in this case we do not need to reload the free pages from disk.
+func (tx *Tx) nonPhysicalRollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ }
+ tx.close()
+}
+
+// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
+func (tx *Tx) rollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ if !tx.db.hasSyncedFreelist() {
+ // Reconstruct free page list by scanning the DB to get the whole free page list.
+ // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
+ tx.db.freelist.noSyncReload(tx.db.freepages())
+ } else {
+ // Read free page list from freelist page.
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ }
+ }
+ tx.close()
+}
+
+func (tx *Tx) close() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ // Grab freelist stats.
+ var freelistFreeN = tx.db.freelist.free_count()
+ var freelistPendingN = tx.db.freelist.pending_count()
+ var freelistAlloc = tx.db.freelist.size()
+
+ // Remove transaction ref & writer lock.
+ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.FreePageN = freelistFreeN
+ tx.db.stats.PendingPageN = freelistPendingN
+ tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+ tx.db.stats.FreelistInuse = freelistAlloc
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
+ } else {
+ tx.db.removeTx(tx)
+ }
+
+ // Clear all references.
+ tx.db = nil
+ tx.meta = nil
+ tx.root = Bucket{tx: tx}
+ tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+ _, err := tx.WriteTo(w)
+ return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ // Attempt to open reader with WriteFlag
+ f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ // Generate a meta page. We use the same page data for both meta pages.
+ buf := make([]byte, tx.db.pageSize)
+ page := (*page)(unsafe.Pointer(&buf[0]))
+ page.flags = metaPageFlag
+ *page.meta() = *tx.meta
+
+ // Write meta 0.
+ page.id = 0
+ page.meta().checksum = page.meta().sum64()
+ nn, err := w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 0 copy: %s", err)
+ }
+
+ // Write meta 1 with a lower transaction id.
+ page.id = 1
+ page.meta().txid -= 1
+ page.meta().checksum = page.meta().sum64()
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 1 copy: %s", err)
+ }
+
+ // Move past the meta pages in the file.
+ if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
+ return n, fmt.Errorf("seek: %s", err)
+ }
+
+ // Copy data pages.
+ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
+ n += wn
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
+// CopyFile copies the entire database to file at the given path.
+// A reader transaction is maintained during the copy so it is safe to continue
+// using the database while a copy is in progress.
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
+ f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+
+ err = tx.Copy(f)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
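+
+// copyFileExample is an illustrative sketch added for documentation purposes
+// and is not part of the upstream bbolt API: it takes a hot backup of an open
+// DB by running CopyFile inside a read transaction, so other readers and
+// writers can keep working during the copy. The backup path is hypothetical.
+func copyFileExample(db *DB) error {
+	return db.View(func(tx *Tx) error {
+		return tx.CopyFile("/tmp/backup.db", 0600)
+	})
+}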
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. This overhead can be removed if running on a read-only
+// transaction, however, it is not safe to execute other writer transactions at
+// the same time.
+func (tx *Tx) Check() <-chan error {
+ ch := make(chan error)
+ go tx.check(ch)
+ return ch
+}
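+
+// checkExample is an illustrative sketch added for documentation purposes and
+// is not part of the upstream bbolt API: it drains the channel returned by
+// Check (which is closed once the check completes) and returns the first
+// inconsistency found, if any.
+func checkExample(tx *Tx) error {
+	var first error
+	for err := range tx.Check() {
+		if first == nil {
+			first = err
+		}
+	}
+	return first
+}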
+
+func (tx *Tx) check(ch chan error) {
+ // Force loading free list if opened in ReadOnly mode.
+ tx.db.loadFreelist()
+
+ // Check if any pages are double freed.
+ freed := make(map[pgid]bool)
+ all := make([]pgid, tx.db.freelist.count())
+ tx.db.freelist.copyall(all)
+ for _, id := range all {
+ if freed[id] {
+ ch <- fmt.Errorf("page %d: already freed", id)
+ }
+ freed[id] = true
+ }
+
+ // Track every reachable page.
+ reachable := make(map[pgid]*page)
+ reachable[0] = tx.page(0) // meta0
+ reachable[1] = tx.page(1) // meta1
+ if tx.meta.freelist != pgidNoFreelist {
+ for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+ reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ }
+ }
+
+ // Recursively check buckets.
+ tx.checkBucket(&tx.root, reachable, freed, ch)
+
+ // Ensure all pages below high water mark are either reachable or freed.
+ for i := pgid(0); i < tx.meta.pgid; i++ {
+ _, isReachable := reachable[i]
+ if !isReachable && !freed[i] {
+ ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+ }
+ }
+
+ // Close the channel to signal completion.
+ close(ch)
+}
+
+func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
+ // Ignore inline buckets.
+ if b.root == 0 {
+ return
+ }
+
+ // Check every page used by this bucket.
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+ if p.id > tx.meta.pgid {
+ ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
+ }
+
+ // Ensure each page is only referenced once.
+ for i := pgid(0); i <= pgid(p.overflow); i++ {
+ var id = p.id + i
+ if _, ok := reachable[id]; ok {
+ ch <- fmt.Errorf("page %d: multiple references", int(id))
+ }
+ reachable[id] = p
+ }
+
+ // We should only encounter un-freed leaf and branch pages.
+ if freed[p.id] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+ } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+ ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+ }
+ })
+
+ // Check each bucket within this bucket.
+ _ = b.ForEach(func(k, v []byte) error {
+ if child := b.Bucket(k); child != nil {
+ tx.checkBucket(child, reachable, freed, ch)
+ }
+ return nil
+ })
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*page, error) {
+ p, err := tx.db.allocate(tx.meta.txid, count)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save to our page cache.
+ tx.pages[p.id] = p
+
+ // Update statistics.
+ tx.stats.PageCount += count
+ tx.stats.PageAlloc += count * tx.db.pageSize
+
+ return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+ // Sort pages by id.
+ pages := make(pages, 0, len(tx.pages))
+ for _, p := range tx.pages {
+ pages = append(pages, p)
+ }
+ // Clear out page cache early.
+ tx.pages = make(map[pgid]*page)
+ sort.Sort(pages)
+
+ // Write pages to disk in order.
+ for _, p := range pages {
+ size := (int(p.overflow) + 1) * tx.db.pageSize
+ offset := int64(p.id) * int64(tx.db.pageSize)
+
+ // Write out page in "max allocation" sized chunks.
+ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
+ for {
+ // Limit our write to our max allocation size.
+ sz := size
+ if sz > maxAllocSize-1 {
+ sz = maxAllocSize - 1
+ }
+
+ // Write chunk to disk.
+ buf := ptr[:sz]
+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+ return err
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ // Exit inner for loop if we've written all the chunks.
+ size -= sz
+ if size == 0 {
+ break
+ }
+
+ // Otherwise move offset forward and move pointer to next chunk.
+ offset += int64(sz)
+ ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+ }
+ }
+
+ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Put small pages back to page pool.
+ for _, p := range pages {
+ // Ignore page sizes over 1 page.
+ // These are allocated using make() instead of the page pool.
+ if int(p.overflow) != 0 {
+ continue
+ }
+
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+
+ // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+ for i := range buf {
+ buf[i] = 0
+ }
+ tx.db.pagePool.Put(buf)
+ }
+
+ return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+ // Create a temporary buffer for the meta page.
+ buf := make([]byte, tx.db.pageSize)
+ p := tx.db.pageInBuffer(buf, 0)
+ tx.meta.write(p)
+
+ // Write the meta page to file.
+ if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+ return err
+ }
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+ // Check the dirty pages first.
+ if tx.pages != nil {
+ if p, ok := tx.pages[id]; ok {
+ return p
+ }
+ }
+
+ // Otherwise return directly from the mmap.
+ return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+ p := tx.page(pgid)
+
+ // Execute function.
+ fn(p, depth)
+
+ // Recursively loop over children.
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ tx.forEachPage(elem.pgid, depth+1, fn)
+ }
+ }
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+ if tx.db == nil {
+ return nil, ErrTxClosed
+ } else if pgid(id) >= tx.meta.pgid {
+ return nil, nil
+ }
+
+ // Build the page info.
+ p := tx.db.page(pgid(id))
+ info := &PageInfo{
+ ID: id,
+ Count: int(p.count),
+ OverflowCount: int(p.overflow),
+ }
+
+ // Determine the type (or if it's free).
+ if tx.db.freelist.freed(pgid(id)) {
+ info.Type = "free"
+ } else {
+ info.Type = p.typ()
+ }
+
+ return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+ // Page statistics.
+ PageCount int // number of page allocations
+ PageAlloc int // total bytes allocated
+
+ // Cursor statistics.
+ CursorCount int // number of cursors created
+
+ // Node statistics
+ NodeCount int // number of node allocations
+ NodeDeref int // number of node dereferences
+
+ // Rebalance statistics.
+ Rebalance int // number of node rebalances
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ Split int // number of nodes split
+ Spill int // number of nodes spilled
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ Write int // number of writes performed
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.PageCount += other.PageCount
+ s.PageAlloc += other.PageAlloc
+ s.CursorCount += other.CursorCount
+ s.NodeCount += other.NodeCount
+ s.NodeDeref += other.NodeDeref
+ s.Rebalance += other.Rebalance
+ s.RebalanceTime += other.RebalanceTime
+ s.Split += other.Split
+ s.Spill += other.Spill
+ s.SpillTime += other.SpillTime
+ s.Write += other.Write
+ s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.PageCount - other.PageCount
+ diff.PageAlloc = s.PageAlloc - other.PageAlloc
+ diff.CursorCount = s.CursorCount - other.CursorCount
+ diff.NodeCount = s.NodeCount - other.NodeCount
+ diff.NodeDeref = s.NodeDeref - other.NodeDeref
+ diff.Rebalance = s.Rebalance - other.Rebalance
+ diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+ diff.Split = s.Split - other.Split
+ diff.Spill = s.Spill - other.Spill
+ diff.SpillTime = s.SpillTime - other.SpillTime
+ diff.Write = s.Write - other.Write
+ diff.WriteTime = s.WriteTime - other.WriteTime
+ return diff
+}
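+
+// txStatsDeltaExample is an illustrative sketch added for documentation
+// purposes and is not part of the upstream bbolt API: it shows how Sub turns
+// two cumulative snapshots into the counters accumulated between them, e.g.
+// for periodic metrics reporting.
+func txStatsDeltaExample(db *DB, prev TxStats) TxStats {
+	curr := db.Stats().TxStats
+	return curr.Sub(&prev)
+}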
diff --git a/vendor/github.com/dgraph-io/badger/.gitignore b/vendor/github.com/dgraph-io/badger/.gitignore
new file mode 100644
index 000000000..e3efdf58f
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/.gitignore
@@ -0,0 +1,2 @@
+p/
+badger-test*/
diff --git a/vendor/github.com/dgraph-io/badger/.golangci.yml b/vendor/github.com/dgraph-io/badger/.golangci.yml
new file mode 100644
index 000000000..fecb8644b
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/.golangci.yml
@@ -0,0 +1,27 @@
+run:
+ tests: false
+
+linters-settings:
+ lll:
+ line-length: 100
+
+linters:
+ disable-all: true
+ enable:
+ - errcheck
+ - ineffassign
+ - gas
+ - gofmt
+ - golint
+ - gosimple
+ - govet
+ - lll
+ - varcheck
+ - unused
+
+issues:
+ exclude-rules:
+ - linters:
+ - gosec
+ text: "G404: "
+
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/.travis.yml b/vendor/github.com/dgraph-io/badger/.travis.yml
new file mode 100644
index 000000000..7c58e56d2
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - "1.11"
+ - "1.12"
+
+matrix:
+ include:
+ - os: osx
+notifications:
+ email: false
+ slack:
+ secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=
+
+env:
+ global:
+ - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8=
+
+before_script:
+- go get github.com/mattn/goveralls
+script:
+- bash contrib/cover.sh $HOME/build coverage.out || travis_terminate 1
+- goveralls -service=travis-ci -coverprofile=coverage.out || true
+- goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/dgraph-io/badger/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/CHANGELOG.md
new file mode 100644
index 000000000..e381a4b7c
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/CHANGELOG.md
@@ -0,0 +1,190 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Serialization Versioning](VERSIONING.md).
+
+## [Unreleased]
+
+## [1.6.0] - 2019-07-01
+
+This is a release including almost 200 commits, so expect many changes - some of them
+not backward compatible.
+
+Regarding backward compatibility in Badger versions, you might be interested in reading
+[VERSIONING.md](VERSIONING.md).
+
+_Note_: The hashes in parentheses correspond to the commits that impacted the given feature.
+
+### New APIs
+
+- badger.DB
+ - DropPrefix (291295e)
+ - Flatten (7e41bba)
+ - KeySplits (4751ef1)
+ - MaxBatchCount (b65e2a3)
+ - MaxBatchSize (b65e2a3)
+ - PrintKeyValueHistogram (fd59907)
+ - Subscribe (26128a7)
+ - Sync (851e462)
+
+- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687)
+ - badger.Options.WithX methods
+
+- badger.Entry (e9447c9)
+ - NewEntry
+ - WithMeta
+ - WithDiscard
+ - WithTTL
+
+- badger.Item
+ - KeySize (fd59907)
+ - ValueSize (5242a99)
+
+- badger.IteratorOptions
+ - PickTable (7d46029, 49a49e3)
+ - Prefix (7d46029)
+
+- badger.Logger (fbb2778)
+
+- badger.Options
+ - CompactL0OnClose (7e41bba)
+ - Logger (3f66663)
+ - LogRotatesToFlush (2237832)
+
+- badger.Stream (14cbd89, 3258067)
+- badger.StreamWriter (7116e16)
+- badger.TableInfo.KeyCount (fd59907)
+- badger.TableManifest (2017987)
+- badger.Tx.NewKeyIterator (49a49e3)
+- badger.WriteBatch (6daccf9, 7e78e80)
+
+### Modified APIs
+
+#### Breaking changes:
+
+- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687)
+- badger.Item.Value now receives a function that returns an error (439fd46)
+- badger.Txn.Commit doesn't receive any params now (6daccf9)
+- badger.DB.Tables now receives a boolean (76b5341)
+
+#### Not breaking changes:
+
+- badger.LSMOptions changed values (799c33f)
+- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656)
+- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac)
+
+### Removed APIs
+
+- badger.ManagedDB (d22c0e8)
+- badger.Options.DoNotCompact (7e41bba)
+- badger.Txn.SetWithX (e9447c9)
+
+### Tools:
+
+- badger bank disect (13db058)
+- badger bank test (13db058) --mmap (03870e3)
+- badger fill (7e41bba)
+- badger flatten (7e41bba)
+- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9)
+- badger benchmark read (239041e)
+- badger benchmark write (6d3b67d)
+
+## [1.5.5] - 2019-06-20
+
+* Introduce support for Go Modules
+
+## [1.5.3] - 2018-07-11
+Bug Fixes:
+* Fix a panic caused by item.vptr not copying over vs.Value when looking
+ for a move key.
+
+## [1.5.2] - 2018-06-19
+Bug Fixes:
+* Fix the way move key gets generated.
+* If a transaction has unclosed iterators, or multiple iterators running simultaneously,
+ throw a panic. Every iterator must be properly closed. At any point in time,
+ only one iterator per transaction can be running. This is to avoid bugs in a
+ transaction data structure which is thread unsafe.
+
+* *Warning: This change might cause panics in user code. Fix is to properly
+ close your iterators, and only have one running at a time per transaction.*
+
+## [1.5.1] - 2018-06-04
+Bug Fixes:
+* Fix for infinite yieldItemValue recursion. #503
+* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f
+* Use file size based window size for sampling, instead of fixing it to 10MB. #501
+
+Cleanup:
+* Clarify comments and documentation.
+* Move badger tool one directory level up.
+
+## [1.5.0] - 2018-05-08
+* Introduce `NumVersionsToKeep` option. This option is used to discard many
+ versions of the same key, which saves space.
+* Add a new `SetWithDiscard` method, which would indicate that all the older
+ versions of the key are now invalid. Those versions would be discarded during
+ compactions.
+* Value log GC moves are now bound to another keyspace to ensure latest versions
+ of data are always at the top in LSM tree.
+* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per
+ value log file. This helps bound the time it takes to garbage collect one
+ file.
+
+## [1.4.0] - 2018-05-04
+* Make mmap-ing of value log optional.
+* Run GC multiple times, based on recorded discard statistics.
+* Add MergeOperator.
+* Force compact L0 on close (#439).
+* Add truncate option to warn about data loss (#452).
+* Discard key versions during compaction (#464).
+* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB.
+
+Bug fix:
+* (Temporary) Check max version across all tables in Get (removed in next
+ release).
+* Update commit and read ts while loading from backup.
+* Ensure all transaction entries are part of the same value log file.
+* On commit, run unlock callbacks before doing writes (#413).
+* Wait for goroutines to finish before closing iterators (#421).
+
+## [1.3.0] - 2017-12-12
+* Add `DB.NextSequence()` method to generate monotonically increasing integer
+ sequences.
+* Add `DB.Size()` method to return the size of LSM and value log files.
+* Tweaked mmap code to make Windows 32-bit builds work.
+* Tweaked build tags on some files to make iOS builds work.
+* Fix `DB.PurgeOlderVersions()` to not violate some constraints.
+
+## [1.2.0] - 2017-11-30
+* Expose a `Txn.SetEntry()` method to allow setting the key-value pair
+ and all the metadata at the same time.
+
+## [1.1.1] - 2017-11-28
+* Fix bug where txn.Get was returning a key deleted in the same transaction.
+* Fix race condition while decrementing reference in oracle.
+* Update doneCommit in the callback for CommitAsync.
+* Iterators see writes of the current txn.
+
+## [1.1.0] - 2017-11-13
+* Create Badger directory if it does not exist when `badger.Open` is called.
+* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations
+* Fixed 64-bit alignment issues to make Badger run on Arm v7
+
+## [1.0.1] - 2017-11-06
+* Fix an uint16 overflow when resizing key slice
+
+[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.6.0...HEAD
+[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0
+[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5
+[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3
+[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2
+[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1
+[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0
+[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0
+[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0
+[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1
+[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0
+[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1
diff --git a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..bf7bbc29d
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+Our Code of Conduct can be found here:
+
+https://dgraph.io/conduct
diff --git a/vendor/github.com/dgraph-io/badger/LICENSE b/vendor/github.com/dgraph-io/badger/LICENSE
new file mode 100644
index 000000000..d9a10c0d8
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/LICENSE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/dgraph-io/badger/README.md b/vendor/github.com/dgraph-io/badger/README.md
new file mode 100644
index 000000000..fe033d9cb
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/README.md
@@ -0,0 +1,859 @@
+# BadgerDB [GoDoc](https://godoc.org/github.com/dgraph-io/badger) [Go Report Card](https://goreportcard.com/report/github.com/dgraph-io/badger) [Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [Build Status](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) [Coverage Status](https://coveralls.io/github/dgraph-io/badger?branch=master)
+
+
+
+BadgerDB is an embeddable, persistent and fast key-value (KV) database
+written in pure Go. It's meant to be a performant alternative to non-Go-based
+key-value stores like [RocksDB](https://github.com/facebook/rocksdb).
+
+## Project Status [Jun 26, 2019]
+
+Badger is stable and is being used to serve data sets worth hundreds of
+terabytes. Badger supports concurrent ACID transactions with serializable
+snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for
+8h, with the `--race` flag, and ensures maintenance of transactional guarantees.
+Badger has also been tested to work with filesystem level anomalies, to ensure
+persistence and consistency.
+
+Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible
+with v1.0 is v1.6.0.
+
+Badger v2.0, a new release coming up very soon, will use a new storage format which won't
+be compatible with any of the v1.x releases. The [Changelog] is kept fairly up-to-date.
+
+For more details on our version naming schema please read [Choosing a version](#choosing-a-version).
+
+[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md
+
+## Table of Contents
+ * [Getting Started](#getting-started)
+ + [Installing](#installing)
+ - [Choosing a version](#choosing-a-version)
+ + [Opening a database](#opening-a-database)
+ + [Transactions](#transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ + [Using key/value pairs](#using-keyvalue-pairs)
+ + [Monotonically increasing integers](#monotonically-increasing-integers)
+ * [Merge Operations](#merge-operations)
+ + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys)
+ + [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Key-only iteration](#key-only-iteration)
+ + [Stream](#stream)
+ + [Garbage Collection](#garbage-collection)
+ + [Database backup](#database-backup)
+ + [Memory usage](#memory-usage)
+ + [Statistics](#statistics)
+ * [Resources](#resources)
+ + [Blog Posts](#blog-posts)
+ * [Contact](#contact)
+ * [Design](#design)
+ + [Comparisons](#comparisons)
+ + [Benchmarks](#benchmarks)
+ * [Other Projects Using Badger](#other-projects-using-badger)
+ * [Frequently Asked Questions](#frequently-asked-questions)
+
+## Getting Started
+
+### Installing
+To start using Badger, install Go 1.11 or above and run `go get`:
+
+```sh
+$ go get github.com/dgraph-io/badger/...
+```
+
+This will retrieve the library and install the `badger` command line
+utility into your `$GOBIN` path.
+
+#### Choosing a version
+
+BadgerDB is a somewhat unusual package in that the most important changes we can
+make to it are not to its API but to how data is stored on disk.
+
+This is why we follow a version naming schema that differs from Semantic Versioning.
+
+- New major versions are released when the data format on disk changes in an incompatible way.
+- New minor versions are released whenever the API changes but data compatibility is maintained.
+ Note that the changes on the API could be backward-incompatible - unlike Semantic Versioning.
+- New patch versions are released when there are no changes to the data format or the API.
+
+Following these rules:
+
+- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major
+ version is the same, therefore the data format on disk is compatible.
+- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with
+ v1.6.0 will need to be converted into the new format before they can be used by v2.0.0.
+
+For a longer explanation on the reasons behind using a new versioning naming schema, you can read
+[VERSIONING.md](VERSIONING.md).
+
+### Opening a database
+The top-level object in Badger is a `DB`. It represents multiple files on disk
+in specific directories, which contain the data for a single database.
+
+To open your database, use the `badger.Open()` function, with the appropriate
+options. The `Dir` and `ValueDir` options are mandatory and must be
+specified by the client. They can be set to the same value to simplify things.
+
+```go
+package main
+
+import (
+ "log"
+
+ badger "github.com/dgraph-io/badger"
+)
+
+func main() {
+ // Open the Badger database located in the /tmp/badger directory.
+ // It will be created if it doesn't exist.
+ db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+ // Your code here…
+}
+```
+
+Please note that Badger obtains a lock on the directories so multiple processes
+cannot open the same database at the same time.
+
+### Transactions
+
+#### Read-only transactions
+To start a read-only transaction, you can use the `DB.View()` method:
+
+```go
+err := db.View(func(txn *badger.Txn) error {
+ // Your code here…
+ return nil
+})
+```
+
+You cannot perform any writes or deletes within this transaction. Badger
+ensures that you get a consistent view of the database within this closure. Any
+writes that happen elsewhere after the transaction has started will not be
+seen by calls made within the closure.
+
+#### Read-write transactions
+To start a read-write transaction, you can use the `DB.Update()` method:
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ // Your code here…
+ return nil
+})
+```
+
+All database operations are allowed inside a read-write transaction.
+
+Always check the returned error value. If you return an error
+within your closure it will be passed through.
+
+An `ErrConflict` error will be reported in case of a conflict. Depending on the state
+of your application, you have the option to retry the operation if you receive
+this error.
+
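+One possible way to retry on `ErrConflict` (a sketch only; `maxRetries` is a
+hypothetical application-level limit, and the right retry policy depends on
+your workload):
+
+```go
+maxRetries := 3
+var err error
+for i := 0; i < maxRetries; i++ {
+  err = db.Update(func(txn *badger.Txn) error {
+    // Your reads and writes here…
+    return nil
+  })
+  if err != badger.ErrConflict {
+    break // success, or an error that retrying will not fix
+  }
+}
+handle(err)
+```
+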
+An `ErrTxnTooBig` error will be reported if the number of pending writes/deletes in
+the transaction exceeds a certain limit. In that case, it is best to commit the
+transaction and start a new transaction immediately. Here is an example (we are
+not checking for errors in some places for simplicity):
+
+```go
+updates := make(map[string]string)
+txn := db.NewTransaction(true)
+for k,v := range updates {
+ if err := txn.Set([]byte(k),[]byte(v)); err == badger.ErrTxnTooBig {
+ _ = txn.Commit()
+ txn = db.NewTransaction(true)
+ _ = txn.Set([]byte(k),[]byte(v))
+ }
+}
+_ = txn.Commit()
+```
+
+#### Managing transactions manually
+The `DB.View()` and `DB.Update()` methods are wrappers around the
+`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of
+read-only transactions). These helper methods will start the transaction,
+execute a function, and then safely discard your transaction if an error is
+returned. This is the recommended way to use Badger transactions.
+
+However, sometimes you may want to manually create and commit your
+transactions. You can use the `DB.NewTransaction()` function directly, which
+takes in a boolean argument to specify whether a read-write transaction is
+required. For read-write transactions, it is necessary to call `Txn.Commit()`
+to ensure the transaction is committed. For read-only transactions, calling
+`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()`
+internally to clean up the transaction, so just calling `Txn.Commit()` is
+sufficient for read-write transactions. However, if your code doesn’t call
+`Txn.Commit()` for some reason (e.g. it returns prematurely with an error),
+then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the
+code below.
+
+```go
+// Start a writable transaction.
+txn := db.NewTransaction(true)
+defer txn.Discard()
+
+// Use the transaction...
+err := txn.Set([]byte("answer"), []byte("42"))
+if err != nil {
+ return err
+}
+
+// Commit the transaction and check for error.
+if err := txn.Commit(); err != nil {
+ return err
+}
+```
+
+The first argument to `DB.NewTransaction()` is a boolean stating if the transaction
+should be writable.
+
+Badger also allows committing with a callback, using the `Txn.CommitWith()`
+method. `Txn.Commit()` returns only after all the writes have succeeded. With
+`Txn.CommitWith()`, the method returns as soon as it has checked for any
+conflicts. The actual writing to disk happens asynchronously, and the callback
+is invoked once the writing has finished, or an error has occurred. This can
+improve the throughput of the application in some cases. But it also means that
+a transaction is not durable until the callback has been invoked with a `nil`
+error value.
+
+### Using key/value pairs
+To save a key/value pair, use the `Txn.Set()` method:
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ err := txn.Set([]byte("answer"), []byte("42"))
+ return err
+})
+```
+
+A key/value pair can also be saved by first creating an `Entry`, then setting this
+`Entry` using `Txn.SetEntry()`. `Entry` also exposes methods to set properties
+on it.
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ e := badger.NewEntry([]byte("answer"), []byte("42"))
+ err := txn.SetEntry(e)
+ return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"`. To retrieve this
+value, we can use the `Txn.Get()` method:
+
+```go
+err := db.View(func(txn *badger.Txn) error {
+ item, err := txn.Get([]byte("answer"))
+ handle(err)
+
+ var valNot, valCopy []byte
+ err = item.Value(func(val []byte) error {
+ // This func with val would only be called if item.Value encounters no error.
+
+ // Accessing val here is valid.
+ fmt.Printf("The answer is: %s\n", val)
+
+ // Copying or parsing val is valid.
+ valCopy = append([]byte{}, val...)
+
+ // Assigning val slice to another variable is NOT OK.
+ valNot = val // Do not do this.
+ return nil
+ })
+ handle(err)
+
+ // DO NOT access val here. It is the most common cause of bugs.
+ fmt.Printf("NEVER do this. %s\n", valNot)
+
+ // You must copy it to use it outside item.Value(...).
+ fmt.Printf("The answer is: %s\n", valCopy)
+
+ // Alternatively, you could also use item.ValueCopy().
+ valCopy, err = item.ValueCopy(nil)
+ handle(err)
+ fmt.Printf("The answer is: %s\n", valCopy)
+
+ return nil
+})
+```
+
+`Txn.Get()` returns `ErrKeyNotFound` if the value is not found.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+Use the `Txn.Delete()` method to delete a key.
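+
+For example, a delete wrapped in a read-write transaction might look like this
+sketch (the key name is illustrative):
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+  // Writes a delete marker for the key within this transaction.
+  return txn.Delete([]byte("answer"))
+})
+```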
+
+### Monotonically increasing integers
+
+To get unique monotonically increasing integers with strong durability, you can
+use the `DB.GetSequence` method. This method returns a `Sequence` object, which
+is thread-safe and can be used concurrently from multiple goroutines.
+
+Badger would lease a range of integers to hand out from memory, with the
+bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are
+done is determined by this lease bandwidth and the frequency of `Next`
+invocations. Setting the bandwidth too low would cause more disk writes; setting it
+too high would result in wasted integers if Badger is closed or crashes.
+To avoid wasted integers, call `Release` before closing Badger.
+
+```go
+seq, err := db.GetSequence(key, 1000)
+handle(err)
+defer seq.Release()
+for {
+  num, err := seq.Next()
+  handle(err)
+  fmt.Println(num)
+}
+```
+
+### Merge Operations
+Badger provides support for ordered merge operations. You can define a func
+of type `MergeFunc` which takes in an existing value, and a value to be
+_merged_ with it. It returns a new value which is the result of the _merge_
+operation. All values are specified as byte slices. For example, here is a merge
+function (`add`) which appends a `[]byte` value to an existing `[]byte` value.
+
+```Go
+// Merge function to append one byte slice to another
+func add(originalValue, newValue []byte) []byte {
+ return append(originalValue, newValue...)
+}
+```
+
+This function can then be passed to the `DB.GetMergeOperator()` method, along
+with a key, and a duration value. The duration specifies how often the merge
+function is run on values that have been added using the `MergeOperator.Add()`
+method.
+
+The `MergeOperator.Get()` method can be used to retrieve the cumulative value of the key
+associated with the merge operation.
+
+```Go
+key := []byte("merge")
+
+m := db.GetMergeOperator(key, add, 200*time.Millisecond)
+defer m.Stop()
+
+m.Add([]byte("A"))
+m.Add([]byte("B"))
+m.Add([]byte("C"))
+
+res, _ := m.Get() // res should have value ABC encoded
+```
+
+Example: Merge operator which increments a counter
+
+```Go
+func uint64ToBytes(i uint64) []byte {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], i)
+ return buf[:]
+}
+
+func bytesToUint64(b []byte) uint64 {
+ return binary.BigEndian.Uint64(b)
+}
+
+// Merge function to add two uint64 numbers
+func add(existing, new []byte) []byte {
+ return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new))
+}
+```
+It can be used as follows:
+```Go
+key := []byte("merge")
+
+m := db.GetMergeOperator(key, add, 200*time.Millisecond)
+defer m.Stop()
+
+m.Add(uint64ToBytes(1))
+m.Add(uint64ToBytes(2))
+m.Add(uint64ToBytes(3))
+
+res, _ := m.Get() // res should have value 6 encoded
+```
+
+### Setting Time To Live (TTL) and User Metadata on Keys
+Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has
+elapsed, the key will no longer be retrievable and will be eligible for garbage
+collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()`
+and `Txn.SetEntry()` API methods.
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ e := badger.NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour)
+ err := txn.SetEntry(e)
+ return err
+})
+```
+
+An optional user metadata value can be set on each key. A user metadata value
+is represented by a single byte. It can be used to set certain bits along
+with the key to aid in interpreting or decoding the key-value pair. User
+metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods.
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1))
+ err := txn.SetEntry(e)
+ return err
+})
+```
+
+The `Entry` APIs can be used to add user metadata and a TTL to the same key. This `Entry`
+can then be set using `Txn.SetEntry()`.
+
+```go
+err := db.Update(func(txn *badger.Txn) error {
+ e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour)
+ err := txn.SetEntry(e)
+ return err
+})
+```
+
+### Iterating over keys
+To iterate over keys, we can use an `Iterator`, which can be obtained using the
+`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting
+order.
+
+
+```go
+err := db.View(func(txn *badger.Txn) error {
+ opts := badger.DefaultIteratorOptions
+ opts.PrefetchSize = 10
+ it := txn.NewIterator(opts)
+ defer it.Close()
+ for it.Rewind(); it.Valid(); it.Next() {
+ item := it.Item()
+ k := item.Key()
+ err := item.Value(func(v []byte) error {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+})
+```
+
+The iterator allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+By default, Badger prefetches the values of the next 100 items. You can adjust
+that with the `IteratorOptions.PrefetchSize` field. However, setting it to
+a value higher than `GOMAXPROCS` (which we recommend setting to 128 or higher)
+shouldn’t give any additional benefit. You can also turn off the fetching of
+values altogether. See the section below on key-only iteration.
+
+#### Prefix scans
+To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`:
+
+```go
+db.View(func(txn *badger.Txn) error {
+ it := txn.NewIterator(badger.DefaultIteratorOptions)
+ defer it.Close()
+ prefix := []byte("1234")
+ for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
+ item := it.Item()
+ k := item.Key()
+ err := item.Value(func(v []byte) error {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+})
+```
+
+#### Key-only iteration
+Badger supports a unique mode of iteration called _key-only_ iteration. It is
+several orders of magnitude faster than regular iteration, because it involves
+access to the LSM-tree only, which is usually resident entirely in RAM. To
+enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues`
+field to `false`. This can also be used to do sparse reads for selected keys
+during an iteration, by calling `item.Value()` only when required.
+
+```go
+err := db.View(func(txn *badger.Txn) error {
+ opts := badger.DefaultIteratorOptions
+ opts.PrefetchValues = false
+ it := txn.NewIterator(opts)
+ defer it.Close()
+ for it.Rewind(); it.Valid(); it.Next() {
+ item := it.Item()
+ k := item.Key()
+ fmt.Printf("key=%s\n", k)
+ }
+ return nil
+})
+```
+
+### Stream
+Badger provides a Stream framework, which concurrently iterates over all or a
+portion of the DB, converting data into custom key-values, and streams them out
+serially to be sent over the network, written to disk, or even written back to
+Badger. This is a much faster way to iterate over Badger than using a single
+Iterator. Stream supports Badger in both managed and normal mode.
+
+Stream uses the natural boundaries created by SSTables within the LSM tree to
+quickly generate key ranges. Each goroutine then picks a range and runs an
+iterator to iterate over it. Each iterator iterates over all versions of values
+and is created from the same transaction, thus working over a snapshot of the
+DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed
+by `KeyToList(key, itr)`. This allows a user to select or reject that key, and
+if selected, convert the value versions into custom key-values. The goroutine
+batches up 4MB worth of key-values before sending them over to a channel.
+Another goroutine further batches up data from this channel using a *smart
+batching* algorithm and calls `Send` serially.
+
+This framework is designed for high throughput key-value iteration, spreading
+the work of iteration across many goroutines. `DB.Backup` uses this framework to
+provide full and incremental backups quickly. Dgraph is a heavy user of this
+framework. In fact, this framework was developed and used within Dgraph, before
+getting ported over to Badger.
+
+```go
+stream := db.NewStream()
+// db.NewStreamAt(readTs) for managed mode.
+
+// -- Optional settings
+stream.NumGo = 16 // Set number of goroutines to use for iteration.
+stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB.
+stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger.
+
+// ChooseKey is called concurrently for every key. If left nil, assumes true by default.
+stream.ChooseKey = func(item *badger.Item) bool {
+ return bytes.HasSuffix(item.Key(), []byte("er"))
+}
+
+// KeyToList is called concurrently for chosen keys. This can be used to convert
+// Badger data into custom key-values. If nil, uses stream.ToList, a default
+// implementation, which picks all valid key-values.
+stream.KeyToList = nil
+
+// -- End of optional settings.
+
+// Send is called serially, while Stream.Orchestrate is running.
+stream.Send = func(list *pb.KVList) error {
+ return proto.MarshalText(w, list) // Write to w.
+}
+
+// Run the stream
+if err := stream.Orchestrate(context.Background()); err != nil {
+ return err
+}
+// Done.
+```
+
+### Garbage Collection
+Badger values need to be garbage collected, because of two reasons:
+
+* Badger keeps values separately from the LSM tree. This means that the compaction operations
+that clean up the LSM tree do not touch the values at all. Values need to be cleaned up
+separately.
+
+* Concurrent read/write transactions could leave behind multiple values for a single key, because they
+are stored with different versions. These could accumulate, and take up unneeded space beyond the
+time these older versions are needed.
+
+Badger relies on the client to perform garbage collection at a time of their choosing. It provides
+the following method, which can be invoked at an appropriate time:
+
+* `DB.RunValueLogGC()`: This method is designed to do garbage collection while
+ Badger is online. Along with randomly picking a file, it uses statistics generated by the
+ LSM-tree compactions to pick files that are likely to lead to maximum space
+  reclamation. It is recommended that it be called during periods of low activity in
+  your system, or periodically. One call results in the removal of at most
+  one log file. As an optimization, you can also immediately re-run it whenever
+  it returns a nil error (indicating a successful value log GC), as shown below.
+
+ ```go
+ ticker := time.NewTicker(5 * time.Minute)
+ defer ticker.Stop()
+ for range ticker.C {
+ again:
+ err := db.RunValueLogGC(0.7)
+ if err == nil {
+ goto again
+ }
+ }
+ ```
+
+* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys.
+
+**Note: The RunValueLogGC method does not garbage collect the latest value log.**
+
+### Database backup
+There are two public API methods, `DB.Backup()` and `DB.Load()`, which can be
+used to do online backups and restores. Badger v0.9 provides a CLI tool,
+`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin`
+in your `PATH` to use this tool.
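+
+As a sketch, an online backup and restore through the Go API could look like
+the following (file names and the pending-writes limit are illustrative, and
+`handle` is the same error helper used in the examples above):
+
+```go
+// Full backup: since == 0 dumps every entry. The returned version can be
+// stored and passed to a later Backup call for an incremental dump.
+f, err := os.Create("badger.bak")
+handle(err)
+since, err := db.Backup(f, 0)
+handle(err)
+handle(f.Close())
+fmt.Printf("backed up until version %d\n", since)
+
+// Restore into a (fresh) DB. The second argument limits pending batched writes.
+r, err := os.Open("badger.bak")
+handle(err)
+defer r.Close()
+handle(db.Load(r, 256))
+```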
+
+The command below will create a version-agnostic backup of the database, to a
+file `badger.bak` in the current working directory:
+
+```
+badger backup --dir <path/to/badgerdb>
+```
+
+To restore `badger.bak` in the current working directory to a new database:
+
+```
+badger restore --dir <path/to/badgerdb>
+```
+
+See `badger --help` for more details.
+
+If you have a Badger database that was created using v0.8 (or below), you can
+use the `badger_backup` tool provided in v0.8.1, and then restore it using the
+command above to upgrade your database to work with the latest version.
+
+```
+badger_backup --dir <path/to/badgerdb> --backup-file badger.bak
+```
+
+We recommend that all users use the `Backup` and `Restore` APIs and tools. However,
+Badger is also rsync-friendly because all files are immutable, barring the
+latest value log, which is append-only. So rsync can be used as a rudimentary way
+to perform a backup. In the following script, we repeat rsync to ensure that the
+LSM tree remains consistent with the MANIFEST file while doing a full backup.
+
+```
+#!/bin/bash
+set -o history
+set -o histexpand
+# Makes a complete copy of a Badger database directory.
+# Repeat rsync if the MANIFEST and SSTables are updated.
+rsync -avz --delete db/ dst
+while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done
+```
+
+### Memory usage
+Badger's memory usage can be managed by tweaking several options available in
+the `Options` struct that is passed in when opening the database using
+`badger.Open()`.
+
+- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the
+ default `options.MemoryMap`) to avoid memory-mapping log files. This can be
+ useful in environments with low RAM.
+- Number of memtables (`Options.NumMemtables`)
+ - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
+ `Options.NumLevelZeroTablesStall` accordingly.
+- Number of concurrent compactions (`Options.NumCompactors`)
+- Mode in which LSM tree is loaded (`Options.TableLoadingMode`)
+- Size of table (`Options.MaxTableSize`)
+- Size of value log file (`Options.ValueLogFileSize`)
+
+If you want to decrease the memory usage of a Badger instance, tweak these
+options (ideally one at a time) until you achieve the desired memory usage, as
+sketched below.
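+
+As a rough sketch (the values are illustrative, not recommendations), these
+options can be set on the value returned by `badger.DefaultOptions` before
+opening the database; `options` refers to the `github.com/dgraph-io/badger/options`
+package:
+
+```go
+opts := badger.DefaultOptions("/tmp/badger")
+opts.ValueLogLoadingMode = options.FileIO // avoid memory-mapping value log files
+opts.NumMemtables = 2                     // fewer in-memory tables
+opts.NumLevelZeroTables = 2               // keep L0 thresholds in step with NumMemtables
+opts.NumLevelZeroTablesStall = 4
+opts.NumCompactors = 1
+
+db, err := badger.Open(opts)
+if err != nil {
+  log.Fatal(err)
+}
+defer db.Close()
+```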
+
+### Statistics
+Badger records metrics using the [expvar] package, which is included in the Go
+standard library. All the metrics are documented in [y/metrics.go][metrics]
+file.
+
+The `expvar` package adds a handler to the default HTTP server (which has to be
+started explicitly) and serves up the metrics at the `/debug/vars` endpoint.
+These metrics can then be collected by a system like [Prometheus], to get
+better visibility into what Badger is doing.
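+
+As a sketch, starting the default HTTP server is enough to expose the metrics
+in an application that uses Badger (the listen address is illustrative):
+
+```go
+// The expvar handler is registered on http.DefaultServeMux when the package is
+// imported (Badger imports it for its metrics); we only need to serve it.
+go func() {
+  log.Println(http.ListenAndServe("localhost:8080", nil))
+}()
+// Metrics are now available at http://localhost:8080/debug/vars.
+```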
+
+[expvar]: https://golang.org/pkg/expvar/
+[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go
+[Prometheus]: https://prometheus.io/
+
+## Resources
+
+### Blog Posts
+1. [Introducing Badger: A fast key-value store written natively in
+Go](https://open.dgraph.io/post/badger/)
+2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/)
+3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/)
+4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
+
+## Design
+Badger was written with these design goals in mind:
+
+- Write a key-value database in pure Go.
+- Use latest research to build the fastest KV database for data sets spanning terabytes.
+- Optimize for SSDs.
+
+Badger’s design is based on a paper titled _[WiscKey: Separating Keys from
+Values in SSD-conscious Storage][wisckey]_.
+
+[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf
+
+### Comparisons
+| Feature | Badger | RocksDB | BoltDB |
+| ------- | ------ | ------- | ------ |
+| Design | LSM tree with value log | LSM tree only | B+ tree |
+| High Read throughput | Yes | No | Yes |
+| High Write throughput | Yes | Yes | No |
+| Designed for SSDs | Yes (with latest research <sup>1</sup>) | Not specifically <sup>2</sup> | No |
+| Embeddable | Yes | Yes | Yes |
+| Sorted KV access | Yes | Yes | Yes |
+| Pure Go (no Cgo) | Yes | No | Yes |
+| Transactions | Yes, ACID, concurrent with SSI<sup>3</sup> | Yes (but non-ACID) | Yes, ACID |
+| Snapshots | Yes | Yes | Yes |
+| TTL support | Yes | Yes | No |
+| 3D access (key-value-version) | Yes<sup>4</sup> | No | No |
+
+<sup>1</sup> The [WISCKEY paper][wisckey] (on which Badger is based) saw big
+wins with separating values from keys, significantly reducing the write
+amplification compared to a typical LSM tree.
+
+<sup>2</sup> RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks.
+As such RocksDB's design isn't aimed at SSDs.
+
+<sup>3</sup> SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
+
+<sup>4</sup> Badger provides direct access to value versions via its Iterator API.
+Users can also specify how many versions to keep per key via Options.
+
+### Benchmarks
+We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The
+benchmarking code and the detailed logs for the benchmarks can be found in the
+[badger-bench] repo. More explanation, including graphs, can be found in the blog posts (linked
+above).
+
+[badger-bench]: https://github.com/dgraph-io/badger-bench
+
+## Other Projects Using Badger
+Below is a list of known projects that use Badger:
+
+* [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
+* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
+* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
+* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue.
+* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger.
+* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol.
+* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go.
+* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger.
+* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go.
+* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol.
+* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft.
+* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine.
+* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications.
+* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain.
+* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language.
+* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots.
+* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform.
+* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains.
+* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp.
+* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications.
+* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects.
+* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger
+* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB
+* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger.
+* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft.
+* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
+
+If you are using Badger in a project please send a pull request to add it to the list.
+
+## Frequently Asked Questions
+- **My writes are getting stuck. Why?**
+
+**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer
+happen.**
+
+The following is true for users on Badger v1.x.
+
+This can happen if a long-running iteration has `Prefetch` set to false, but
+an `Item::Value` call is made internally in the loop. That causes Badger to
+acquire read locks over the value log files to prevent value log GC from removing the
+file from underneath. As a side effect, this also blocks a new value log GC
+file from being created when the value log file boundary is hit.
+
+Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293)
+and [#315](https://github.com/dgraph-io/badger/issues/315).
+
+There are multiple workarounds during iteration:
+
+1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving a value.
+1. Set `Prefetch` to true. Badger would then copy over the value and release the
+ file lock immediately.
+1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only
+ iteration. This might be useful if you just want to delete a lot of keys.
+1. Do the writes in a separate transaction after the reads.
+
+- **My writes are really slow. Why?**
+
+Are you creating a new transaction for every single key update, and waiting for
+it to `Commit` fully before creating a new one? This will lead to very low
+throughput.
+
+We have created the `WriteBatch` API, which provides a way to batch up
+many updates into a single transaction and `Commit` that transaction using
+callbacks to avoid blocking. This amortizes the cost of a transaction really
+well and provides the most efficient way to do bulk writes.
+
+```go
+wb := db.NewWriteBatch()
+defer wb.Cancel()
+
+for i := 0; i < N; i++ {
+ err := wb.Set(key(i), value(i)) // Will create txns as needed.
+ handle(err)
+}
+handle(wb.Flush()) // Wait for all txns to finish.
+```
+
+Note that the `WriteBatch` API does not allow any reads. For read-modify-write
+workloads, you should be using the `Transaction` API.
+
+- **I don't see any disk write. Why?**
+
+If you're using Badger with `SyncWrites=false`, then your writes might not be written to the value log
+and won't get synced to disk immediately. Writes to the LSM tree are done in memory first, before they
+get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if
+you're doing a few writes and then checking, you might not see anything on disk. Once you `Close`
+the database, you'll see these writes on disk.
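+
+As a sketch, synchronous writes can be turned back on through the `SyncWrites`
+option mentioned above (the path is illustrative):
+
+```go
+opts := badger.DefaultOptions("/tmp/badger")
+opts.SyncWrites = true // sync writes to disk before acknowledging them
+
+db, err := badger.Open(opts)
+if err != nil {
+  log.Fatal(err)
+}
+defer db.Close()
+```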
+
+- **Reverse iteration doesn't give me the right results.**
+
+Just like forward iteration goes to the first key which is equal to or greater than the SEEK key, reverse iteration goes to the first key which is equal to or less than the SEEK key. Therefore, the SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347).
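+
+A sketch of a reverse prefix scan that includes the prefix itself (the prefix
+value is illustrative):
+
+```go
+err := db.View(func(txn *badger.Txn) error {
+  opts := badger.DefaultIteratorOptions
+  opts.Reverse = true
+  it := txn.NewIterator(opts)
+  defer it.Close()
+
+  prefix := []byte("1234")
+  seekKey := append(append([]byte{}, prefix...), 0xff) // include keys equal to the prefix
+  for it.Seek(seekKey); it.ValidForPrefix(prefix); it.Next() {
+    fmt.Printf("key=%s\n", it.Item().Key())
+  }
+  return nil
+})
+```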
+
+- **Which instances should I use for Badger?**
+
+We recommend using instances which provide local SSD storage, without any limit
+on the maximum IOPS. In AWS, these are storage optimized instances like i3. They
+provide local SSDs which clock 100K IOPS over 4KB blocks easily.
+
+- **I'm getting a closed channel error. Why?**
+
+```
+panic: close of closed channel
+panic: send on closed channel
+```
+
+If you're seeing panics like the above, this is because you're operating on a closed DB. This can happen if you call `Close()` before sending a write, or call it multiple times. You should ensure that you only call `Close()` once, and that all your read/write operations finish before closing.
+
+- **Are there any Go-specific settings that I should use?**
+
+We *highly* recommend setting a high number for GOMAXPROCS, which allows Go to
+observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set
+it to 128. For more details, [see this
+thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion).
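+
+A sketch of setting this programmatically (the value mirrors the recommendation
+above):
+
+```go
+import "runtime"
+
+func init() {
+  // Allow Go to drive the SSD's full IOPS throughput.
+  runtime.GOMAXPROCS(128)
+}
+```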
+
+- **Are there any Linux-specific settings that I should use?**
+
+We recommend setting the maximum number of open file descriptors to a high number, depending upon the expected size of your data.
+
+## Contact
+- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
+- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
+- Join the community Slack at [slack.dgraph.io](http://slack.dgraph.io).
+- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
+
diff --git a/vendor/github.com/dgraph-io/badger/VERSIONING.md b/vendor/github.com/dgraph-io/badger/VERSIONING.md
new file mode 100644
index 000000000..a890a36ff
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/VERSIONING.md
@@ -0,0 +1,47 @@
+# Serialization Versioning: Semantic Versioning for databases
+
+Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
+a way to decide how to name software versions. The whole concept is very well summarized on
+semver.org with the following lines:
+
+> Given a version number MAJOR.MINOR.PATCH, increment the:
+>
+> 1. MAJOR version when you make incompatible API changes,
+> 2. MINOR version when you add functionality in a backwards-compatible manner, and
+> 3. PATCH version when you make backwards-compatible bug fixes.
+>
+> Additional labels for pre-release and build metadata are available as extensions to the
+> MAJOR.MINOR.PATCH format.
+
+Unfortunately, API changes are not the most important changes for libraries that serialize data for
+later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
+handle than changes to the data format used to store data on disk.
+
+## Serialization Version specification
+
+Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
+MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
+
+Given a version number MAJOR.MINOR.PATCH, increment the:
+
+- MAJOR version when you make changes that require a transformation of the dataset before it can be
+used again.
+- MINOR version when old datasets are still readable but the API might have changed in
+backwards-compatible or incompatible ways.
+- PATCH version when you make backwards-compatible bug fixes.
+
+Additional labels for pre-release and build metadata are available as extensions to the
+MAJOR.MINOR.PATCH format.
+
+Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
+existing dataset, and as such has to be carefully planned. Migrations between different minor
+versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once
+your code compiles there's no need for any data migration. Lastly, changes between two different
+patch versions should never break your build or dataset.
+
+For more background on our decision to adopt Serialization Versioning, read the blog post
+[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on
+[this comment on Dgraph's Discuss forum][discuss].
+
+[blog]: https://blog.dgraph.io/post/serialization-versioning/
+[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/appveyor.yml b/vendor/github.com/dgraph-io/badger/appveyor.yml
new file mode 100644
index 000000000..afa54ca0a
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/appveyor.yml
@@ -0,0 +1,49 @@
+# version format
+version: "{build}"
+
+# Operating system (build VM template)
+os: Windows Server 2012 R2
+
+# Platform.
+platform: x64
+
+clone_folder: c:\gopath\src\github.com\dgraph-io\badger
+
+# Environment variables
+environment:
+ GOVERSION: 1.8.3
+ GOPATH: c:\gopath
+ GO111MODULE: on
+
+# scripts that run after cloning repository
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+ - go env
+ - python --version
+
+# To run your custom scripts instead of automatic MSBuild
+build_script:
+ # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
+ - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
+ - cd c:\gopath\src\github.com\dgraph-io\badger
+ - git branch
+ - go get -t ./...
+
+# To run your custom scripts instead of automatic tests
+test_script:
+ # Unit tests
+ - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
+ - go test -v github.com/dgraph-io/badger/...
+ - go test -v -vlog_mmap=false github.com/dgraph-io/badger/...
+ - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
+
+notifications:
+ - provider: Email
+ to:
+ - pawan@dgraph.io
+ on_build_failure: true
+ on_build_status_changed: true
+# to disable deployment
+deploy: off
+
diff --git a/vendor/github.com/dgraph-io/badger/backup.go b/vendor/github.com/dgraph-io/badger/backup.go
new file mode 100644
index 000000000..2569b3100
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/backup.go
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/binary"
+ "io"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/y"
+)
+
+// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the
+// DB. For more control over how many goroutines are used to generate the backup, or if you wish to
+// backup only a certain range of keys, use Stream.Backup directly.
+func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) {
+ stream := db.NewStream()
+ stream.LogPrefix = "DB.Backup"
+ return stream.Backup(w, since)
+}
+
+// Backup dumps a protobuf-encoded list of all entries in the database into the
+// given writer, that are newer than the specified version. It returns a
+// timestamp indicating when the entries were dumped which can be passed into a
+// later invocation to generate an incremental dump, of entries that have been
+// added/modified since the last invocation of Stream.Backup().
+//
+// This can be used to backup the data in a database at a given point in time.
+func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) {
+ stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) {
+ list := &pb.KVList{}
+ for ; itr.Valid(); itr.Next() {
+ item := itr.Item()
+ if !bytes.Equal(item.Key(), key) {
+ return list, nil
+ }
+ if item.Version() < since {
+ // Ignore versions less than given timestamp, or skip older
+ // versions of the given key.
+ return list, nil
+ }
+
+ var valCopy []byte
+ if !item.IsDeletedOrExpired() {
+ // No need to copy value, if item is deleted or expired.
+ var err error
+ valCopy, err = item.ValueCopy(nil)
+ if err != nil {
+ stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n",
+ item.Key(), item.Version(), err)
+ return nil, err
+ }
+ }
+
+ // clear txn bits
+ meta := item.meta &^ (bitTxn | bitFinTxn)
+ kv := &pb.KV{
+ Key: item.KeyCopy(nil),
+ Value: valCopy,
+ UserMeta: []byte{item.UserMeta()},
+ Version: item.Version(),
+ ExpiresAt: item.ExpiresAt(),
+ Meta: []byte{meta},
+ }
+ list.Kv = append(list.Kv, kv)
+
+ switch {
+ case item.DiscardEarlierVersions():
+ // If we need to discard earlier versions of this item, add a delete
+ // marker just below the current version.
+ list.Kv = append(list.Kv, &pb.KV{
+ Key: item.KeyCopy(nil),
+ Version: item.Version() - 1,
+ Meta: []byte{bitDelete},
+ })
+ return list, nil
+
+ case item.IsDeletedOrExpired():
+ return list, nil
+ }
+ }
+ return list, nil
+ }
+
+ var maxVersion uint64
+ stream.Send = func(list *pb.KVList) error {
+ for _, kv := range list.Kv {
+ if maxVersion < kv.Version {
+ maxVersion = kv.Version
+ }
+ }
+ return writeTo(list, w)
+ }
+
+ if err := stream.Orchestrate(context.Background()); err != nil {
+ return 0, err
+ }
+ return maxVersion, nil
+}
+
+func writeTo(list *pb.KVList, w io.Writer) error {
+ if err := binary.Write(w, binary.LittleEndian, uint64(list.Size())); err != nil {
+ return err
+ }
+ buf, err := list.Marshal()
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(buf)
+ return err
+}
+
+// KVLoader is used to write KVList objects in to badger. It can be used to restore a backup.
+type KVLoader struct {
+ db *DB
+ throttle *y.Throttle
+ entries []*Entry
+}
+
+// NewKVLoader returns a new instance of KVLoader.
+func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader {
+ return &KVLoader{
+ db: db,
+ throttle: y.NewThrottle(maxPendingWrites),
+ }
+}
+
+// Set writes the key-value pair to the database.
+func (l *KVLoader) Set(kv *pb.KV) error {
+ var userMeta, meta byte
+ if len(kv.UserMeta) > 0 {
+ userMeta = kv.UserMeta[0]
+ }
+ if len(kv.Meta) > 0 {
+ meta = kv.Meta[0]
+ }
+
+ l.entries = append(l.entries, &Entry{
+ Key: y.KeyWithTs(kv.Key, kv.Version),
+ Value: kv.Value,
+ UserMeta: userMeta,
+ ExpiresAt: kv.ExpiresAt,
+ meta: meta,
+ })
+ if len(l.entries) >= 1000 {
+ return l.send()
+ }
+ return nil
+}
+
+func (l *KVLoader) send() error {
+ if err := l.throttle.Do(); err != nil {
+ return err
+ }
+ if err := l.db.batchSetAsync(l.entries, func(err error) {
+ l.throttle.Done(err)
+ }); err != nil {
+ return err
+ }
+
+ l.entries = make([]*Entry, 0, 1000)
+ return nil
+}
+
+// Finish is meant to be called after all the key-value pairs have been loaded.
+func (l *KVLoader) Finish() error {
+ if len(l.entries) > 0 {
+ if err := l.send(); err != nil {
+ return err
+ }
+ }
+ return l.throttle.Finish()
+}
+
+// Load reads a protobuf-encoded list of all entries from a reader and writes
+// them to the database. This can be used to restore the database from a backup
+// made by calling DB.Backup(). If more complex logic is needed to restore a badger
+// backup, the KVLoader interface should be used instead.
+//
+// DB.Load() should be called on a database that is not running any other
+// concurrent transactions while it is running.
+func (db *DB) Load(r io.Reader, maxPendingWrites int) error {
+ br := bufio.NewReaderSize(r, 16<<10)
+ unmarshalBuf := make([]byte, 1<<10)
+
+ ldr := db.NewKVLoader(maxPendingWrites)
+ for {
+ var sz uint64
+ err := binary.Read(br, binary.LittleEndian, &sz)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ if cap(unmarshalBuf) < int(sz) {
+ unmarshalBuf = make([]byte, sz)
+ }
+
+ if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
+ return err
+ }
+
+ list := &pb.KVList{}
+ if err := list.Unmarshal(unmarshalBuf[:sz]); err != nil {
+ return err
+ }
+
+ for _, kv := range list.Kv {
+ if err := ldr.Set(kv); err != nil {
+ return err
+ }
+
+ // Update nextTxnTs, memtable stores this
+ // timestamp in badger head when flushed.
+ if kv.Version >= db.orc.nextTxnTs {
+ db.orc.nextTxnTs = kv.Version + 1
+ }
+ }
+ }
+
+ if err := ldr.Finish(); err != nil {
+ return err
+ }
+ db.orc.txnMark.Done(db.orc.nextTxnTs - 1)
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/batch.go b/vendor/github.com/dgraph-io/badger/batch.go
new file mode 100644
index 000000000..c94e0fed4
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/batch.go
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2018 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "sync"
+
+ "github.com/dgraph-io/badger/y"
+)
+
+// WriteBatch holds the necessary info to perform batched writes.
+type WriteBatch struct {
+ sync.Mutex
+ txn *Txn
+ db *DB
+ throttle *y.Throttle
+ err error
+}
+
+// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes,
+// batching them up as tightly as possible in a single transaction and using callbacks to avoid
+// waiting for them to commit, thus achieving good performance. This API hides away the logic of
+// creating and committing transactions. Due to the nature of SSI guarantees provided by Badger,
+// blind writes can never encounter transaction conflicts (ErrConflict).
+func (db *DB) NewWriteBatch() *WriteBatch {
+ return &WriteBatch{
+ db: db,
+ txn: db.newTransaction(true, true),
+ throttle: y.NewThrottle(16),
+ }
+}
+
+// SetMaxPendingTxns sets a limit on maximum number of pending transactions while writing batches.
+// This function should be called before using WriteBatch. Default value of MaxPendingTxns is
+// 16 to minimise memory usage.
+func (wb *WriteBatch) SetMaxPendingTxns(max int) {
+ wb.throttle = y.NewThrottle(max)
+}
+
+// Cancel function must be called if there's a chance that Flush might not get
+// called. If neither Flush or Cancel is called, the transaction oracle would
+// never get a chance to clear out the row commit timestamp map, thus causing an
+// unbounded memory consumption. Typically, you can call Cancel as a defer
+// statement right after NewWriteBatch is called.
+//
+// Note that any committed writes would still go through despite calling Cancel.
+func (wb *WriteBatch) Cancel() {
+ if err := wb.throttle.Finish(); err != nil {
+		wb.db.opt.Errorf("WriteBatch.Cancel error while finishing: %v", err)
+ }
+ wb.txn.Discard()
+}
+
+func (wb *WriteBatch) callback(err error) {
+ // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock.
+ defer wb.throttle.Done(err)
+ if err == nil {
+ return
+ }
+
+ wb.Lock()
+ defer wb.Unlock()
+ if wb.err != nil {
+ return
+ }
+ wb.err = err
+}
+
+// SetEntry is the equivalent of Txn.SetEntry.
+func (wb *WriteBatch) SetEntry(e *Entry) error {
+ wb.Lock()
+ defer wb.Unlock()
+
+ if err := wb.txn.SetEntry(e); err != ErrTxnTooBig {
+ return err
+ }
+	// Txn has reached its zenith. Commit now.
+ if cerr := wb.commit(); cerr != nil {
+ return cerr
+ }
+ // This time the error must not be ErrTxnTooBig, otherwise, we make the
+ // error permanent.
+ if err := wb.txn.SetEntry(e); err != nil {
+ wb.err = err
+ return err
+ }
+ return nil
+}
+
+// Set is equivalent of Txn.Set().
+func (wb *WriteBatch) Set(k, v []byte) error {
+ e := &Entry{Key: k, Value: v}
+ return wb.SetEntry(e)
+}
+
+// Delete is equivalent of Txn.Delete.
+func (wb *WriteBatch) Delete(k []byte) error {
+ wb.Lock()
+ defer wb.Unlock()
+
+ if err := wb.txn.Delete(k); err != ErrTxnTooBig {
+ return err
+ }
+ if err := wb.commit(); err != nil {
+ return err
+ }
+ if err := wb.txn.Delete(k); err != nil {
+ wb.err = err
+ return err
+ }
+ return nil
+}
+
+// Caller to commit must hold a write lock.
+func (wb *WriteBatch) commit() error {
+ if wb.err != nil {
+ return wb.err
+ }
+ if err := wb.throttle.Do(); err != nil {
+ return err
+ }
+ wb.txn.CommitWith(wb.callback)
+ wb.txn = wb.db.newTransaction(true, true)
+ wb.txn.readTs = 0 // We're not reading anything.
+ return wb.err
+}
+
+// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush
+// returns any error stored by WriteBatch.
+func (wb *WriteBatch) Flush() error {
+ wb.Lock()
+ _ = wb.commit()
+ wb.txn.Discard()
+ wb.Unlock()
+
+ if err := wb.throttle.Finish(); err != nil {
+ return err
+ }
+
+ return wb.err
+}
+
+// Error returns any errors encountered so far. No commits would be run once an error is detected.
+func (wb *WriteBatch) Error() error {
+ wb.Lock()
+ defer wb.Unlock()
+ return wb.err
+}
diff --git a/vendor/github.com/dgraph-io/badger/compaction.go b/vendor/github.com/dgraph-io/badger/compaction.go
new file mode 100644
index 000000000..931d56664
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/compaction.go
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "math"
+ "sync"
+
+ "golang.org/x/net/trace"
+
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+)
+
+type keyRange struct {
+ left []byte
+ right []byte
+ inf bool
+}
+
+var infRange = keyRange{inf: true}
+
+func (r keyRange) String() string {
+ return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf)
+}
+
+func (r keyRange) equals(dst keyRange) bool {
+ return bytes.Equal(r.left, dst.left) &&
+ bytes.Equal(r.right, dst.right) &&
+ r.inf == dst.inf
+}
+
+func (r keyRange) overlapsWith(dst keyRange) bool {
+ if r.inf || dst.inf {
+ return true
+ }
+
+ // If my left is greater than dst right, we have no overlap.
+ if y.CompareKeys(r.left, dst.right) > 0 {
+ return false
+ }
+ // If my right is less than dst left, we have no overlap.
+ if y.CompareKeys(r.right, dst.left) < 0 {
+ return false
+ }
+ // We have overlap.
+ return true
+}
+
+func getKeyRange(tables []*table.Table) keyRange {
+ if len(tables) == 0 {
+ return keyRange{}
+ }
+ smallest := tables[0].Smallest()
+ biggest := tables[0].Biggest()
+ for i := 1; i < len(tables); i++ {
+ if y.CompareKeys(tables[i].Smallest(), smallest) < 0 {
+ smallest = tables[i].Smallest()
+ }
+ if y.CompareKeys(tables[i].Biggest(), biggest) > 0 {
+ biggest = tables[i].Biggest()
+ }
+ }
+ return keyRange{
+ left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64),
+ right: y.KeyWithTs(y.ParseKey(biggest), 0),
+ }
+}
+
+type levelCompactStatus struct {
+ ranges []keyRange
+ delSize int64
+}
+
+func (lcs *levelCompactStatus) debug() string {
+ var b bytes.Buffer
+ for _, r := range lcs.ranges {
+ b.WriteString(r.String())
+ }
+ return b.String()
+}
+
+func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool {
+ for _, r := range lcs.ranges {
+ if r.overlapsWith(dst) {
+ return true
+ }
+ }
+ return false
+}
+
+func (lcs *levelCompactStatus) remove(dst keyRange) bool {
+ final := lcs.ranges[:0]
+ var found bool
+ for _, r := range lcs.ranges {
+ if !r.equals(dst) {
+ final = append(final, r)
+ } else {
+ found = true
+ }
+ }
+ lcs.ranges = final
+ return found
+}
+
+type compactStatus struct {
+ sync.RWMutex
+ levels []*levelCompactStatus
+}
+
+func (cs *compactStatus) toLog(tr trace.Trace) {
+ cs.RLock()
+ defer cs.RUnlock()
+
+ tr.LazyPrintf("Compaction status:")
+ for i, l := range cs.levels {
+ if l.debug() == "" {
+ continue
+ }
+ tr.LazyPrintf("[%d] %s", i, l.debug())
+ }
+}
+
+func (cs *compactStatus) overlapsWith(level int, this keyRange) bool {
+ cs.RLock()
+ defer cs.RUnlock()
+
+ thisLevel := cs.levels[level]
+ return thisLevel.overlapsWith(this)
+}
+
+func (cs *compactStatus) delSize(l int) int64 {
+ cs.RLock()
+ defer cs.RUnlock()
+ return cs.levels[l].delSize
+}
+
+type thisAndNextLevelRLocked struct{}
+
+// compareAndAdd will check whether we can run this compactDef. That it doesn't overlap with any
+// other running compaction. If it can be run, it would store this run in the compactStatus state.
+func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool {
+ cs.Lock()
+ defer cs.Unlock()
+
+ level := cd.thisLevel.level
+
+ y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
+ thisLevel := cs.levels[level]
+ nextLevel := cs.levels[level+1]
+
+ if thisLevel.overlapsWith(cd.thisRange) {
+ return false
+ }
+ if nextLevel.overlapsWith(cd.nextRange) {
+ return false
+ }
+ // Check whether this level really needs compaction or not. Otherwise, we'll end up
+ // running parallel compactions for the same level.
+ // Update: We should not be checking size here. Compaction priority already did the size checks.
+ // Here we should just be executing the wish of others.
+
+ thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
+ nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
+ thisLevel.delSize += cd.thisSize
+ return true
+}
+
+func (cs *compactStatus) delete(cd compactDef) {
+ cs.Lock()
+ defer cs.Unlock()
+
+ level := cd.thisLevel.level
+ y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
+
+ thisLevel := cs.levels[level]
+ nextLevel := cs.levels[level+1]
+
+ thisLevel.delSize -= cd.thisSize
+ found := thisLevel.remove(cd.thisRange)
+ found = nextLevel.remove(cd.nextRange) && found
+
+ if !found {
+ this := cd.thisRange
+ next := cd.nextRange
+ fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf)
+ fmt.Printf("This Level:\n%s\n", thisLevel.debug())
+ fmt.Println()
+ fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf)
+ fmt.Printf("Next Level:\n%s\n", nextLevel.debug())
+ log.Fatal("keyRange not found")
+ }
+}
diff --git a/vendor/github.com/dgraph-io/badger/db.go b/vendor/github.com/dgraph-io/badger/db.go
new file mode 100644
index 000000000..21bb22d6f
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/db.go
@@ -0,0 +1,1468 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "encoding/hex"
+ "expvar"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/dgraph-io/badger/options"
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/skl"
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+ humanize "github.com/dustin/go-humanize"
+ "github.com/pkg/errors"
+ "golang.org/x/net/trace"
+)
+
+var (
+ badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger.
+ head = []byte("!badger!head") // For storing value offset for replay.
+ txnKey = []byte("!badger!txn") // For indicating end of entries in txn.
+ badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC.
+ lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
+)
+
+type closers struct {
+ updateSize *y.Closer
+ compactors *y.Closer
+ memtable *y.Closer
+ writes *y.Closer
+ valueGC *y.Closer
+ pub *y.Closer
+}
+
+type callback func(kv *pb.KVList)
+
+// DB provides the various functions required to interact with Badger.
+// DB is thread-safe.
+type DB struct {
+ sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.
+
+ dirLockGuard *directoryLockGuard
+ // nil if Dir and ValueDir are the same
+ valueDirGuard *directoryLockGuard
+
+ closers closers
+ elog trace.EventLog
+ mt *skl.Skiplist // Our latest (actively written) in-memory table
+ imm []*skl.Skiplist // Add here only AFTER pushing to flushChan.
+ opt Options
+ manifest *manifestFile
+ lc *levelsController
+ vlog valueLog
+ vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt
+ writeCh chan *request
+ flushChan chan flushTask // For flushing memtables.
+ closeOnce sync.Once // For closing DB only once.
+
+ // Number of log rotates since the last memtable flush. We will access this field via atomic
+ // functions. Since we are not going to use any 64bit atomic functions, there is no need for
+ // 64 bit alignment of this struct(see #311).
+ logRotates int32
+
+ blockWrites int32
+
+ orc *oracle
+
+ pub *publisher
+}
+
+const (
+ kvWriteChCapacity = 1000
+)
+
+func (db *DB) replayFunction() func(Entry, valuePointer) error {
+ type txnEntry struct {
+ nk []byte
+ v y.ValueStruct
+ }
+
+ var txn []txnEntry
+ var lastCommit uint64
+
+ toLSM := func(nk []byte, vs y.ValueStruct) {
+ for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
+ db.elog.Printf("Replay: Making room for writes")
+ time.Sleep(10 * time.Millisecond)
+ }
+ db.mt.Put(nk, vs)
+ }
+
+ first := true
+ return func(e Entry, vp valuePointer) error { // Function for replaying.
+ if first {
+ db.elog.Printf("First key=%q\n", e.Key)
+ }
+ first = false
+
+ if db.orc.nextTxnTs < y.ParseTs(e.Key) {
+ db.orc.nextTxnTs = y.ParseTs(e.Key)
+ }
+
+ nk := make([]byte, len(e.Key))
+ copy(nk, e.Key)
+ var nv []byte
+ meta := e.meta
+ if db.shouldWriteValueToLSM(e) {
+ nv = make([]byte, len(e.Value))
+ copy(nv, e.Value)
+ } else {
+ nv = make([]byte, vptrSize)
+ vp.Encode(nv)
+ meta = meta | bitValuePointer
+ }
+
+ v := y.ValueStruct{
+ Value: nv,
+ Meta: meta,
+ UserMeta: e.UserMeta,
+ ExpiresAt: e.ExpiresAt,
+ }
+
+ if e.meta&bitFinTxn > 0 {
+ txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
+ if err != nil {
+ return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
+ }
+ y.AssertTrue(lastCommit == txnTs)
+ y.AssertTrue(len(txn) > 0)
+ // Got the end of txn. Now we can store them.
+ for _, t := range txn {
+ toLSM(t.nk, t.v)
+ }
+ txn = txn[:0]
+ lastCommit = 0
+
+ } else if e.meta&bitTxn > 0 {
+ txnTs := y.ParseTs(nk)
+ if lastCommit == 0 {
+ lastCommit = txnTs
+ }
+ if lastCommit != txnTs {
+ db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
+ lastCommit)
+ txn = txn[:0]
+ lastCommit = txnTs
+ }
+ te := txnEntry{nk: nk, v: v}
+ txn = append(txn, te)
+
+ } else {
+ // This entry is from a rewrite.
+ toLSM(nk, v)
+
+ // We shouldn't get this entry in the middle of a transaction.
+ y.AssertTrue(lastCommit == 0)
+ y.AssertTrue(len(txn) == 0)
+ }
+ return nil
+ }
+}
+
+// Open returns a new DB object.
+func Open(opt Options) (db *DB, err error) {
+ opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
+ opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)
+
+ if opt.ValueThreshold > ValueThresholdLimit {
+ return nil, ErrValueThreshold
+ }
+
+ if opt.ReadOnly {
+ // Can't truncate if the DB is read only.
+ opt.Truncate = false
+ // Do not perform compaction in read only mode.
+ opt.CompactL0OnClose = false
+ }
+
+ for _, path := range []string{opt.Dir, opt.ValueDir} {
+ dirExists, err := exists(path)
+ if err != nil {
+ return nil, y.Wrapf(err, "Invalid Dir: %q", path)
+ }
+ if !dirExists {
+ if opt.ReadOnly {
+ return nil, errors.Errorf("Cannot find directory %q for read-only open", path)
+ }
+ // Try to create the directory
+ err = os.Mkdir(path, 0700)
+ if err != nil {
+ return nil, y.Wrapf(err, "Error Creating Dir: %q", path)
+ }
+ }
+ }
+ absDir, err := filepath.Abs(opt.Dir)
+ if err != nil {
+ return nil, err
+ }
+ absValueDir, err := filepath.Abs(opt.ValueDir)
+ if err != nil {
+ return nil, err
+ }
+ var dirLockGuard, valueDirLockGuard *directoryLockGuard
+ dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if dirLockGuard != nil {
+ _ = dirLockGuard.release()
+ }
+ }()
+ if absValueDir != absDir {
+ valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if valueDirLockGuard != nil {
+ _ = valueDirLockGuard.release()
+ }
+ }()
+ }
+ if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
+ return nil, ErrValueLogSize
+ }
+ if !(opt.ValueLogLoadingMode == options.FileIO ||
+ opt.ValueLogLoadingMode == options.MemoryMap) {
+ return nil, ErrInvalidLoadingMode
+ }
+ manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if manifestFile != nil {
+ _ = manifestFile.close()
+ }
+ }()
+
+ db = &DB{
+ imm: make([]*skl.Skiplist, 0, opt.NumMemtables),
+ flushChan: make(chan flushTask, opt.NumMemtables),
+ writeCh: make(chan *request, kvWriteChCapacity),
+ opt: opt,
+ manifest: manifestFile,
+ elog: trace.NewEventLog("Badger", "DB"),
+ dirLockGuard: dirLockGuard,
+ valueDirGuard: valueDirLockGuard,
+ orc: newOracle(opt),
+ pub: newPublisher(),
+ }
+
+ // Calculate initial size.
+ db.calculateSize()
+ db.closers.updateSize = y.NewCloser(1)
+ go db.updateSize(db.closers.updateSize)
+ db.mt = skl.NewSkiplist(arenaSize(opt))
+
+ // newLevelsController potentially loads files in directory.
+ if db.lc, err = newLevelsController(db, &manifest); err != nil {
+ return nil, err
+ }
+
+ if !opt.ReadOnly {
+ db.closers.compactors = y.NewCloser(1)
+ db.lc.startCompact(db.closers.compactors)
+
+ db.closers.memtable = y.NewCloser(1)
+ go func() {
+ _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
+ }()
+ }
+
+ headKey := y.KeyWithTs(head, math.MaxUint64)
+ // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
+ vs, err := db.get(headKey)
+ if err != nil {
+ return nil, errors.Wrap(err, "Retrieving head")
+ }
+ db.orc.nextTxnTs = vs.Version
+ var vptr valuePointer
+ if len(vs.Value) > 0 {
+ vptr.Decode(vs.Value)
+ }
+
+ replayCloser := y.NewCloser(1)
+ go db.doWrites(replayCloser)
+
+ if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
+ return db, err
+ }
+ replayCloser.SignalAndWait() // Wait for replay to be applied first.
+
+ // Let's advance nextTxnTs to one more than whatever we observed via
+ // replaying the logs.
+ db.orc.txnMark.Done(db.orc.nextTxnTs)
+ // In normal mode, we must update readMark so older versions of keys can be removed during
+ // compaction when run in offline mode via the flatten tool.
+ db.orc.readMark.Done(db.orc.nextTxnTs)
+ db.orc.incrementNextTs()
+
+ db.writeCh = make(chan *request, kvWriteChCapacity)
+ db.closers.writes = y.NewCloser(1)
+ go db.doWrites(db.closers.writes)
+
+ db.closers.valueGC = y.NewCloser(1)
+ go db.vlog.waitOnGC(db.closers.valueGC)
+
+ db.closers.pub = y.NewCloser(1)
+ go db.pub.listenForUpdates(db.closers.pub)
+
+ valueDirLockGuard = nil
+ dirLockGuard = nil
+ manifestFile = nil
+ return db, nil
+}
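+
+// A minimal sketch of the intended call pattern from a client package, assuming
+// opt is an Options value with at least Dir and ValueDir set:
+//
+//	db, err := badger.Open(opt)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Close flushes pending updates and releases the directory locks.
+//	defer db.Close()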
+
+// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
+// disk. Calling DB.Close() multiple times would still only close the DB once.
+func (db *DB) Close() error {
+ var err error
+ db.closeOnce.Do(func() {
+ err = db.close()
+ })
+ return err
+}
+
+func (db *DB) close() (err error) {
+ db.elog.Printf("Closing database")
+
+ if err := db.vlog.flushDiscardStats(); err != nil {
+ return errors.Wrap(err, "failed to flush discard stats")
+ }
+
+ atomic.StoreInt32(&db.blockWrites, 1)
+
+ // Stop value GC first.
+ db.closers.valueGC.SignalAndWait()
+
+ // Stop writes next.
+ db.closers.writes.SignalAndWait()
+
+ db.closers.pub.SignalAndWait()
+
+ // Now close the value log.
+ if vlogErr := db.vlog.Close(); vlogErr != nil {
+ err = errors.Wrap(vlogErr, "DB.Close")
+ }
+
+ // Make sure that block writer is done pushing stuff into memtable!
+ // Otherwise, you will have a race condition: we are trying to flush memtables
+ // and remove them completely, while the block / memtable writer is still
+ // trying to push stuff into the memtable. This will also resolve the value
+ // offset problem: as we push into memtable, we update value offsets there.
+ if !db.mt.Empty() {
+ db.elog.Printf("Flushing memtable")
+ for {
+ pushedFlushTask := func() bool {
+ db.Lock()
+ defer db.Unlock()
+ y.AssertTrue(db.mt != nil)
+ select {
+ case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
+ db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
+ db.mt = nil // Will segfault if we try writing!
+ db.elog.Printf("pushed to flush chan\n")
+ return true
+ default:
+ // If we fail to push, we need to unlock and wait for a short while.
+ // The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
+ // TODO: Think about how to do this more cleanly, maybe without any locks.
+ }
+ return false
+ }()
+ if pushedFlushTask {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+ db.stopCompactions()
+
+ // Force Compact L0
+ // We don't need to care about cstatus since no parallel compaction is running.
+ if db.opt.CompactL0OnClose {
+ err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73})
+ switch err {
+ case errFillTables:
+ // This error only means that there might be enough tables to do a compaction. So, we
+ // should not report it to the end user to avoid confusing them.
+ case nil:
+ db.opt.Infof("Force compaction on level 0 done")
+ default:
+ db.opt.Warningf("While forcing compaction on level 0: %v", err)
+ }
+ }
+
+ if lcErr := db.lc.close(); err == nil {
+ err = errors.Wrap(lcErr, "DB.Close")
+ }
+ db.elog.Printf("Waiting for closer")
+ db.closers.updateSize.SignalAndWait()
+ db.orc.Stop()
+
+ db.elog.Finish()
+
+ if db.dirLockGuard != nil {
+ if guardErr := db.dirLockGuard.release(); err == nil {
+ err = errors.Wrap(guardErr, "DB.Close")
+ }
+ }
+ if db.valueDirGuard != nil {
+ if guardErr := db.valueDirGuard.release(); err == nil {
+ err = errors.Wrap(guardErr, "DB.Close")
+ }
+ }
+ if manifestErr := db.manifest.close(); err == nil {
+ err = errors.Wrap(manifestErr, "DB.Close")
+ }
+
+ // Fsync directories to ensure that lock file, and any other removed files whose directory
+ // we haven't specifically fsynced, are guaranteed to have their directory entry removal
+ // persisted to disk.
+ if syncErr := syncDir(db.opt.Dir); err == nil {
+ err = errors.Wrap(syncErr, "DB.Close")
+ }
+ if syncErr := syncDir(db.opt.ValueDir); err == nil {
+ err = errors.Wrap(syncErr, "DB.Close")
+ }
+
+ return err
+}
+
+const (
+ lockFile = "LOCK"
+)
+
+// Sync syncs database content to disk. This function provides
+// more control to user to sync data whenever required.
+func (db *DB) Sync() error {
+ return db.vlog.sync(math.MaxUint32)
+}
+
+// getMemTables returns the current memtables and increments their references.
+func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
+ db.RLock()
+ defer db.RUnlock()
+
+ tables := make([]*skl.Skiplist, len(db.imm)+1)
+
+ // Get mutable memtable.
+ tables[0] = db.mt
+ tables[0].IncrRef()
+
+ // Get immutable memtables.
+ last := len(db.imm) - 1
+ for i := range db.imm {
+ tables[i+1] = db.imm[last-i]
+ tables[i+1].IncrRef()
+ }
+ return tables, func() {
+ for _, tbl := range tables {
+ tbl.DecrRef()
+ }
+ }
+}
+
+// get returns the value in memtable or disk for given key.
+// Note that value will include meta byte.
+//
+// IMPORTANT: We should never write an entry with an older timestamp for the same key. We need to
+// maintain this invariant to search for the latest value of a key, or else we need to search in all
+// tables and find the max version among them. To maintain this invariant, we also need to ensure
+// that all versions of a key are always present in the same table from level 1, because compaction
+// can push any table down.
+//
+// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
+// value log to another (while reclaiming space during value log GC), we have logically moved this
+// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
+// gets, we can stop going down the LSM tree once we find any version of the key (note however that
+// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
+// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
+// to ensure that we pick the highest version of the movekey present.
+func (db *DB) get(key []byte) (y.ValueStruct, error) {
+ tables, decr := db.getMemTables() // Lock should be released.
+ defer decr()
+
+ var maxVs *y.ValueStruct
+ var version uint64
+ if bytes.HasPrefix(key, badgerMove) {
+ // If we are checking badgerMove key, we should look into all the
+ // levels, so we can pick up the newer versions, which might have been
+ // compacted down the tree.
+ maxVs = &y.ValueStruct{}
+ version = y.ParseTs(key)
+ }
+
+ y.NumGets.Add(1)
+ for i := 0; i < len(tables); i++ {
+ vs := tables[i].Get(key)
+ y.NumMemtableGets.Add(1)
+ if vs.Meta == 0 && vs.Value == nil {
+ continue
+ }
+ // Found a version of the key. For user keyspace, return immediately. For move keyspace,
+ // continue iterating, unless we found a version == given key version.
+ if maxVs == nil || vs.Version == version {
+ return vs, nil
+ }
+ if maxVs.Version < vs.Version {
+ *maxVs = vs
+ }
+ }
+ return db.lc.get(key, maxVs)
+}
+
+func (db *DB) updateHead(ptrs []valuePointer) {
+ var ptr valuePointer
+ for i := len(ptrs) - 1; i >= 0; i-- {
+ p := ptrs[i]
+ if !p.IsZero() {
+ ptr = p
+ break
+ }
+ }
+ if ptr.IsZero() {
+ return
+ }
+
+ db.Lock()
+ defer db.Unlock()
+ y.AssertTrue(!ptr.Less(db.vhead))
+ db.vhead = ptr
+}
+
+var requestPool = sync.Pool{
+ New: func() interface{} {
+ return new(request)
+ },
+}
+
+func (db *DB) shouldWriteValueToLSM(e Entry) bool {
+ return len(e.Value) < db.opt.ValueThreshold
+}
+
+func (db *DB) writeToLSM(b *request) error {
+ if len(b.Ptrs) != len(b.Entries) {
+ return errors.Errorf("Ptrs and Entries don't match: %+v", b)
+ }
+
+ for i, entry := range b.Entries {
+ if entry.meta&bitFinTxn != 0 {
+ continue
+ }
+ if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
+ db.mt.Put(entry.Key,
+ y.ValueStruct{
+ Value: entry.Value,
+ Meta: entry.meta,
+ UserMeta: entry.UserMeta,
+ ExpiresAt: entry.ExpiresAt,
+ })
+ } else {
+ var offsetBuf [vptrSize]byte
+ db.mt.Put(entry.Key,
+ y.ValueStruct{
+ Value: b.Ptrs[i].Encode(offsetBuf[:]),
+ Meta: entry.meta | bitValuePointer,
+ UserMeta: entry.UserMeta,
+ ExpiresAt: entry.ExpiresAt,
+ })
+ }
+ }
+ return nil
+}
+
+// writeRequests is called serially by only one goroutine.
+func (db *DB) writeRequests(reqs []*request) error {
+ if len(reqs) == 0 {
+ return nil
+ }
+
+ done := func(err error) {
+ for _, r := range reqs {
+ r.Err = err
+ r.Wg.Done()
+ }
+ }
+ db.elog.Printf("writeRequests called. Writing to value log")
+
+ err := db.vlog.write(reqs)
+ if err != nil {
+ done(err)
+ return err
+ }
+
+ db.elog.Printf("Sending updates to subscribers")
+ db.pub.sendUpdates(reqs)
+ db.elog.Printf("Writing to memtable")
+ var count int
+ for _, b := range reqs {
+ if len(b.Entries) == 0 {
+ continue
+ }
+ count += len(b.Entries)
+ var i uint64
+ for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
+ i++
+ if i%100 == 0 {
+ db.elog.Printf("Making room for writes")
+ }
+ // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
+ // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
+ // you will get a deadlock.
+ time.Sleep(10 * time.Millisecond)
+ }
+ if err != nil {
+ done(err)
+ return errors.Wrap(err, "writeRequests")
+ }
+ if err := db.writeToLSM(b); err != nil {
+ done(err)
+ return errors.Wrap(err, "writeRequests")
+ }
+ db.updateHead(b.Ptrs)
+ }
+ done(nil)
+ db.elog.Printf("%d entries written", count)
+ return nil
+}
+
+func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
+ if atomic.LoadInt32(&db.blockWrites) == 1 {
+ return nil, ErrBlockedWrites
+ }
+ var count, size int64
+ for _, e := range entries {
+ size += int64(e.estimateSize(db.opt.ValueThreshold))
+ count++
+ }
+ if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
+ return nil, ErrTxnTooBig
+ }
+
+	// We can only service one request because we need each txn to be stored in a contiguous section.
+	// Txns should not interleave among other txns or rewrites.
+ req := requestPool.Get().(*request)
+ req.Entries = entries
+ req.Wg = sync.WaitGroup{}
+ req.Wg.Add(1)
+ req.IncrRef() // for db write
+ req.IncrRef() // for publisher updates
+ db.writeCh <- req // Handled in doWrites.
+ y.NumPuts.Add(int64(len(entries)))
+
+ return req, nil
+}
+
+func (db *DB) doWrites(lc *y.Closer) {
+ defer lc.Done()
+ pendingCh := make(chan struct{}, 1)
+
+ writeRequests := func(reqs []*request) {
+ if err := db.writeRequests(reqs); err != nil {
+ db.opt.Errorf("writeRequests: %v", err)
+ }
+ <-pendingCh
+ }
+
+ // This variable tracks the number of pending writes.
+ reqLen := new(expvar.Int)
+ y.PendingWrites.Set(db.opt.Dir, reqLen)
+
+ reqs := make([]*request, 0, 10)
+ for {
+ var r *request
+ select {
+ case r = <-db.writeCh:
+ case <-lc.HasBeenClosed():
+ goto closedCase
+ }
+
+ for {
+ reqs = append(reqs, r)
+ reqLen.Set(int64(len(reqs)))
+
+ if len(reqs) >= 3*kvWriteChCapacity {
+ pendingCh <- struct{}{} // blocking.
+ goto writeCase
+ }
+
+ select {
+ // Either push to pending, or continue to pick from writeCh.
+ case r = <-db.writeCh:
+ case pendingCh <- struct{}{}:
+ goto writeCase
+ case <-lc.HasBeenClosed():
+ goto closedCase
+ }
+ }
+
+ closedCase:
+ close(db.writeCh)
+ for r := range db.writeCh { // Flush the channel.
+ reqs = append(reqs, r)
+ }
+
+ pendingCh <- struct{}{} // Push to pending before doing a write.
+ writeRequests(reqs)
+ return
+
+ writeCase:
+ go writeRequests(reqs)
+ reqs = make([]*request, 0, 10)
+ reqLen.Set(0)
+ }
+}
+
+// batchSet applies a list of badger.Entry. If a request level error occurs it
+// will be returned.
+// Check(kv.BatchSet(entries))
+func (db *DB) batchSet(entries []*Entry) error {
+ req, err := db.sendToWriteCh(entries)
+ if err != nil {
+ return err
+ }
+
+ return req.Wait()
+}
+
+// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
+// function which is called when all the sets are complete. If a request level
+// error occurs, it will be passed back via the callback.
+// err := kv.BatchSetAsync(entries, func(err error) {
+//    Check(err)
+// })
+func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
+ req, err := db.sendToWriteCh(entries)
+ if err != nil {
+ return err
+ }
+ go func() {
+ err := req.Wait()
+ // Write is complete. Let's call the callback function now.
+ f(err)
+ }()
+ return nil
+}
+
+var errNoRoom = errors.New("No room for write")
+
+// ensureRoomForWrite is always called serially.
+func (db *DB) ensureRoomForWrite() error {
+ var err error
+ db.Lock()
+ defer db.Unlock()
+
+ // Here we determine if we need to force flush memtable. Given we rotated log file, it would
+ // make sense to force flush a memtable, so the updated value head would have a chance to be
+ // pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled,
+ // which can take a lot longer if the write load has fewer keys and larger values. This force
+ // flush, thus avoids the need to read through a lot of log files on a crash and restart.
+	// The above approach is simple but has a small drawback: we call ensureRoomForWrite before
+	// inserting every entry into the memtable, while db.head is only updated after all entries
+	// for a request have been inserted. If we have done >= db.logRotates rotations, the condition
+	// below will already be true while inserting the first entry, and we would end up flushing an
+	// old value of db.head. Hence we limit the number of value log files to be read to
+	// db.logRotates only.
+ forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
+
+ if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
+ return nil
+ }
+
+ y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
+ select {
+ case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
+ // After every memtable flush, let's reset the counter.
+ atomic.StoreInt32(&db.logRotates, 0)
+
+ // Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
+ err = db.vlog.sync(db.vhead.Fid)
+ if err != nil {
+ return err
+ }
+
+ db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
+ db.mt.MemSize(), len(db.flushChan))
+ // We manage to push this task. Let's modify imm.
+ db.imm = append(db.imm, db.mt)
+ db.mt = skl.NewSkiplist(arenaSize(db.opt))
+ // New memtable is empty. We certainly have room.
+ return nil
+ default:
+ // We need to do this to unlock and allow the flusher to modify imm.
+ return errNoRoom
+ }
+}
+
+func arenaSize(opt Options) int64 {
+ return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
+}
+
+// WriteLevel0Table flushes memtable.
+func writeLevel0Table(ft flushTask, f io.Writer) error {
+ iter := ft.mt.NewIterator()
+ defer iter.Close()
+ b := table.NewTableBuilder()
+ defer b.Close()
+ for iter.SeekToFirst(); iter.Valid(); iter.Next() {
+ if len(ft.dropPrefix) > 0 && bytes.HasPrefix(iter.Key(), ft.dropPrefix) {
+ continue
+ }
+ if err := b.Add(iter.Key(), iter.Value()); err != nil {
+ return err
+ }
+ }
+ _, err := f.Write(b.Finish())
+ return err
+}
+
+type flushTask struct {
+ mt *skl.Skiplist
+ vptr valuePointer
+ dropPrefix []byte
+}
+
+// handleFlushTask must be run serially.
+func (db *DB) handleFlushTask(ft flushTask) error {
+	// There can be a scenario where an empty memtable is flushed. For example, the memtable is
+	// empty and, after writing a request to the value log, the rotation count exceeds
+	// db.LogRotatesToFlush.
+ if ft.mt.Empty() {
+ return nil
+ }
+
+ // Store badger head even if vptr is zero, need it for readTs
+ db.opt.Debugf("Storing value log head: %+v\n", ft.vptr)
+ db.elog.Printf("Storing offset: %+v\n", ft.vptr)
+ offset := make([]byte, vptrSize)
+ ft.vptr.Encode(offset)
+
+ // Pick the max commit ts, so in case of crash, our read ts would be higher than all the
+ // commits.
+ headTs := y.KeyWithTs(head, db.orc.nextTs())
+ ft.mt.Put(headTs, y.ValueStruct{Value: offset})
+
+ fileID := db.lc.reserveFileID()
+ fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
+ if err != nil {
+ return y.Wrap(err)
+ }
+
+ // Don't block just to sync the directory entry.
+ dirSyncCh := make(chan error)
+ go func() { dirSyncCh <- syncDir(db.opt.Dir) }()
+
+ err = writeLevel0Table(ft, fd)
+ dirSyncErr := <-dirSyncCh
+
+ if err != nil {
+ db.elog.Errorf("ERROR while writing to level 0: %v", err)
+ return err
+ }
+ if dirSyncErr != nil {
+ // Do dir sync as best effort. No need to return due to an error there.
+ db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
+ }
+
+ tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil)
+ if err != nil {
+ db.elog.Printf("ERROR while opening table: %v", err)
+ return err
+ }
+ // We own a ref on tbl.
+ err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure)
+ _ = tbl.DecrRef() // Releases our ref.
+ return err
+}
+
+// flushMemtable must keep running until we send it an empty flushTask. If there
+// are errors during handling the flush task, we'll retry indefinitely.
+func (db *DB) flushMemtable(lc *y.Closer) error {
+ defer lc.Done()
+
+ for ft := range db.flushChan {
+ if ft.mt == nil {
+ // We close db.flushChan now, instead of sending a nil ft.mt.
+ continue
+ }
+ for {
+ err := db.handleFlushTask(ft)
+ if err == nil {
+ // Update s.imm. Need a lock.
+ db.Lock()
+ // This is a single-threaded operation. ft.mt corresponds to the head of
+ // db.imm list. Once we flush it, we advance db.imm. The next ft.mt
+ // which would arrive here would match db.imm[0], because we acquire a
+ // lock over DB when pushing to flushChan.
+ // TODO: This logic is dirty AF. Any change and this could easily break.
+ y.AssertTrue(ft.mt == db.imm[0])
+ db.imm = db.imm[1:]
+ ft.mt.DecrRef() // Return memory.
+ db.Unlock()
+
+ break
+ }
+ // Encountered error. Retry indefinitely.
+ db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
+ time.Sleep(time.Second)
+ }
+ }
+ return nil
+}
+
+func exists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return true, err
+}
+
+// This function does a filewalk, calculates the size of vlog and sst files and stores it in
+// y.LSMSize and y.VlogSize.
+func (db *DB) calculateSize() {
+ newInt := func(val int64) *expvar.Int {
+ v := new(expvar.Int)
+ v.Add(val)
+ return v
+ }
+
+ totalSize := func(dir string) (int64, int64) {
+ var lsmSize, vlogSize int64
+ err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ ext := filepath.Ext(path)
+ if ext == ".sst" {
+ lsmSize += info.Size()
+ } else if ext == ".vlog" {
+ vlogSize += info.Size()
+ }
+ return nil
+ })
+ if err != nil {
+ db.elog.Printf("Got error while calculating total size of directory: %s", dir)
+ }
+ return lsmSize, vlogSize
+ }
+
+ lsmSize, vlogSize := totalSize(db.opt.Dir)
+ y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
+ // If valueDir is different from dir, we'd have to do another walk.
+ if db.opt.ValueDir != db.opt.Dir {
+ _, vlogSize = totalSize(db.opt.ValueDir)
+ }
+ y.VlogSize.Set(db.opt.Dir, newInt(vlogSize))
+}
+
+func (db *DB) updateSize(lc *y.Closer) {
+ defer lc.Done()
+
+ metricsTicker := time.NewTicker(time.Minute)
+ defer metricsTicker.Stop()
+
+ for {
+ select {
+ case <-metricsTicker.C:
+ db.calculateSize()
+ case <-lc.HasBeenClosed():
+ return
+ }
+ }
+}
+
+// RunValueLogGC triggers a value log garbage collection.
+//
+// It picks value log files to perform GC based on statistics that are collected
+// during compactions. If no such statistics are available, then log files are
+// picked in random order. The process stops as soon as the first log file is
+// encountered which does not result in garbage collection.
+//
+// When a log file is picked, it is first sampled. If the sample shows that at
+// least a discardRatio fraction of the file's space can be discarded, the file is
+// rewritten.
+//
+// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite error is
+// returned, indicating that the call resulted in no file rewrites.
+//
+// We recommend setting discardRatio to 0.5, thus indicating that a file be
+// rewritten if half the space can be discarded. This results in a lifetime
+// value log write amplification of 2 (1 from original write + 0.5 rewrite +
+// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer
+// space reclaims, while setting it to a lower value would result in more space
+// reclaims at the cost of increased activity on the LSM tree. discardRatio
+// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
+// ErrInvalidRequest is returned.
+//
+// Only one GC is allowed at a time. If another value log GC is running, or DB
+// has been closed, this would return an ErrRejected.
+//
+// Note: Every time GC is run, it would produce a spike of activity on the LSM
+// tree.
+func (db *DB) RunValueLogGC(discardRatio float64) error {
+ if discardRatio >= 1.0 || discardRatio <= 0.0 {
+ return ErrInvalidRequest
+ }
+
+ // Find head on disk
+ headKey := y.KeyWithTs(head, math.MaxUint64)
+ // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
+ val, err := db.lc.get(headKey, nil)
+ if err != nil {
+ return errors.Wrap(err, "Retrieving head from on-disk LSM")
+ }
+
+ var head valuePointer
+ if len(val.Value) > 0 {
+ head.Decode(val.Value)
+ }
+
+ // Pick a log file and run GC
+ return db.vlog.runGC(discardRatio, head)
+}
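+
+// A sketch of a periodic GC loop a caller might run, using the recommended
+// discardRatio of 0.5 from the comment above; the ticker interval is illustrative:
+//
+//	ticker := time.NewTicker(10 * time.Minute)
+//	defer ticker.Stop()
+//	for range ticker.C {
+//		// Keep collecting until a pass produces no rewrite
+//		// (RunValueLogGC returns ErrNoRewrite or ErrRejected then).
+//		for db.RunValueLogGC(0.5) == nil {
+//		}
+//	}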
+
+// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
+// call RunValueLogGC.
+func (db *DB) Size() (lsm, vlog int64) {
+ if y.LSMSize.Get(db.opt.Dir) == nil {
+ lsm, vlog = 0, 0
+ return
+ }
+ lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value()
+ vlog = y.VlogSize.Get(db.opt.Dir).(*expvar.Int).Value()
+ return
+}
+
+// Sequence represents a Badger sequence.
+type Sequence struct {
+ sync.Mutex
+ db *DB
+ key []byte
+ next uint64
+ leased uint64
+ bandwidth uint64
+}
+
+// Next would return the next integer in the sequence, updating the lease by running a transaction
+// if needed.
+func (seq *Sequence) Next() (uint64, error) {
+ seq.Lock()
+ defer seq.Unlock()
+ if seq.next >= seq.leased {
+ if err := seq.updateLease(); err != nil {
+ return 0, err
+ }
+ }
+ val := seq.next
+ seq.next++
+ return val, nil
+}
+
+// Release the leased sequence to avoid wasted integers. This should be done right
+// before closing the associated DB. However it is valid to use the sequence after
+// it was released, causing a new lease with full bandwidth.
+func (seq *Sequence) Release() error {
+ seq.Lock()
+ defer seq.Unlock()
+ err := seq.db.Update(func(txn *Txn) error {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], seq.next)
+ return txn.SetEntry(NewEntry(seq.key, buf[:]))
+ })
+ if err != nil {
+ return err
+ }
+ seq.leased = seq.next
+ return nil
+}
+
+func (seq *Sequence) updateLease() error {
+ return seq.db.Update(func(txn *Txn) error {
+ item, err := txn.Get(seq.key)
+ if err == ErrKeyNotFound {
+ seq.next = 0
+ } else if err != nil {
+ return err
+ } else {
+ var num uint64
+ if err := item.Value(func(v []byte) error {
+ num = binary.BigEndian.Uint64(v)
+ return nil
+ }); err != nil {
+ return err
+ }
+ seq.next = num
+ }
+
+ lease := seq.next + seq.bandwidth
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], lease)
+ if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
+ return err
+ }
+ seq.leased = lease
+ return nil
+ })
+}
+
+// GetSequence would initiate a new sequence object, generating it from the stored lease, if
+// available, in the database. Sequence can be used to get a list of monotonically increasing
+// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
+// size of the lease, determining how many Next() requests can be served from memory.
+//
+// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
+func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
+ if db.opt.managedTxns {
+ panic("Cannot use GetSequence with managedDB=true.")
+ }
+
+ switch {
+ case len(key) == 0:
+ return nil, ErrEmptyKey
+ case bandwidth == 0:
+ return nil, ErrZeroBandwidth
+ }
+ seq := &Sequence{
+ db: db,
+ key: key,
+ next: 0,
+ leased: 0,
+ bandwidth: bandwidth,
+ }
+ err := seq.updateLease()
+ return seq, err
+}
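+
+// A sketch of typical Sequence usage; the key and bandwidth are illustrative.
+// Release is called before closing the DB so unused integers in the lease are
+// not wasted:
+//
+//	seq, err := db.GetSequence([]byte("order-ids"), 1000)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer seq.Release()
+//	num, err := seq.Next() // served from the in-memory lease when possible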
+
+// Tables gets the TableInfo objects from the level controller. If withKeysCount
+// is true, TableInfo objects also contain counts of keys for the tables.
+func (db *DB) Tables(withKeysCount bool) []TableInfo {
+ return db.lc.getTableInfo(withKeysCount)
+}
+
+// KeySplits can be used to get rough key ranges to divide up iteration over
+// the DB.
+func (db *DB) KeySplits(prefix []byte) []string {
+ var splits []string
+ // We just want table ranges here and not keys count.
+ for _, ti := range db.Tables(false) {
+ // We don't use ti.Left, because that has a tendency to store !badger
+ // keys.
+ if bytes.HasPrefix(ti.Right, prefix) {
+ splits = append(splits, string(ti.Right))
+ }
+ }
+ sort.Strings(splits)
+ return splits
+}
+
+// MaxBatchCount returns max possible entries in batch
+func (db *DB) MaxBatchCount() int64 {
+ return db.opt.maxBatchCount
+}
+
+// MaxBatchSize returns max possible batch size
+func (db *DB) MaxBatchSize() int64 {
+ return db.opt.maxBatchSize
+}
+
+func (db *DB) stopCompactions() {
+ // Stop memtable flushes.
+ if db.closers.memtable != nil {
+ close(db.flushChan)
+ db.closers.memtable.SignalAndWait()
+ }
+ // Stop compactions.
+ if db.closers.compactors != nil {
+ db.closers.compactors.SignalAndWait()
+ }
+}
+
+func (db *DB) startCompactions() {
+ // Resume compactions.
+ if db.closers.compactors != nil {
+ db.closers.compactors = y.NewCloser(1)
+ db.lc.startCompact(db.closers.compactors)
+ }
+ if db.closers.memtable != nil {
+ db.flushChan = make(chan flushTask, db.opt.NumMemtables)
+ db.closers.memtable = y.NewCloser(1)
+ go func() {
+ _ = db.flushMemtable(db.closers.memtable)
+ }()
+ }
+}
+
+// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
+// level. This ensures that all the versions of keys are colocated and not split across multiple
+// levels, which is necessary after a restore from backup. During Flatten, live compactions are
+// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
+// between flattening the tree and new tables being created at level zero.
+func (db *DB) Flatten(workers int) error {
+ db.stopCompactions()
+ defer db.startCompactions()
+
+ compactAway := func(cp compactionPriority) error {
+ db.opt.Infof("Attempting to compact with %+v\n", cp)
+ errCh := make(chan error, 1)
+ for i := 0; i < workers; i++ {
+ go func() {
+ errCh <- db.lc.doCompact(cp)
+ }()
+ }
+ var success int
+ var rerr error
+ for i := 0; i < workers; i++ {
+ err := <-errCh
+ if err != nil {
+ rerr = err
+ db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
+ } else {
+ success++
+ }
+ }
+ if success == 0 {
+ return rerr
+ }
+ // We could do at least one successful compaction. So, we'll consider this a success.
+ db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
+ success, cp.level)
+ return nil
+ }
+
+ hbytes := func(sz int64) string {
+ return humanize.Bytes(uint64(sz))
+ }
+
+ for {
+ db.opt.Infof("\n")
+ var levels []int
+ for i, l := range db.lc.levels {
+ sz := l.getTotalSize()
+ db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
+ i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
+ if sz > 0 {
+ levels = append(levels, i)
+ }
+ }
+ if len(levels) <= 1 {
+ prios := db.lc.pickCompactLevels()
+ if len(prios) == 0 || prios[0].score <= 1.0 {
+ db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
+ return nil
+ }
+ if err := compactAway(prios[0]); err != nil {
+ return err
+ }
+ continue
+ }
+ // Create an artificial compaction priority, to ensure that we compact the level.
+ cp := compactionPriority{level: levels[0], score: 1.71}
+ if err := compactAway(cp); err != nil {
+ return err
+ }
+ }
+}
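+
+// A sketch of a typical offline invocation, assuming writes are quiesced as
+// recommended above; the worker count is illustrative:
+//
+//	if err := db.Flatten(3); err != nil {
+//		log.Fatalf("flatten: %v", err)
+//	}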
+
+func (db *DB) prepareToDrop() func() {
+ if db.opt.ReadOnly {
+ panic("Attempting to drop data in read-only mode.")
+ }
+ // Stop accepting new writes.
+ atomic.StoreInt32(&db.blockWrites, 1)
+
+ // Make all pending writes finish. The following will also close writeCh.
+ db.closers.writes.SignalAndWait()
+ db.opt.Infof("Writes flushed. Stopping compactions now...")
+
+ // Stop all compactions.
+ db.stopCompactions()
+ return func() {
+ db.opt.Infof("Resuming writes")
+ db.startCompactions()
+
+ db.writeCh = make(chan *request, kvWriteChCapacity)
+ db.closers.writes = y.NewCloser(1)
+ go db.doWrites(db.closers.writes)
+
+ // Resume writes.
+ atomic.StoreInt32(&db.blockWrites, 0)
+ }
+}
+
+// DropAll would drop all the data stored in Badger. It does this in the following way.
+// - Stop accepting new writes.
+// - Pause memtable flushes and compactions.
+// - Pick all tables from all levels, create a changeset to delete all these
+// tables and apply it to manifest.
+// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
+// - Resume memtable flushes and compactions.
+//
+// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
+// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
+// writes are paused before running DropAll, and resumed after it is finished.
+func (db *DB) DropAll() error {
+ f, err := db.dropAll()
+ if err != nil {
+ return err
+ }
+ if f == nil {
+ panic("both error and returned function cannot be nil in DropAll")
+ }
+ f()
+ return nil
+}
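+
+// A sketch of the pause-drop-resume pattern suggested by the note above;
+// pauseReads and resumeReads stand in for application-level coordination:
+//
+//	pauseReads() // hypothetical: stop issuing reads against db
+//	if err := db.DropAll(); err != nil {
+//		log.Fatal(err)
+//	}
+//	resumeReads() // hypothetical: safe to read again once DropAll returns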
+
+func (db *DB) dropAll() (func(), error) {
+ db.opt.Infof("DropAll called. Blocking writes...")
+ f := db.prepareToDrop()
+
+ // Block all foreign interactions with memory tables.
+ db.Lock()
+ defer db.Unlock()
+
+ // Remove inmemory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
+ db.mt.DecrRef()
+ for _, mt := range db.imm {
+ mt.DecrRef()
+ }
+ db.imm = db.imm[:0]
+ db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
+
+ num, err := db.lc.dropTree()
+ if err != nil {
+ return nil, err
+ }
+ db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
+
+ num, err = db.vlog.dropAll()
+ if err != nil {
+ return nil, err
+ }
+ db.vhead = valuePointer{} // Zero it out.
+ db.lc.nextFileID = 1
+ db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
+ return f, nil
+}
+
+// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
+// - Stop accepting new writes.
+// - Stop memtable flushes and compactions.
+// - Flush out all memtables, skipping over keys with the given prefix, Kp.
+// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
+// back after a restart.
+// - Compact L0->L1, skipping over Kp.
+// - Compact rest of the levels, Li->Li, picking tables which have Kp.
+// - Resume memtable flushes, compactions and writes.
+func (db *DB) DropPrefix(prefix []byte) error {
+ db.opt.Infof("DropPrefix called on %s. Blocking writes...", hex.Dump(prefix))
+ f := db.prepareToDrop()
+ defer f()
+
+ // Block all foreign interactions with memory tables.
+ db.Lock()
+ defer db.Unlock()
+
+ db.imm = append(db.imm, db.mt)
+ for _, memtable := range db.imm {
+ if memtable.Empty() {
+ memtable.DecrRef()
+ continue
+ }
+ task := flushTask{
+ mt: memtable,
+ // Ensure that the head of value log gets persisted to disk.
+ vptr: db.vhead,
+ dropPrefix: prefix,
+ }
+ db.opt.Debugf("Flushing memtable")
+ if err := db.handleFlushTask(task); err != nil {
+ db.opt.Errorf("While trying to flush memtable: %v", err)
+ return err
+ }
+ memtable.DecrRef()
+ }
+ db.imm = db.imm[:0]
+ db.mt = skl.NewSkiplist(arenaSize(db.opt))
+
+ // Drop prefixes from the levels.
+ if err := db.lc.dropPrefix(prefix); err != nil {
+ return err
+ }
+ db.opt.Infof("DropPrefix done")
+ return nil
+}
+
+// Subscribe can be used to watch key changes for the given key prefixes.
+func (db *DB) Subscribe(ctx context.Context, cb callback, prefix []byte, prefixes ...[]byte) error {
+ if cb == nil {
+ return ErrNilCallback
+ }
+ prefixes = append(prefixes, prefix)
+ c := y.NewCloser(1)
+ recvCh, id := db.pub.newSubscriber(c, prefixes...)
+ slurp := func(batch *pb.KVList) {
+ defer func() {
+ if len(batch.GetKv()) > 0 {
+ cb(batch)
+ }
+ }()
+ for {
+ select {
+ case kvs := <-recvCh:
+ batch.Kv = append(batch.Kv, kvs.Kv...)
+ default:
+ return
+ }
+ }
+ }
+ for {
+ select {
+ case <-c.HasBeenClosed():
+ slurp(new(pb.KVList))
+ // Drain if any pending updates.
+ c.Done()
+ // No need to delete here. Closer will be called only while
+ // closing DB. Subscriber will be deleted by cleanSubscribers.
+ return nil
+ case <-ctx.Done():
+ c.Done()
+ db.pub.deleteSubscriber(id)
+ // Delete the subscriber to avoid further updates.
+ return ctx.Err()
+ case batch := <-recvCh:
+ slurp(batch)
+ }
+ }
+}
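+
+// A sketch of watching a prefix with Subscribe, assuming a cancellable context.
+// Subscribe blocks until the context is cancelled or the DB is closed, so it is
+// typically run in its own goroutine; the prefix and callback body are illustrative:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	go func() {
+//		_ = db.Subscribe(ctx, func(kvs *pb.KVList) {
+//			for _, kv := range kvs.GetKv() {
+//				fmt.Printf("update: %s\n", kv.Key)
+//			}
+//		}, []byte("user/"))
+//	}()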
diff --git a/vendor/github.com/dgraph-io/badger/dir_unix.go b/vendor/github.com/dgraph-io/badger/dir_unix.go
new file mode 100644
index 000000000..d56e6e821
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/dir_unix.go
@@ -0,0 +1,118 @@
+// +build !windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
+// of the locking mechanism, it's just advisory.
+type directoryLockGuard struct {
+ // File handle on the directory, which we've flocked.
+ f *os.File
+ // The absolute path to our pid file.
+ path string
+ // Was this a shared lock for a read-only database?
+ readOnly bool
+}
+
+// acquireDirectoryLock gets a lock on the directory (using flock). If
+// this is not read-only, it will also write our pid to
+// dirPath/pidFileName for convenience.
+func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
+ *directoryLockGuard, error) {
+ // Convert to absolute path so that Release still works even if we do an unbalanced
+ // chdir in the meantime.
+ absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
+ if err != nil {
+ return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
+ }
+ f, err := os.Open(dirPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
+ }
+ opts := unix.LOCK_EX | unix.LOCK_NB
+ if readOnly {
+ opts = unix.LOCK_SH | unix.LOCK_NB
+ }
+
+ err = unix.Flock(int(f.Fd()), opts)
+ if err != nil {
+ f.Close()
+ return nil, errors.Wrapf(err,
+ "Cannot acquire directory lock on %q. Another process is using this Badger database.",
+ dirPath)
+ }
+
+ if !readOnly {
+ // Yes, we happily overwrite a pre-existing pid file. We're the
+ // only read-write badger process using this directory.
+ err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
+ if err != nil {
+ f.Close()
+ return nil, errors.Wrapf(err,
+ "Cannot write pid file %q", absPidFilePath)
+ }
+ }
+ return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
+}
+
+// Release deletes the pid file and releases our lock on the directory.
+func (guard *directoryLockGuard) release() error {
+ var err error
+ if !guard.readOnly {
+ // It's important that we remove the pid file first.
+ err = os.Remove(guard.path)
+ }
+
+ if closeErr := guard.f.Close(); err == nil {
+ err = closeErr
+ }
+ guard.path = ""
+ guard.f = nil
+
+ return err
+}
+
+// openDir opens a directory for syncing.
+func openDir(path string) (*os.File, error) { return os.Open(path) }
+
+// When you create or delete a file, you have to ensure the directory entry for the file is synced
+// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
+// or see https://github.com/coreos/etcd/issues/6368 for an example.)
+func syncDir(dir string) error {
+ f, err := openDir(dir)
+ if err != nil {
+ return errors.Wrapf(err, "While opening directory: %s.", dir)
+ }
+ err = y.FileSync(f)
+ closeErr := f.Close()
+ if err != nil {
+ return errors.Wrapf(err, "While syncing directory: %s.", dir)
+ }
+ return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
+}
diff --git a/vendor/github.com/dgraph-io/badger/dir_windows.go b/vendor/github.com/dgraph-io/badger/dir_windows.go
new file mode 100644
index 000000000..60f982e2c
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/dir_windows.go
@@ -0,0 +1,110 @@
+// +build windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+// OpenDir opens a directory in windows with write access for syncing.
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage.
+// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are
+// closed, which includes the specified handle and any other open or duplicated handles.
+// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants
+// NOTE: Added here to avoid importing golang.org/x/sys/windows
+const (
+ FILE_ATTRIBUTE_TEMPORARY = 0x00000100
+ FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
+)
+
+func openDir(path string) (*os.File, error) {
+ fd, err := openDirWin(path)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDirWin(path string) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+ createmode := uint32(syscall.OPEN_EXISTING)
+ fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+ return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
+
+// DirectoryLockGuard holds a lock on the directory.
+type directoryLockGuard struct {
+ h syscall.Handle
+ path string
+}
+
+// AcquireDirectoryLock acquires exclusive access to a directory.
+func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
+ if readOnly {
+ return nil, ErrWindowsNotSupported
+ }
+
+ // Convert to absolute path so that Release still works even if we do an unbalanced
+ // chdir in the meantime.
+ absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
+ if err != nil {
+ return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
+ }
+
+ // This call creates a file handler in memory that only one process can use at a time. When
+ // that process ends, the file is deleted by the system.
+ // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
+ // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
+ // the file when all processes holding the handler are closed.
+ // XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
+ h, err := syscall.CreateFile(
+ syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
+ syscall.OPEN_ALWAYS,
+ uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
+ 0)
+ if err != nil {
+ return nil, errors.Wrapf(err,
+ "Cannot create lock file %q. Another process is using this Badger database",
+ absLockFilePath)
+ }
+
+ return &directoryLockGuard{h: h, path: absLockFilePath}, nil
+}
+
+// Release removes the directory lock.
+func (g *directoryLockGuard) release() error {
+ g.path = ""
+ return syscall.CloseHandle(g.h)
+}
+
+// Windows doesn't support syncing directories to the file system. See
+// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details.
+func syncDir(dir string) error { return nil }
diff --git a/vendor/github.com/dgraph-io/badger/doc.go b/vendor/github.com/dgraph-io/badger/doc.go
new file mode 100644
index 000000000..83dc9a28a
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/doc.go
@@ -0,0 +1,28 @@
+/*
+Package badger implements an embeddable, simple and fast key-value database,
+written in pure Go. It is designed to be highly performant for both reads and
+writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and
+supports transactions. It runs transactions concurrently, with serializable
+snapshot isolation guarantees.
+
+Badger uses an LSM tree along with a value log to separate keys from values,
+hence reducing both write amplification and the size of the LSM tree. This
+allows LSM tree to be served entirely from RAM, while the values are served
+from SSD.
+
+
+Usage
+
+Badger has the following main types: DB, Txn, Item and Iterator. DB contains
+keys that are associated with values. It must be opened with the appropriate
+options before it can be accessed.
+
+All operations happen inside a Txn. Txn represents a transaction, which can
+be read-only or read-write. Read-only transactions can read values for a
+given key (which are returned inside an Item), or iterate over a set of
+key-value pairs using an Iterator (which are returned as Item type values as
+well). Read-write transactions can also update and delete keys from the DB.
+
+See the examples for more usage details.
+*/
+package badger
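+
+// A sketch of the transaction pattern described above; keys and values are
+// illustrative:
+//
+//	err := db.Update(func(txn *badger.Txn) error {
+//		return txn.Set([]byte("answer"), []byte("42"))
+//	})
+//
+//	err = db.View(func(txn *badger.Txn) error {
+//		item, err := txn.Get([]byte("answer"))
+//		if err != nil {
+//			return err
+//		}
+//		return item.Value(func(val []byte) error {
+//			fmt.Printf("answer = %s\n", val)
+//			return nil
+//		})
+//	})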
diff --git a/vendor/github.com/dgraph-io/badger/errors.go b/vendor/github.com/dgraph-io/badger/errors.go
new file mode 100644
index 000000000..8d2df6833
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/errors.go
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "math"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ // ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold.
+ ValueThresholdLimit = math.MaxUint16 - 16 + 1
+)
+
+var (
+ // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid
+ // range.
+ ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB")
+
+ // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than
+ // uint16.
+ ErrValueThreshold = errors.Errorf(
+ "Invalid ValueThreshold, must be less than %d", ValueThresholdLimit)
+
+ // ErrKeyNotFound is returned when key isn't found on a txn.Get.
+ ErrKeyNotFound = errors.New("Key not found")
+
+ // ErrTxnTooBig is returned if too many writes are fit into a single transaction.
+ ErrTxnTooBig = errors.New("Txn is too big to fit into one request")
+
+ // ErrConflict is returned when a transaction conflicts with another transaction. This can
+ // happen if the read rows had been updated concurrently by another transaction.
+ ErrConflict = errors.New("Transaction Conflict. Please retry")
+
+ // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
+ ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction")
+
+ // ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
+ ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one")
+
+ // ErrEmptyKey is returned if an empty key is passed on an update function.
+ ErrEmptyKey = errors.New("Key cannot be empty")
+
+ // ErrInvalidKey is returned if the key has a special !badger! prefix,
+ // reserved for internal usage.
+ ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix")
+
+ // ErrRetry is returned when a log file containing the value is not found.
+ // This usually indicates that it may have been garbage collected, and the
+ // operation needs to be retried.
+ ErrRetry = errors.New("Unable to find log file. Please retry")
+
+ // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called.
+ // In such a case, GC can't be run.
+ ErrThresholdZero = errors.New(
+ "Value log GC can't run because threshold is set to zero")
+
+ // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite.
+ ErrNoRewrite = errors.New(
+ "Value log GC attempt didn't result in any cleanup")
+
+ // ErrRejected is returned if a value log GC is called either while another GC is running, or
+ // after DB::Close has been called.
+ ErrRejected = errors.New("Value log GC request rejected")
+
+ // ErrInvalidRequest is returned if the user request is invalid.
+ ErrInvalidRequest = errors.New("Invalid request")
+
+ // ErrManagedTxn is returned if the user tries to use an API which isn't
+ // allowed due to external management of transactions, when using ManagedDB.
+ ErrManagedTxn = errors.New(
+ "Invalid API request. Not allowed to perform this action using ManagedDB")
+
+ // ErrInvalidDump if a data dump made previously cannot be loaded into the database.
+ ErrInvalidDump = errors.New("Data dump cannot be read")
+
+ // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence.
+ ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero")
+
+ // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not
+ // within the valid range
+ ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap")
+
+ // ErrReplayNeeded is returned when opt.ReadOnly is set but the
+ // database requires a value log replay.
+ ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only")
+
+ // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows
+ ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows")
+
+ // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of
+ // corrupt data to allow Badger to run properly.
+ ErrTruncateNeeded = errors.New(
+ "Value log truncate required to run DB. This might result in data loss")
+
+ // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all
+ // data from Badger, we stop accepting new writes, by returning this error.
+ ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close")
+
+ // ErrNilCallback is returned when subscriber's callback is nil.
+ ErrNilCallback = errors.New("Callback cannot be nil")
+)
diff --git a/vendor/github.com/dgraph-io/badger/go.mod b/vendor/github.com/dgraph-io/badger/go.mod
new file mode 100644
index 000000000..6b49487f0
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/go.mod
@@ -0,0 +1,15 @@
+module github.com/dgraph-io/badger
+
+go 1.12
+
+require (
+ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9
+ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2
+ github.com/dustin/go-humanize v1.0.0
+ github.com/golang/protobuf v1.3.1
+ github.com/pkg/errors v0.8.1
+ github.com/spf13/cobra v0.0.5
+ github.com/stretchr/testify v1.3.0
+ golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
+)
diff --git a/vendor/github.com/dgraph-io/badger/go.sum b/vendor/github.com/dgraph-io/badger/go.sum
new file mode 100644
index 000000000..7e32ad171
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/go.sum
@@ -0,0 +1,55 @@
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/dgraph-io/badger/histogram.go b/vendor/github.com/dgraph-io/badger/histogram.go
new file mode 100644
index 000000000..d8c94bb7a
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/histogram.go
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "fmt"
+ "math"
+)
+
+// PrintHistogram builds and displays the key-value size histogram.
+// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
+// considered for creating the histogram
+func (db *DB) PrintHistogram(keyPrefix []byte) {
+ if db == nil {
+ fmt.Println("\nCannot build histogram: DB is nil.")
+ return
+ }
+ histogram := db.buildHistogram(keyPrefix)
+ fmt.Printf("Histogram of key sizes (in bytes)\n")
+ histogram.keySizeHistogram.printHistogram()
+ fmt.Printf("Histogram of value sizes (in bytes)\n")
+ histogram.valueSizeHistogram.printHistogram()
+}
+
+// histogramData stores information about a histogram
+type histogramData struct {
+ bins []int64
+ countPerBin []int64
+ totalCount int64
+ min int64
+ max int64
+ sum int64
+}
+
+// sizeHistogram contains keySize histogram and valueSize histogram
+type sizeHistogram struct {
+ keySizeHistogram, valueSizeHistogram histogramData
+}
+
+// newSizeHistogram returns a new instance of sizeHistogram with
+// properly initialized fields.
+func newSizeHistogram() *sizeHistogram {
+ // TODO(ibrahim): find appropriate bin size.
+ keyBins := createHistogramBins(1, 16)
+ valueBins := createHistogramBins(1, 30)
+ return &sizeHistogram{
+ keySizeHistogram: histogramData{
+ bins: keyBins,
+ countPerBin: make([]int64, len(keyBins)+1),
+ max: math.MinInt64,
+ min: math.MaxInt64,
+ sum: 0,
+ },
+ valueSizeHistogram: histogramData{
+ bins: valueBins,
+ countPerBin: make([]int64, len(valueBins)+1),
+ max: math.MinInt64,
+ min: math.MaxInt64,
+ sum: 0,
+ },
+ }
+}
+
+// createHistogramBins creates bins for a histogram. The bin sizes are powers
+// of two of the form [2^min_exponent, ..., 2^max_exponent].
+func createHistogramBins(minExponent, maxExponent uint32) []int64 {
+ var bins []int64
+ for i := minExponent; i <= maxExponent; i++ {
+ bins = append(bins, int64(1)<<i)
+ }
+ return bins
+}
+
+// Update the min and max fields if value is less than or greater than the
+// current min/max value.
+func (histogram *histogramData) Update(value int64) {
+ if value > histogram.max {
+ histogram.max = value
+ }
+ if value < histogram.min {
+ histogram.min = value
+ }
+
+ histogram.sum += value
+ histogram.totalCount++
+
+ for index := 0; index <= len(histogram.bins); index++ {
+ // Allocate value in the last buckets if we reached the end of the Bounds array.
+ if index == len(histogram.bins) {
+ histogram.countPerBin[index]++
+ break
+ }
+
+ // Check if the value should be added to the "index" bin
+ if value < int64(histogram.bins[index]) {
+ histogram.countPerBin[index]++
+ break
+ }
+ }
+}
+
+// buildHistogram builds the key-value size histogram.
+// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
+// considered for creating the histogram
+func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
+ txn := db.NewTransaction(false)
+ defer txn.Discard()
+
+ itr := txn.NewIterator(DefaultIteratorOptions)
+ defer itr.Close()
+
+ badgerHistogram := newSizeHistogram()
+
+ // Collect key and value sizes.
+ for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() {
+ item := itr.Item()
+ badgerHistogram.keySizeHistogram.Update(item.KeySize())
+ badgerHistogram.valueSizeHistogram.Update(item.ValueSize())
+ }
+ return badgerHistogram
+}
+
+// printHistogram prints the histogram data in a human-readable format.
+func (histogram histogramData) printHistogram() {
+ fmt.Printf("Total count: %d\n", histogram.totalCount)
+ fmt.Printf("Min value: %d\n", histogram.min)
+ fmt.Printf("Max value: %d\n", histogram.max)
+ fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount))
+ fmt.Printf("%24s %9s\n", "Range", "Count")
+
+ numBins := len(histogram.bins)
+ for index, count := range histogram.countPerBin {
+ if count == 0 {
+ continue
+ }
+
+ // The last bin represents the bin that contains the range from
+ // the last bin up to infinity so it's processed differently than the
+ // other bins.
+ if index == len(histogram.countPerBin)-1 {
+ lowerBound := int(histogram.bins[numBins-1])
+ fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count)
+ continue
+ }
+
+ upperBound := int(histogram.bins[index])
+ lowerBound := 0
+ if index > 0 {
+ lowerBound = int(histogram.bins[index-1])
+ }
+
+ fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count)
+ }
+ fmt.Println()
+}
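Editor's note: the vendored histogram above assigns each key and value size to a power-of-two bucket. The following standalone sketch shows that binning rule in isolation; `powerOfTwoBins` and `binIndex` are illustrative names introduced here and are not part of the vendored file.

```go
package main

import "fmt"

// powerOfTwoBins returns bin upper bounds [2^minExp, ..., 2^maxExp],
// mirroring createHistogramBins in the vendored badger code.
func powerOfTwoBins(minExp, maxExp uint32) []int64 {
	var bins []int64
	for i := minExp; i <= maxExp; i++ {
		bins = append(bins, int64(1)<<i)
	}
	return bins
}

// binIndex returns the index of the first bin whose upper bound exceeds value;
// values larger than every bound fall into the overflow bucket (index len(bins)).
func binIndex(bins []int64, value int64) int {
	for i, bound := range bins {
		if value < bound {
			return i
		}
	}
	return len(bins)
}

func main() {
	bins := powerOfTwoBins(1, 16) // key-size bins, as used by newSizeHistogram
	fmt.Println(binIndex(bins, 3))   // 1: falls in the range [2, 4)
	fmt.Println(binIndex(bins, 100)) // 6: falls in the range [64, 128)
}
```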
diff --git a/vendor/github.com/dgraph-io/badger/iterator.go b/vendor/github.com/dgraph-io/badger/iterator.go
new file mode 100644
index 000000000..f4af4058d
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/iterator.go
@@ -0,0 +1,684 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "fmt"
+ "hash/crc32"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/dgraph-io/badger/options"
+ "github.com/dgraph-io/badger/table"
+
+ "github.com/dgraph-io/badger/y"
+)
+
+type prefetchStatus uint8
+
+const (
+ prefetched prefetchStatus = iota + 1
+)
+
+// Item is returned during iteration. Both the Key() and Value() outputs are only valid until
+// iterator.Next() is called.
+type Item struct {
+ status prefetchStatus
+ err error
+ wg sync.WaitGroup
+ db *DB
+ key []byte
+ vptr []byte
+ meta byte // We need to store meta to know about bitValuePointer.
+ userMeta byte
+ expiresAt uint64
+ val []byte
+ slice *y.Slice // Used only during prefetching.
+ next *Item
+ version uint64
+ txn *Txn
+}
+
+// String returns a string representation of Item
+func (item *Item) String() string {
+ return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
+}
+
+// Key returns the key.
+//
+// Key is only valid as long as item is valid, or transaction is valid. If you need to use it
+// outside its validity, please use KeyCopy.
+func (item *Item) Key() []byte {
+ return item.key
+}
+
+// KeyCopy returns a copy of the key of the item, writing it to dst slice.
+// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
+// returned.
+func (item *Item) KeyCopy(dst []byte) []byte {
+ return y.SafeCopy(dst, item.key)
+}
+
+// Version returns the commit timestamp of the item.
+func (item *Item) Version() uint64 {
+ return item.version
+}
+
+// Value retrieves the value of the item from the value log.
+//
+// This method must be called within a transaction. Calling it outside a
+// transaction is considered undefined behavior. If an iterator is being used,
+// then Item.Value() is defined in the current iteration only, because items are
+// reused.
+//
+// If you need to use a value outside a transaction, please use Item.ValueCopy
+// instead, or copy it yourself. Value might change once discard or commit is called.
+// Use ValueCopy if you want to do a Set after Get.
+func (item *Item) Value(fn func(val []byte) error) error {
+ item.wg.Wait()
+ if item.status == prefetched {
+ if item.err == nil && fn != nil {
+ if err := fn(item.val); err != nil {
+ return err
+ }
+ }
+ return item.err
+ }
+ buf, cb, err := item.yieldItemValue()
+ defer runCallback(cb)
+ if err != nil {
+ return err
+ }
+ if fn != nil {
+ return fn(buf)
+ }
+ return nil
+}
+
+// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice.
+// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
+// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call.
+//
+// This function is useful in long running iterate/update transactions to avoid a write deadlock.
+// See Github issue: https://github.com/dgraph-io/badger/issues/315
+func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
+ item.wg.Wait()
+ if item.status == prefetched {
+ return y.SafeCopy(dst, item.val), item.err
+ }
+ buf, cb, err := item.yieldItemValue()
+ defer runCallback(cb)
+ return y.SafeCopy(dst, buf), err
+}
+
+func (item *Item) hasValue() bool {
+ if item.meta == 0 && item.vptr == nil {
+ // key not found
+ return false
+ }
+ return true
+}
+
+// IsDeletedOrExpired returns true if item contains deleted or expired value.
+func (item *Item) IsDeletedOrExpired() bool {
+ return isDeletedOrExpired(item.meta, item.expiresAt)
+}
+
+// DiscardEarlierVersions returns whether the item was created with the
+// option to discard earlier versions of a key when multiple are available.
+func (item *Item) DiscardEarlierVersions() bool {
+ return item.meta&bitDiscardEarlierVersions > 0
+}
+
+func (item *Item) yieldItemValue() ([]byte, func(), error) {
+ key := item.Key() // No need to copy.
+ for {
+ if !item.hasValue() {
+ return nil, nil, nil
+ }
+
+ if item.slice == nil {
+ item.slice = new(y.Slice)
+ }
+
+ if (item.meta & bitValuePointer) == 0 {
+ val := item.slice.Resize(len(item.vptr))
+ copy(val, item.vptr)
+ return val, nil, nil
+ }
+
+ var vp valuePointer
+ vp.Decode(item.vptr)
+ result, cb, err := item.db.vlog.Read(vp, item.slice)
+ if err != ErrRetry {
+ return result, cb, err
+ }
+ if bytes.HasPrefix(key, badgerMove) {
+ // err == ErrRetry
+ // Error is retry even after checking the move keyspace. So, let's
+ // just assume that value is not present.
+ return nil, cb, nil
+ }
+
+ // The value pointer is pointing to a deleted value log. Look for the
+ // move key and read that instead.
+ runCallback(cb)
+ // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation.
+ keyTs := y.KeyWithTs(item.Key(), item.Version())
+ key = make([]byte, len(badgerMove)+len(keyTs))
+ n := copy(key, badgerMove)
+ copy(key[n:], keyTs)
+ // Note that we can't set item.key to move key, because that would
+ // change the key user sees before and after this call. Also, this move
+ // logic is internal logic and should not impact the external behavior
+ // of the retrieval.
+ vs, err := item.db.get(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ if vs.Version != item.Version() {
+ return nil, nil, nil
+ }
+ // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this
+ // slice gets overwritten.
+ item.vptr = y.SafeCopy(item.vptr, vs.Value)
+ item.meta &^= bitValuePointer // Clear the value pointer bit.
+ if vs.Meta&bitValuePointer > 0 {
+ item.meta |= bitValuePointer // This meta would only be about value pointer.
+ }
+ }
+}
+
+func runCallback(cb func()) {
+ if cb != nil {
+ cb()
+ }
+}
+
+func (item *Item) prefetchValue() {
+ val, cb, err := item.yieldItemValue()
+ defer runCallback(cb)
+
+ item.err = err
+ item.status = prefetched
+ if val == nil {
+ return
+ }
+ if item.db.opt.ValueLogLoadingMode == options.MemoryMap {
+ buf := item.slice.Resize(len(val))
+ copy(buf, val)
+ item.val = buf
+ } else {
+ item.val = val
+ }
+}
+
+// EstimatedSize returns the approximate size of the key-value pair.
+//
+// This can be called while iterating through a store to quickly estimate the
+// size of a range of key-value pairs (without fetching the corresponding
+// values).
+func (item *Item) EstimatedSize() int64 {
+ if !item.hasValue() {
+ return 0
+ }
+ if (item.meta & bitValuePointer) == 0 {
+ return int64(len(item.key) + len(item.vptr))
+ }
+ var vp valuePointer
+ vp.Decode(item.vptr)
+ return int64(vp.Len) // includes key length.
+}
+
+// KeySize returns the size of the key.
+// Exact size of the key is key + 8 bytes of timestamp
+func (item *Item) KeySize() int64 {
+ return int64(len(item.key))
+}
+
+// ValueSize returns the exact size of the value.
+//
+// This can be called to quickly estimate the size of a value without fetching
+// it.
+func (item *Item) ValueSize() int64 {
+ if !item.hasValue() {
+ return 0
+ }
+ if (item.meta & bitValuePointer) == 0 {
+ return int64(len(item.vptr))
+ }
+ var vp valuePointer
+ vp.Decode(item.vptr)
+
+ klen := int64(len(item.key) + 8) // 8 bytes for timestamp.
+ return int64(vp.Len) - klen - headerBufSize - crc32.Size
+}
+
+// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user,
+// is used to interpret the value.
+func (item *Item) UserMeta() byte {
+ return item.userMeta
+}
+
+// ExpiresAt returns a Unix time value indicating when the item will be
+// considered expired. 0 indicates that the item will never expire.
+func (item *Item) ExpiresAt() uint64 {
+ return item.expiresAt
+}
+
+// TODO: Switch this to use linked list container in Go.
+type list struct {
+ head *Item
+ tail *Item
+}
+
+func (l *list) push(i *Item) {
+ i.next = nil
+ if l.tail == nil {
+ l.head = i
+ l.tail = i
+ return
+ }
+ l.tail.next = i
+ l.tail = i
+}
+
+func (l *list) pop() *Item {
+ if l.head == nil {
+ return nil
+ }
+ i := l.head
+ if l.head == l.tail {
+ l.tail = nil
+ l.head = nil
+ } else {
+ l.head = i.next
+ }
+ i.next = nil
+ return i
+}
+
+// IteratorOptions is used to set options when iterating over Badger key-value
+// stores.
+//
+// This package provides DefaultIteratorOptions which contains options that
+// should work for most applications. Consider using that as a starting point
+// before customizing it for your own needs.
+type IteratorOptions struct {
+ // Indicates whether we should prefetch values during iteration and store them.
+ PrefetchValues bool
+ // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true.
+ PrefetchSize int
+ Reverse bool // Direction of iteration. False is forward, true is backward.
+ AllVersions bool // Fetch all valid versions of the same key.
+
+ // The following option is used to narrow down the SSTables that iterator picks up. If
+ // Prefix is specified, only tables which could have this prefix are picked based on their range
+ // of keys.
+ Prefix []byte // Only iterate over this given prefix.
+ prefixIsKey bool // If set, use the prefix for bloom filter lookup.
+
+ InternalAccess bool // Used to allow internal access to badger keys.
+}
+
+func (opt *IteratorOptions) pickTable(t table.TableInterface) bool {
+ if len(opt.Prefix) == 0 {
+ return true
+ }
+ trim := func(key []byte) []byte {
+ if len(key) > len(opt.Prefix) {
+ return key[:len(opt.Prefix)]
+ }
+ return key
+ }
+ if bytes.Compare(trim(t.Smallest()), opt.Prefix) > 0 {
+ return false
+ }
+ if bytes.Compare(trim(t.Biggest()), opt.Prefix) < 0 {
+ return false
+ }
+ // Bloom filter lookup would only work if opt.Prefix does NOT have the read
+ // timestamp as part of the key.
+ if opt.prefixIsKey && t.DoesNotHave(opt.Prefix) {
+ return false
+ }
+ return true
+}
+
+// DefaultIteratorOptions contains default options when iterating over Badger key-value stores.
+var DefaultIteratorOptions = IteratorOptions{
+ PrefetchValues: true,
+ PrefetchSize: 100,
+ Reverse: false,
+ AllVersions: false,
+}
+
+// Iterator helps iterating over the KV pairs in a lexicographically sorted order.
+type Iterator struct {
+ iitr *y.MergeIterator
+ txn *Txn
+ readTs uint64
+
+ opt IteratorOptions
+ item *Item
+ data list
+ waste list
+
+ lastKey []byte // Used to skip over multiple versions of the same key.
+
+ closed bool
+}
+
+// NewIterator returns a new iterator. Depending upon the options, either only keys, or both
+// key-value pairs would be fetched. The keys are returned in lexicographically sorted order.
+// Using prefetch is recommended if you're doing a long running iteration, for performance.
+//
+// Multiple Iterators:
+// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write
+// txn, only one can be running at one time to avoid race conditions, because Txn is thread-unsafe.
+func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator {
+ if txn.discarded {
+ panic("Transaction has already been discarded")
+ }
+ // Do not change the order of the next if. We must track the number of running iterators.
+ if atomic.AddInt32(&txn.numIterators, 1) > 1 && txn.update {
+ atomic.AddInt32(&txn.numIterators, -1)
+ panic("Only one iterator can be active at one time, for a RW txn.")
+ }
+
+ // TODO: If Prefix is set, only pick those memtables which have keys with
+ // the prefix.
+ tables, decr := txn.db.getMemTables()
+ defer decr()
+ txn.db.vlog.incrIteratorCount()
+ var iters []y.Iterator
+ if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil {
+ iters = append(iters, itr)
+ }
+ for i := 0; i < len(tables); i++ {
+ iters = append(iters, tables[i].NewUniIterator(opt.Reverse))
+ }
+ iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references.
+ res := &Iterator{
+ txn: txn,
+ iitr: y.NewMergeIterator(iters, opt.Reverse),
+ opt: opt,
+ readTs: txn.readTs,
+ }
+ return res
+}
+
+// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a
+// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to
+// additionally run bloom filter lookups before picking tables from the LSM tree.
+func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator {
+ if len(opt.Prefix) > 0 {
+ panic("opt.Prefix should be nil for NewKeyIterator.")
+ }
+ opt.Prefix = key // This key must be without the timestamp.
+ opt.prefixIsKey = true
+ return txn.NewIterator(opt)
+}
+
+func (it *Iterator) newItem() *Item {
+ item := it.waste.pop()
+ if item == nil {
+ item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn}
+ }
+ return item
+}
+
+// Item returns pointer to the current key-value pair.
+// This item is only valid until it.Next() gets called.
+func (it *Iterator) Item() *Item {
+ tx := it.txn
+ tx.addReadKey(it.item.Key())
+ return it.item
+}
+
+// Valid returns false when iteration is done.
+func (it *Iterator) Valid() bool {
+ if it.item == nil {
+ return false
+ }
+ return bytes.HasPrefix(it.item.key, it.opt.Prefix)
+}
+
+// ValidForPrefix returns false when iteration is done
+// or when the current key is not prefixed by the specified prefix.
+func (it *Iterator) ValidForPrefix(prefix []byte) bool {
+ return it.Valid() && bytes.HasPrefix(it.item.key, prefix)
+}
+
+// Close would close the iterator. It is important to call this when you're done with iteration.
+func (it *Iterator) Close() {
+ if it.closed {
+ return
+ }
+ it.closed = true
+
+ it.iitr.Close()
+ // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie
+ // goroutines behind, which are waiting to acquire file read locks after DB has been closed.
+ waitFor := func(l list) {
+ item := l.pop()
+ for item != nil {
+ item.wg.Wait()
+ item = l.pop()
+ }
+ }
+ waitFor(it.waste)
+ waitFor(it.data)
+
+ // TODO: We could handle this error.
+ _ = it.txn.db.vlog.decrIteratorCount()
+ atomic.AddInt32(&it.txn.numIterators, -1)
+}
+
+// Next would advance the iterator by one. Always check it.Valid() after a Next()
+// to ensure you have access to a valid it.Item().
+func (it *Iterator) Next() {
+ // Reuse current item
+ it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
+ it.waste.push(it.item)
+
+ // Set next item to current
+ it.item = it.data.pop()
+
+ for it.iitr.Valid() {
+ if it.parseItem() {
+ // parseItem calls one extra next.
+ // This is used to deal with the complexity of reverse iteration.
+ break
+ }
+ }
+}
+
+func isDeletedOrExpired(meta byte, expiresAt uint64) bool {
+ if meta&bitDelete > 0 {
+ return true
+ }
+ if expiresAt == 0 {
+ return false
+ }
+ return expiresAt <= uint64(time.Now().Unix())
+}
+
+// parseItem is a complex function because it needs to handle both forward and reverse iteration
+// implementation. We store keys such that their versions are sorted in descending order. This makes
+// forward iteration efficient, but reverse iteration complicated. This tradeoff is better because
+// forward iteration is more common than reverse.
+//
+// This function advances the iterator.
+func (it *Iterator) parseItem() bool {
+ mi := it.iitr
+ key := mi.Key()
+
+ setItem := func(item *Item) {
+ if it.item == nil {
+ it.item = item
+ } else {
+ it.data.push(item)
+ }
+ }
+
+ // Skip badger keys.
+ if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) {
+ mi.Next()
+ return false
+ }
+
+ // Skip any versions which are beyond the readTs.
+ version := y.ParseTs(key)
+ if version > it.readTs {
+ mi.Next()
+ return false
+ }
+
+ if it.opt.AllVersions {
+ // Return deleted or expired values also, otherwise user can't figure out
+ // whether the key was deleted.
+ item := it.newItem()
+ it.fill(item)
+ setItem(item)
+ mi.Next()
+ return true
+ }
+
+ // If iterating in forward direction, then just checking the last key against current key would
+ // be sufficient.
+ if !it.opt.Reverse {
+ if y.SameKey(it.lastKey, key) {
+ mi.Next()
+ return false
+ }
+ // Only track in forward direction.
+ // We should update lastKey as soon as we find a different key in our snapshot.
+ // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
+ // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
+ // which is wrong. Therefore, update lastKey here.
+ it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
+ }
+
+FILL:
+ // If deleted, advance and return.
+ vs := mi.Value()
+ if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
+ mi.Next()
+ return false
+ }
+
+ item := it.newItem()
+ it.fill(item)
+ // fill item based on current cursor position. All Next calls have returned, so reaching here
+ // means no Next was called.
+
+ mi.Next() // Advance but no fill item yet.
+ if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
+ setItem(item)
+ return true
+ }
+
+ // Reverse direction.
+ nextTs := y.ParseTs(mi.Key())
+ mik := y.ParseKey(mi.Key())
+ if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
+ // This is a valid potential candidate.
+ goto FILL
+ }
+ // Ignore the next candidate. Return the current one.
+ setItem(item)
+ return true
+}
+
+func (it *Iterator) fill(item *Item) {
+ vs := it.iitr.Value()
+ item.meta = vs.Meta
+ item.userMeta = vs.UserMeta
+ item.expiresAt = vs.ExpiresAt
+
+ item.version = y.ParseTs(it.iitr.Key())
+ item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key()))
+
+ item.vptr = y.SafeCopy(item.vptr, vs.Value)
+ item.val = nil
+ if it.opt.PrefetchValues {
+ item.wg.Add(1)
+ go func() {
+ // FIXME we are not handling errors here.
+ item.prefetchValue()
+ item.wg.Done()
+ }()
+ }
+}
+
+func (it *Iterator) prefetch() {
+ prefetchSize := 2
+ if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 {
+ prefetchSize = it.opt.PrefetchSize
+ }
+
+ i := it.iitr
+ var count int
+ it.item = nil
+ for i.Valid() {
+ if !it.parseItem() {
+ continue
+ }
+ count++
+ if count == prefetchSize {
+ break
+ }
+ }
+}
+
+// Seek would seek to the provided key if present. If absent, it would seek to the next
+// smallest key greater than the provided key if iterating in the forward direction.
+// Behavior would be reversed if iterating backwards.
+func (it *Iterator) Seek(key []byte) {
+ for i := it.data.pop(); i != nil; i = it.data.pop() {
+ i.wg.Wait()
+ it.waste.push(i)
+ }
+
+ it.lastKey = it.lastKey[:0]
+ if len(key) == 0 {
+ key = it.opt.Prefix
+ }
+ if len(key) == 0 {
+ it.iitr.Rewind()
+ it.prefetch()
+ return
+ }
+
+ if !it.opt.Reverse {
+ key = y.KeyWithTs(key, it.txn.readTs)
+ } else {
+ key = y.KeyWithTs(key, 0)
+ }
+ it.iitr.Seek(key)
+ it.prefetch()
+}
+
+// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the
+// smallest key if iterating forward, and largest if iterating backward. It does not keep track of
+// whether the cursor started with a Seek().
+func (it *Iterator) Rewind() {
+ it.Seek(nil)
+}
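Editor's note: the iterator API added above (NewIterator, Seek, ValidForPrefix, Item.Value) is normally driven from a read-only transaction via db.View. A minimal usage sketch follows; it assumes an already-opened *badger.DB, and the function name, prefix value, and print format are illustrative rather than part of this diff.

```go
package badgerexample

import (
	"fmt"

	"github.com/dgraph-io/badger"
)

// scanPrefix iterates every key under prefix in a read-only transaction and
// prints each key with its value size. Usage sketch only.
func scanPrefix(db *badger.DB, prefix []byte) error {
	return db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchSize = 10
		it := txn.NewIterator(opts)
		defer it.Close() // Releases value-log and table references.

		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			item := it.Item()
			// The value slice is only valid inside this callback; use ValueCopy to keep it.
			if err := item.Value(func(v []byte) error {
				fmt.Printf("key=%s valueSize=%d\n", item.Key(), len(v))
				return nil
			}); err != nil {
				return err
			}
		}
		return nil
	})
}
```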
diff --git a/vendor/github.com/dgraph-io/badger/level_handler.go b/vendor/github.com/dgraph-io/badger/level_handler.go
new file mode 100644
index 000000000..147967fb8
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/level_handler.go
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+type levelHandler struct {
+ // Guards tables, totalSize.
+ sync.RWMutex
+
+ // For level >= 1, tables are sorted by key ranges, which do not overlap.
+ // For level 0, tables are sorted by time.
+// For level 0, newest tables are at the back. Compact the oldest one first, which is at the front.
+ tables []*table.Table
+ totalSize int64
+
+ // The following are initialized once and const.
+ level int
+ strLevel string
+ maxTotalSize int64
+ db *DB
+}
+
+func (s *levelHandler) getTotalSize() int64 {
+ s.RLock()
+ defer s.RUnlock()
+ return s.totalSize
+}
+
+// initTables replaces s.tables with given tables. This is done during loading.
+func (s *levelHandler) initTables(tables []*table.Table) {
+ s.Lock()
+ defer s.Unlock()
+
+ s.tables = tables
+ s.totalSize = 0
+ for _, t := range tables {
+ s.totalSize += t.Size()
+ }
+
+ if s.level == 0 {
+ // Key range will overlap. Just sort by fileID in ascending order
+ // because newer tables are at the end of level 0.
+ sort.Slice(s.tables, func(i, j int) bool {
+ return s.tables[i].ID() < s.tables[j].ID()
+ })
+ } else {
+ // Sort tables by keys.
+ sort.Slice(s.tables, func(i, j int) bool {
+ return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
+ })
+ }
+}
+
+// deleteTables removes the given tables from s.tables, adjusts totalSize, and decrements the tables' references.
+func (s *levelHandler) deleteTables(toDel []*table.Table) error {
+ s.Lock() // s.Unlock() below
+
+ toDelMap := make(map[uint64]struct{})
+ for _, t := range toDel {
+ toDelMap[t.ID()] = struct{}{}
+ }
+
+ // Make a copy as iterators might be keeping a slice of tables.
+ var newTables []*table.Table
+ for _, t := range s.tables {
+ _, found := toDelMap[t.ID()]
+ if !found {
+ newTables = append(newTables, t)
+ continue
+ }
+ s.totalSize -= t.Size()
+ }
+ s.tables = newTables
+
+ s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.
+
+ return decrRefs(toDel)
+}
+
+// replaceTables replaces the tables in toDel with those in toAdd within this level.
+// You must call decr() to delete the old tables _after_ writing the update to the manifest.
+func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
+ // Need to re-search the range of tables in this level to be replaced as other goroutines might
+ // be changing it as well. (They can't touch our tables, but if they add/remove other tables,
+ // the indices get shifted around.)
+ s.Lock() // We s.Unlock() below.
+
+ toDelMap := make(map[uint64]struct{})
+ for _, t := range toDel {
+ toDelMap[t.ID()] = struct{}{}
+ }
+ var newTables []*table.Table
+ for _, t := range s.tables {
+ _, found := toDelMap[t.ID()]
+ if !found {
+ newTables = append(newTables, t)
+ continue
+ }
+ s.totalSize -= t.Size()
+ }
+
+ // Increase totalSize first.
+ for _, t := range toAdd {
+ s.totalSize += t.Size()
+ t.IncrRef()
+ newTables = append(newTables, t)
+ }
+
+ // Assign tables.
+ s.tables = newTables
+ sort.Slice(s.tables, func(i, j int) bool {
+ return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
+ })
+ s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
+ return decrRefs(toDel)
+}
+
+func decrRefs(tables []*table.Table) error {
+ for _, table := range tables {
+ if err := table.DecrRef(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newLevelHandler(db *DB, level int) *levelHandler {
+ return &levelHandler{
+ level: level,
+ strLevel: fmt.Sprintf("l%d", level),
+ db: db,
+ }
+}
+
+// tryAddLevel0Table returns true if ok and no stalling.
+func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool {
+ y.AssertTrue(s.level == 0)
+ // Need lock as we may be deleting the first table during a level 0 compaction.
+ s.Lock()
+ defer s.Unlock()
+ if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
+ return false
+ }
+
+ s.tables = append(s.tables, t)
+ t.IncrRef()
+ s.totalSize += t.Size()
+
+ return true
+}
+
+func (s *levelHandler) numTables() int {
+ s.RLock()
+ defer s.RUnlock()
+ return len(s.tables)
+}
+
+func (s *levelHandler) close() error {
+ s.RLock()
+ defer s.RUnlock()
+ var err error
+ for _, t := range s.tables {
+ if closeErr := t.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }
+ return errors.Wrap(err, "levelHandler.close")
+}
+
+// getTableForKey acquires a read-lock to access s.tables. It returns a list of tableHandlers.
+func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) {
+ s.RLock()
+ defer s.RUnlock()
+
+ if s.level == 0 {
+ // For level 0, we need to check every table. Remember to make a copy as s.tables may change
+ // once we exit this function, and we don't want to lock s.tables while seeking in tables.
+ // CAUTION: Reverse the tables.
+ out := make([]*table.Table, 0, len(s.tables))
+ for i := len(s.tables) - 1; i >= 0; i-- {
+ out = append(out, s.tables[i])
+ s.tables[i].IncrRef()
+ }
+ return out, func() error {
+ for _, t := range out {
+ if err := t.DecrRef(); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }
+ // For level >= 1, we can do a binary search as key range does not overlap.
+ idx := sort.Search(len(s.tables), func(i int) bool {
+ return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
+ })
+ if idx >= len(s.tables) {
+ // Given key is strictly greater than every element we have.
+ return nil, func() error { return nil }
+ }
+ tbl := s.tables[idx]
+ tbl.IncrRef()
+ return []*table.Table{tbl}, tbl.DecrRef
+}
+
+// get returns value for a given key or the key after that. If not found, return nil.
+func (s *levelHandler) get(key []byte) (y.ValueStruct, error) {
+ tables, decr := s.getTableForKey(key)
+ keyNoTs := y.ParseKey(key)
+
+ var maxVs y.ValueStruct
+ for _, th := range tables {
+ if th.DoesNotHave(keyNoTs) {
+ y.NumLSMBloomHits.Add(s.strLevel, 1)
+ continue
+ }
+
+ it := th.NewIterator(false)
+ defer it.Close()
+
+ y.NumLSMGets.Add(s.strLevel, 1)
+ it.Seek(key)
+ if !it.Valid() {
+ continue
+ }
+ if y.SameKey(key, it.Key()) {
+ if version := y.ParseTs(it.Key()); maxVs.Version < version {
+ maxVs = it.Value()
+ maxVs.Version = version
+ }
+ }
+ }
+ return maxVs, decr()
+}
+
+// appendIterators appends iterators to an array of iterators, for merging.
+// Note: This obtains references for the table handlers. Remember to close these iterators.
+func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
+ s.RLock()
+ defer s.RUnlock()
+
+ tables := make([]*table.Table, 0, len(s.tables))
+ for _, t := range s.tables {
+ if opt.pickTable(t) {
+ tables = append(tables, t)
+ }
+ }
+ if len(tables) == 0 {
+ return iters
+ }
+
+ if s.level == 0 {
+ // Remember to add in reverse order!
+ // The newer table at the end of s.tables should be added first as it takes precedence.
+ return appendIteratorsReversed(iters, tables, opt.Reverse)
+ }
+ return append(iters, table.NewConcatIterator(tables, opt.Reverse))
+}
+
+type levelHandlerRLocked struct{}
+
+// overlappingTables returns the tables that intersect with key range. Returns a half-interval.
+// This function should already have acquired a read lock, and this is so important the caller must
+// pass an empty parameter declaring such.
+func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) {
+ if len(kr.left) == 0 || len(kr.right) == 0 {
+ return 0, 0
+ }
+ left := sort.Search(len(s.tables), func(i int) bool {
+ return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0
+ })
+ right := sort.Search(len(s.tables), func(i int) bool {
+ return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0
+ })
+ return left, right
+}
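Editor's note: for levels >= 1 the handler above relies on tables being sorted by non-overlapping key range, so getTableForKey and overlappingTables can use sort.Search. The following standalone illustration shows that lookup pattern; the `tableRange` type, `tableForKey` function, and sample data are assumptions made for the example only.

```go
package main

import (
	"fmt"
	"sort"
)

// tableRange mimics the sorted, non-overlapping key ranges kept per level (>= 1).
type tableRange struct {
	smallest, biggest string
}

// tableForKey returns the index of the single table whose range may contain key,
// or -1 when key is greater than every table's biggest key. This mirrors the
// sort.Search call in levelHandler.getTableForKey.
func tableForKey(tables []tableRange, key string) int {
	idx := sort.Search(len(tables), func(i int) bool {
		return tables[i].biggest >= key
	})
	if idx >= len(tables) {
		return -1
	}
	return idx
}

func main() {
	tables := []tableRange{
		{"a", "f"},
		{"g", "m"},
		{"n", "z"},
	}
	fmt.Println(tableForKey(tables, "h"))  // 1
	fmt.Println(tableForKey(tables, "zz")) // -1
}
```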
diff --git a/vendor/github.com/dgraph-io/badger/levels.go b/vendor/github.com/dgraph-io/badger/levels.go
new file mode 100644
index 000000000..a4efd6624
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/levels.go
@@ -0,0 +1,989 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "math/rand"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/trace"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+type levelsController struct {
+ nextFileID uint64 // Atomic
+ elog trace.EventLog
+
+ // The following are initialized once and const.
+ levels []*levelHandler
+ kv *DB
+
+ cstatus compactStatus
+}
+
+var (
+ // This is for getting timings between stalls.
+ lastUnstalled time.Time
+)
+
+// revertToManifest checks that all necessary table files exist and removes all table files not
+// referenced by the manifest. idMap is a set of table file id's that were read from the directory
+// listing.
+func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
+ // 1. Check all files in manifest exist.
+ for id := range mf.Tables {
+ if _, ok := idMap[id]; !ok {
+ return fmt.Errorf("file does not exist for table %d", id)
+ }
+ }
+
+ // 2. Delete files that shouldn't exist.
+ for id := range idMap {
+ if _, ok := mf.Tables[id]; !ok {
+ kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
+ filename := table.NewFilename(id, kv.opt.Dir)
+ if err := os.Remove(filename); err != nil {
+ return y.Wrapf(err, "While removing table %d", id)
+ }
+ }
+ }
+
+ return nil
+}
+
+func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
+ y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
+ s := &levelsController{
+ kv: db,
+ elog: db.elog,
+ levels: make([]*levelHandler, db.opt.MaxLevels),
+ }
+ s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
+
+ for i := 0; i < db.opt.MaxLevels; i++ {
+ s.levels[i] = newLevelHandler(db, i)
+ if i == 0 {
+ // Do nothing.
+ } else if i == 1 {
+ // Level 1 probably shouldn't be too much bigger than level 0.
+ s.levels[i].maxTotalSize = db.opt.LevelOneSize
+ } else {
+ s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
+ }
+ s.cstatus.levels[i] = new(levelCompactStatus)
+ }
+
+ // Compare manifest against directory, check for existent/non-existent files, and remove.
+ if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
+ return nil, err
+ }
+
+ // Some files may be deleted. Let's reload.
+ var flags uint32 = y.Sync
+ if db.opt.ReadOnly {
+ flags |= y.ReadOnly
+ }
+
+ var mu sync.Mutex
+ tables := make([][]*table.Table, db.opt.MaxLevels)
+ var maxFileID uint64
+
+ // We found that using 3 goroutines allows disk throughput to be utilized to its max.
+ // Disk utilization is the main thing we should focus on, while trying to read the data. That's
+ // the one factor that remains constant between HDD and SSD.
+ throttle := y.NewThrottle(3)
+
+ start := time.Now()
+ var numOpened int32
+ tick := time.NewTicker(3 * time.Second)
+ defer tick.Stop()
+
+ for fileID, tf := range mf.Tables {
+ fname := table.NewFilename(fileID, db.opt.Dir)
+ select {
+ case <-tick.C:
+ db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
+ len(mf.Tables), time.Since(start).Round(time.Millisecond))
+ default:
+ }
+ if err := throttle.Do(); err != nil {
+ closeAllTables(tables)
+ return nil, err
+ }
+ if fileID > maxFileID {
+ maxFileID = fileID
+ }
+ go func(fname string, tf TableManifest) {
+ var rerr error
+ defer func() {
+ throttle.Done(rerr)
+ atomic.AddInt32(&numOpened, 1)
+ }()
+ fd, err := y.OpenExistingFile(fname, flags)
+ if err != nil {
+ rerr = errors.Wrapf(err, "Opening file: %q", fname)
+ return
+ }
+
+ t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
+ if err != nil {
+ if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
+ db.opt.Errorf(err.Error())
+ db.opt.Errorf("Ignoring table %s", fd.Name())
+ // Do not set rerr. We will continue without this table.
+ } else {
+ rerr = errors.Wrapf(err, "Opening table: %q", fname)
+ }
+ return
+ }
+
+ mu.Lock()
+ tables[tf.Level] = append(tables[tf.Level], t)
+ mu.Unlock()
+ }(fname, tf)
+ }
+ if err := throttle.Finish(); err != nil {
+ closeAllTables(tables)
+ return nil, err
+ }
+ db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
+ time.Since(start).Round(time.Millisecond))
+ s.nextFileID = maxFileID + 1
+ for i, tbls := range tables {
+ s.levels[i].initTables(tbls)
+ }
+
+ // Make sure key ranges do not overlap etc.
+ if err := s.validate(); err != nil {
+ _ = s.cleanupLevels()
+ return nil, errors.Wrap(err, "Level validation")
+ }
+
+ // Sync directory (because we have at least removed some files, or previously created the
+ // manifest file).
+ if err := syncDir(db.opt.Dir); err != nil {
+ _ = s.close()
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
+// because that would delete the underlying files.) We ignore errors, which is OK because tables
+// are read-only.
+func closeAllTables(tables [][]*table.Table) {
+ for _, tableSlice := range tables {
+ for _, table := range tableSlice {
+ _ = table.Close()
+ }
+ }
+}
+
+func (s *levelsController) cleanupLevels() error {
+ var firstErr error
+ for _, l := range s.levels {
+ if err := l.close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+// dropTree picks all tables from all levels, creates a manifest changeset,
+// applies it, and then decrements the refs of these tables, which would result
+// in their deletion.
+func (s *levelsController) dropTree() (int, error) {
+ // First pick all tables, so we can create a manifest changelog.
+ var all []*table.Table
+ for _, l := range s.levels {
+ l.RLock()
+ all = append(all, l.tables...)
+ l.RUnlock()
+ }
+ if len(all) == 0 {
+ return 0, nil
+ }
+
+ // Generate the manifest changes.
+ changes := []*pb.ManifestChange{}
+ for _, table := range all {
+ changes = append(changes, newDeleteChange(table.ID()))
+ }
+ changeSet := pb.ManifestChangeSet{Changes: changes}
+ if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
+ return 0, err
+ }
+
+ // Now that manifest has been successfully written, we can delete the tables.
+ for _, l := range s.levels {
+ l.Lock()
+ l.totalSize = 0
+ l.tables = l.tables[:0]
+ l.Unlock()
+ }
+ for _, table := range all {
+ if err := table.DecrRef(); err != nil {
+ return 0, err
+ }
+ }
+ return len(all), nil
+}
+
+// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
+// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
+// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
+// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
+// are run through MergeIterator and compacted to create new tables. All the mechanisms of
+// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
+func (s *levelsController) dropPrefix(prefix []byte) error {
+ opt := s.kv.opt
+ for _, l := range s.levels {
+ l.RLock()
+ if l.level == 0 {
+ size := len(l.tables)
+ l.RUnlock()
+
+ if size > 0 {
+ cp := compactionPriority{
+ level: 0,
+ score: 1.74,
+ // A unique number greater than 1.0 does two things: it helps identify this
+ // function in logs, and it forces a compaction.
+ dropPrefix: prefix,
+ }
+ if err := s.doCompact(cp); err != nil {
+ opt.Warningf("While compacting level 0: %v", err)
+ return nil
+ }
+ }
+ continue
+ }
+
+ var tables []*table.Table
+ for _, table := range l.tables {
+ var absent bool
+ switch {
+ case bytes.HasPrefix(table.Smallest(), prefix):
+ case bytes.HasPrefix(table.Biggest(), prefix):
+ case bytes.Compare(prefix, table.Smallest()) > 0 &&
+ bytes.Compare(prefix, table.Biggest()) < 0:
+ default:
+ absent = true
+ }
+ if !absent {
+ tables = append(tables, table)
+ }
+ }
+ l.RUnlock()
+ if len(tables) == 0 {
+ continue
+ }
+
+ cd := compactDef{
+ elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
+ thisLevel: l,
+ nextLevel: l,
+ top: []*table.Table{},
+ bot: tables,
+ dropPrefix: prefix,
+ }
+ if err := s.runCompactDef(l.level, cd); err != nil {
+ opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *levelsController) startCompact(lc *y.Closer) {
+ n := s.kv.opt.NumCompactors
+ lc.AddRunning(n - 1)
+ for i := 0; i < n; i++ {
+ go s.runWorker(lc)
+ }
+}
+
+func (s *levelsController) runWorker(lc *y.Closer) {
+ defer lc.Done()
+
+ randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
+ select {
+ case <-randomDelay.C:
+ case <-lc.HasBeenClosed():
+ randomDelay.Stop()
+ return
+ }
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ // Can add a done channel or other stuff.
+ case <-ticker.C:
+ prios := s.pickCompactLevels()
+ for _, p := range prios {
+ if err := s.doCompact(p); err == nil {
+ break
+ } else if err == errFillTables {
+ // pass
+ } else {
+ s.kv.opt.Warningf("While running doCompact: %v\n", err)
+ }
+ }
+ case <-lc.HasBeenClosed():
+ return
+ }
+ }
+}
+
+// Returns true if level zero may be compacted, without accounting for compactions that already
+// might be happening.
+func (s *levelsController) isLevel0Compactable() bool {
+ return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
+}
+
+// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
+// which are currently being compacted so that we treat them as already having started being
+// compacted (because they have been, yet their size is already counted in getTotalSize).
+func (l *levelHandler) isCompactable(delSize int64) bool {
+ return l.getTotalSize()-delSize >= l.maxTotalSize
+}
+
+type compactionPriority struct {
+ level int
+ score float64
+ dropPrefix []byte
+}
+
+// pickCompactLevels determines which levels to compact.
+// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
+func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
+ // This function must use identical criteria for guaranteeing compaction's progress that
+ // addLevel0Table uses.
+
+ // cstatus is checked to see if level 0's tables are already being compacted
+ if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
+ pri := compactionPriority{
+ level: 0,
+ score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
+ }
+ prios = append(prios, pri)
+ }
+
+ for i, l := range s.levels[1:] {
+ // Don't consider those tables that are already being compacted right now.
+ delSize := s.cstatus.delSize(i + 1)
+
+ if l.isCompactable(delSize) {
+ pri := compactionPriority{
+ level: i + 1,
+ score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
+ }
+ prios = append(prios, pri)
+ }
+ }
+ sort.Slice(prios, func(i, j int) bool {
+ return prios[i].score > prios[j].score
+ })
+ return prios
+}
+
+// compactBuildTables merges topTables and botTables to form a list of new tables.
+func (s *levelsController) compactBuildTables(
+ lev int, cd compactDef) ([]*table.Table, func() error, error) {
+ topTables := cd.top
+ botTables := cd.bot
+
+ var hasOverlap bool
+ {
+ kr := getKeyRange(cd.top)
+ for i, lh := range s.levels {
+ if i <= lev { // Skip upper levels.
+ continue
+ }
+ lh.RLock()
+ left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
+ lh.RUnlock()
+ if right-left > 0 {
+ hasOverlap = true
+ break
+ }
+ }
+ }
+
+ // Try to collect stats so that we can inform value log about GC. That would help us find which
+ // value log file should be GCed.
+ discardStats := make(map[uint32]int64)
+ updateStats := func(vs y.ValueStruct) {
+ if vs.Meta&bitValuePointer > 0 {
+ var vp valuePointer
+ vp.Decode(vs.Value)
+ discardStats[vp.Fid] += int64(vp.Len)
+ }
+ }
+
+ // Create iterators across all the tables involved first.
+ var iters []y.Iterator
+ if lev == 0 {
+ iters = appendIteratorsReversed(iters, topTables, false)
+ } else if len(topTables) > 0 {
+ y.AssertTrue(len(topTables) == 1)
+ iters = []y.Iterator{topTables[0].NewIterator(false)}
+ }
+
+ // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
+ var valid []*table.Table
+ for _, table := range botTables {
+ if len(cd.dropPrefix) > 0 &&
+ bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
+ bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
+ // All the keys in this table have the dropPrefix. So, this table does not need to be
+ // in the iterator and can be dropped immediately.
+ continue
+ }
+ valid = append(valid, table)
+ }
+ iters = append(iters, table.NewConcatIterator(valid, false))
+ it := y.NewMergeIterator(iters, false)
+ defer it.Close() // Important to close the iterator to do ref counting.
+
+ it.Rewind()
+
+ // Pick a discard ts, so we can discard versions below this ts. We should
+ // never discard any versions starting from above this timestamp, because
+ // that would affect the snapshot view guarantee provided by transactions.
+ discardTs := s.kv.orc.discardAtOrBelow()
+
+ // Start generating new tables.
+ type newTableResult struct {
+ table *table.Table
+ err error
+ }
+ resultCh := make(chan newTableResult)
+ var numBuilds, numVersions int
+ var lastKey, skipKey []byte
+ for it.Valid() {
+ timeStart := time.Now()
+ builder := table.NewTableBuilder()
+ var numKeys, numSkips uint64
+ for ; it.Valid(); it.Next() {
+ // See if we need to skip the prefix.
+ if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
+ numSkips++
+ updateStats(it.Value())
+ continue
+ }
+
+ // See if we need to skip this key.
+ if len(skipKey) > 0 {
+ if y.SameKey(it.Key(), skipKey) {
+ numSkips++
+ updateStats(it.Value())
+ continue
+ } else {
+ skipKey = skipKey[:0]
+ }
+ }
+
+ if !y.SameKey(it.Key(), lastKey) {
+ if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
+ // Only break if we are on a different key, and have reached capacity. We want
+ // to ensure that all versions of the key are stored in the same sstable, and
+ // not divided across multiple tables at the same level.
+ break
+ }
+ lastKey = y.SafeCopy(lastKey, it.Key())
+ numVersions = 0
+ }
+
+ vs := it.Value()
+ version := y.ParseTs(it.Key())
+ // Do not discard entries inserted by merge operator. These entries will be
+ // discarded once they're merged
+ if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
+ // Keep track of the number of versions encountered for this key. Only consider the
+ // versions which are below the minReadTs, otherwise, we might end up discarding the
+ // only valid version for a running transaction.
+ numVersions++
+ lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
+ if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
+ numVersions > s.kv.opt.NumVersionsToKeep ||
+ lastValidVersion {
+ // If this version of the key is deleted or expired, skip all the rest of the
+ // versions. Ensure that we're only removing versions below readTs.
+ skipKey = y.SafeCopy(skipKey, it.Key())
+
+ if lastValidVersion {
+ // Add this key. We have set skipKey, so the following key versions
+ // would be skipped.
+ } else if hasOverlap {
+ // If this key range has overlap with lower levels, then keep the deletion
+ // marker with the latest version, discarding the rest. We have set skipKey,
+ // so the following key versions would be skipped.
+ } else {
+ // If no overlap, we can skip all the versions, by continuing here.
+ numSkips++
+ updateStats(vs)
+ continue // Skip adding this key.
+ }
+ }
+ }
+ numKeys++
+ y.Check(builder.Add(it.Key(), it.Value()))
+ }
+ // It was true that it.Valid() at least once in the loop above, which means we
+ // called Add() at least once, and builder is not Empty().
+ s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
+ numKeys, numSkips, time.Since(timeStart))
+ if !builder.Empty() {
+ numBuilds++
+ fileID := s.reserveFileID()
+ go func(builder *table.Builder) {
+ defer builder.Close()
+
+ fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
+ if err != nil {
+ resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
+ return
+ }
+
+ if _, err := fd.Write(builder.Finish()); err != nil {
+ resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
+ return
+ }
+
+ tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
+ // decrRef is added below.
+ resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
+ }(builder)
+ }
+ }
+
+ newTables := make([]*table.Table, 0, 20)
+ // Wait for all table builders to finish.
+ var firstErr error
+ for x := 0; x < numBuilds; x++ {
+ res := <-resultCh
+ newTables = append(newTables, res.table)
+ if firstErr == nil {
+ firstErr = res.err
+ }
+ }
+
+ if firstErr == nil {
+ // Ensure created files' directory entries are visible. We don't mind the extra latency
+ // from not doing this ASAP after all file creation has finished because this is a
+ // background operation.
+ firstErr = syncDir(s.kv.opt.Dir)
+ }
+
+ if firstErr != nil {
+ // An error happened. Delete all the newly created table files (by calling DecrRef
+ // -- we're the only holders of a ref).
+ for j := 0; j < numBuilds; j++ {
+ if newTables[j] != nil {
+ _ = newTables[j].DecrRef()
+ }
+ }
+ errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
+ return nil, nil, errorReturn
+ }
+
+ sort.Slice(newTables, func(i, j int) bool {
+ return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
+ })
+ if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
+ return nil, nil, errors.Wrap(err, "failed to update discard stats")
+ }
+ s.kv.opt.Debugf("Discard stats: %v", discardStats)
+ return newTables, func() error { return decrRefs(newTables) }, nil
+}
+
+func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
+ changes := []*pb.ManifestChange{}
+ for _, table := range newTables {
+ changes = append(changes,
+ newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
+ }
+ for _, table := range cd.top {
+ changes = append(changes, newDeleteChange(table.ID()))
+ }
+ for _, table := range cd.bot {
+ changes = append(changes, newDeleteChange(table.ID()))
+ }
+ return pb.ManifestChangeSet{Changes: changes}
+}
+
+type compactDef struct {
+ elog trace.Trace
+
+ thisLevel *levelHandler
+ nextLevel *levelHandler
+
+ top []*table.Table
+ bot []*table.Table
+
+ thisRange keyRange
+ nextRange keyRange
+
+ thisSize int64
+
+ dropPrefix []byte
+}
+
+func (cd *compactDef) lockLevels() {
+ cd.thisLevel.RLock()
+ cd.nextLevel.RLock()
+}
+
+func (cd *compactDef) unlockLevels() {
+ cd.nextLevel.RUnlock()
+ cd.thisLevel.RUnlock()
+}
+
+func (s *levelsController) fillTablesL0(cd *compactDef) bool {
+ cd.lockLevels()
+ defer cd.unlockLevels()
+
+ cd.top = make([]*table.Table, len(cd.thisLevel.tables))
+ copy(cd.top, cd.thisLevel.tables)
+ if len(cd.top) == 0 {
+ return false
+ }
+ cd.thisRange = infRange
+
+ kr := getKeyRange(cd.top)
+ left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
+ cd.bot = make([]*table.Table, right-left)
+ copy(cd.bot, cd.nextLevel.tables[left:right])
+
+ if len(cd.bot) == 0 {
+ cd.nextRange = kr
+ } else {
+ cd.nextRange = getKeyRange(cd.bot)
+ }
+
+ if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
+ return false
+ }
+
+ return true
+}
+
+func (s *levelsController) fillTables(cd *compactDef) bool {
+ cd.lockLevels()
+ defer cd.unlockLevels()
+
+ tbls := make([]*table.Table, len(cd.thisLevel.tables))
+ copy(tbls, cd.thisLevel.tables)
+ if len(tbls) == 0 {
+ return false
+ }
+
+ // Find the biggest table, and compact that first.
+ // TODO: Try other table picking strategies.
+ sort.Slice(tbls, func(i, j int) bool {
+ return tbls[i].Size() > tbls[j].Size()
+ })
+
+ for _, t := range tbls {
+ cd.thisSize = t.Size()
+ cd.thisRange = keyRange{
+ // We pick all the versions of the smallest and the biggest key.
+ left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
+ // Note that version zero would be the rightmost key.
+ right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
+ }
+ if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
+ continue
+ }
+ cd.top = []*table.Table{t}
+ left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
+
+ cd.bot = make([]*table.Table, right-left)
+ copy(cd.bot, cd.nextLevel.tables[left:right])
+
+ if len(cd.bot) == 0 {
+ cd.bot = []*table.Table{}
+ cd.nextRange = cd.thisRange
+ if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
+ continue
+ }
+ return true
+ }
+ cd.nextRange = getKeyRange(cd.bot)
+
+ if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
+ continue
+ }
+ if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
+ continue
+ }
+ return true
+ }
+ return false
+}
+
+func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
+ timeStart := time.Now()
+
+ thisLevel := cd.thisLevel
+ nextLevel := cd.nextLevel
+
+ // Tables should never be moved directly between levels; they should always be rewritten to allow
+ // discarding invalid versions.
+
+ newTables, decr, err := s.compactBuildTables(l, cd)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // Only assign to err, if it's not already nil.
+ if decErr := decr(); err == nil {
+ err = decErr
+ }
+ }()
+ changeSet := buildChangeSet(&cd, newTables)
+
+ // We write to the manifest _before_ we delete files (and after we created files)
+ if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
+ return err
+ }
+
+ // See comment earlier in this function about the ordering of these ops, and the order in which
+ // we access levels when reading.
+ if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
+ return err
+ }
+ if err := thisLevel.deleteTables(cd.top); err != nil {
+ return err
+ }
+
+ // Note: For level 0, while doCompact is running, it is possible that new tables are added.
+ // However, the tables are added only to the end, so it is ok to just delete the first table.
+
+ s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
+ thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
+ len(newTables), time.Since(timeStart))
+ return nil
+}
+
+var errFillTables = errors.New("Unable to fill tables")
+
+// doCompact picks some table on level l and compacts it away to the next level.
+func (s *levelsController) doCompact(p compactionPriority) error {
+ l := p.level
+ y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
+
+ cd := compactDef{
+ elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
+ thisLevel: s.levels[l],
+ nextLevel: s.levels[l+1],
+ dropPrefix: p.dropPrefix,
+ }
+ cd.elog.SetMaxEvents(100)
+ defer cd.elog.Finish()
+
+ s.kv.opt.Infof("Got compaction priority: %+v", p)
+
+ // While picking tables to be compacted, both levels' tables are expected to
+ // remain unchanged.
+ if l == 0 {
+ if !s.fillTablesL0(&cd) {
+ return errFillTables
+ }
+
+ } else {
+ if !s.fillTables(&cd) {
+ return errFillTables
+ }
+ }
+ defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
+
+ s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
+ s.cstatus.toLog(cd.elog)
+ if err := s.runCompactDef(l, cd); err != nil {
+ // This compaction couldn't be done successfully.
+ s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
+ return err
+ }
+
+ s.cstatus.toLog(cd.elog)
+ s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
+ return nil
+}
+
+func (s *levelsController) addLevel0Table(t *table.Table) error {
+ // We update the manifest _before_ the table becomes part of a levelHandler, because at that
+ // point it could get used in some compaction. This ensures the manifest file gets updated in
+ // the proper order. (That means this update happens before that of some compaction which
+ // deletes the table.)
+ err := s.kv.manifest.addChanges([]*pb.ManifestChange{
+ newCreateChange(t.ID(), 0, t.Checksum),
+ })
+ if err != nil {
+ return err
+ }
+
+ for !s.levels[0].tryAddLevel0Table(t) {
+ // Stall. Make sure all levels are healthy before we unstall.
+ var timeStart time.Time
+ {
+ s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
+ s.cstatus.RLock()
+ for i := 0; i < s.kv.opt.MaxLevels; i++ {
+ s.elog.Printf("level=%d. Status=%s Size=%d\n",
+ i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
+ }
+ s.cstatus.RUnlock()
+ timeStart = time.Now()
+ }
+ // Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
+ // will very quickly fill up level 0 again and if the compaction strategy favors level 0,
+ // then level 1 is going to be super full.
+ for i := 0; ; i++ {
+ // Passing 0 for delSize to compactable means we're treating incomplete compactions as
+ // not having finished -- we wait for them to finish. Also, it's crucial this behavior
+ // replicates pickCompactLevels' behavior in computing compactability in order to
+ // guarantee progress.
+ if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ if i%100 == 0 {
+ prios := s.pickCompactLevels()
+ s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
+ i = 0
+ }
+ }
+ {
+ s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
+ lastUnstalled = time.Now()
+ }
+ }
+
+ return nil
+}
+
+func (s *levelsController) close() error {
+ err := s.cleanupLevels()
+ return errors.Wrap(err, "levelsController.Close")
+}
+
+// get returns the found value if any. If not found, we return nil.
+func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
+ // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
+ // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
+ // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
+ // parallelize this, we will need to call the h.RLock() function by increasing order of level
+ // number.)
+ version := y.ParseTs(key)
+ for _, h := range s.levels {
+ vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
+ if err != nil {
+ return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
+ }
+ if vs.Value == nil && vs.Meta == 0 {
+ continue
+ }
+ if maxVs == nil || vs.Version == version {
+ return vs, nil
+ }
+ if maxVs.Version < vs.Version {
+ *maxVs = vs
+ }
+ }
+ if maxVs != nil {
+ return *maxVs, nil
+ }
+ return y.ValueStruct{}, nil
+}
+
+func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
+ for i := len(th) - 1; i >= 0; i-- {
+ // This will increment the reference of the table handler.
+ out = append(out, th[i].NewIterator(reversed))
+ }
+ return out
+}
+
+// appendIterators appends iterators to an array of iterators, for merging.
+// Note: This obtains references for the table handlers. Remember to close these iterators.
+func (s *levelsController) appendIterators(
+ iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
+ // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
+ // data when there's a compaction.
+ for _, level := range s.levels {
+ iters = level.appendIterators(iters, opt)
+ }
+ return iters
+}
+
+// TableInfo represents the information about a table.
+type TableInfo struct {
+ ID uint64
+ Level int
+ Left []byte
+ Right []byte
+ KeyCount uint64 // Number of keys in the table
+}
+
+func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
+ for _, l := range s.levels {
+ l.RLock()
+ for _, t := range l.tables {
+ var count uint64
+ if withKeysCount {
+ it := t.NewIterator(false)
+ for it.Rewind(); it.Valid(); it.Next() {
+ count++
+ }
+ }
+
+ info := TableInfo{
+ ID: t.ID(),
+ Level: l.level,
+ Left: t.Smallest(),
+ Right: t.Biggest(),
+ KeyCount: count,
+ }
+ result = append(result, info)
+ }
+ l.RUnlock()
+ }
+ sort.Slice(result, func(i, j int) bool {
+ if result[i].Level != result[j].Level {
+ return result[i].Level < result[j].Level
+ }
+ return result[i].ID < result[j].ID
+ })
+ return
+}
diff --git a/vendor/github.com/dgraph-io/badger/logger.go b/vendor/github.com/dgraph-io/badger/logger.go
new file mode 100644
index 000000000..3a9b8a337
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/logger.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "log"
+ "os"
+)
+
+// Logger is implemented by any logging system that is used for standard logs.
+type Logger interface {
+ Errorf(string, ...interface{})
+ Warningf(string, ...interface{})
+ Infof(string, ...interface{})
+ Debugf(string, ...interface{})
+}
+
+// Errorf logs an ERROR log message to the logger specified in opts or to the
+// global logger if no logger is specified in opts.
+func (opt *Options) Errorf(format string, v ...interface{}) {
+ if opt.Logger == nil {
+ return
+ }
+ opt.Logger.Errorf(format, v...)
+}
+
+// Infof logs an INFO message to the logger specified in opts.
+func (opt *Options) Infof(format string, v ...interface{}) {
+ if opt.Logger == nil {
+ return
+ }
+ opt.Logger.Infof(format, v...)
+}
+
+// Warningf logs a WARNING message to the logger specified in opts.
+func (opt *Options) Warningf(format string, v ...interface{}) {
+ if opt.Logger == nil {
+ return
+ }
+ opt.Logger.Warningf(format, v...)
+}
+
+// Debugf logs a DEBUG message to the logger specified in opts.
+func (opt *Options) Debugf(format string, v ...interface{}) {
+ if opt.Logger == nil {
+ return
+ }
+ opt.Logger.Debugf(format, v...)
+}
+
+type defaultLog struct {
+ *log.Logger
+}
+
+var defaultLogger = &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags)}
+
+func (l *defaultLog) Errorf(f string, v ...interface{}) {
+ l.Printf("ERROR: "+f, v...)
+}
+
+func (l *defaultLog) Warningf(f string, v ...interface{}) {
+ l.Printf("WARNING: "+f, v...)
+}
+
+func (l *defaultLog) Infof(f string, v ...interface{}) {
+ l.Printf("INFO: "+f, v...)
+}
+
+func (l *defaultLog) Debugf(f string, v ...interface{}) {
+ l.Printf("DEBUG: "+f, v...)
+}
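
A minimal usage sketch of the Logger interface above: any type with these four methods can be plugged in. It assumes the DefaultOptions/WithLogger builders added later in this diff and upstream badger.Open/DB.Close; the type name, prefix, and path are illustrative.

package main

import (
    "log"
    "os"

    "github.com/dgraph-io/badger"
)

// prefixLogger satisfies badger.Logger by delegating to a standard *log.Logger.
type prefixLogger struct {
    *log.Logger
}

func (l prefixLogger) Errorf(f string, v ...interface{})   { l.Printf("ERROR: "+f, v...) }
func (l prefixLogger) Warningf(f string, v ...interface{}) { l.Printf("WARNING: "+f, v...) }
func (l prefixLogger) Infof(f string, v ...interface{})    { l.Printf("INFO: "+f, v...) }
func (l prefixLogger) Debugf(f string, v ...interface{})   { l.Printf("DEBUG: "+f, v...) }

func main() {
    // Route badger's internal logging through the custom logger.
    logger := prefixLogger{log.New(os.Stderr, "badger ", log.LstdFlags)}
    opts := badger.DefaultOptions("/tmp/badger-logger-example").WithLogger(logger)
    db, err := badger.Open(opts)
    if err != nil {
        logger.Errorf("open failed: %v", err)
        return
    }
    defer db.Close()
}
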
diff --git a/vendor/github.com/dgraph-io/badger/managed_db.go b/vendor/github.com/dgraph-io/badger/managed_db.go
new file mode 100644
index 000000000..4de226ae2
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/managed_db.go
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+// OpenManaged returns a new DB, which allows more control over setting
+// transaction timestamps, aka managed mode.
+//
+// This is only useful for databases built on top of Badger (like Dgraph), and
+// can be ignored by most users.
+func OpenManaged(opts Options) (*DB, error) {
+ opts.managedTxns = true
+ return Open(opts)
+}
+
+// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the
+// provided read timestamp.
+//
+// This is only useful for databases built on top of Badger (like Dgraph), and
+// can be ignored by most users.
+func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn {
+ if !db.opt.managedTxns {
+ panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.")
+ }
+ txn := db.newTransaction(update, true)
+ txn.readTs = readTs
+ return txn
+}
+
+// CommitAt commits the transaction, following the same logic as Commit(), but
+// at the given commit timestamp. This will panic if not used with managed transactions.
+//
+// This is only useful for databases built on top of Badger (like Dgraph), and
+// can be ignored by most users.
+func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error {
+ if !txn.db.opt.managedTxns {
+ panic("Cannot use CommitAt with managedDB=false. Use Commit instead.")
+ }
+ txn.commitTs = commitTs
+ if callback == nil {
+ return txn.Commit()
+ }
+ txn.CommitWith(callback)
+ return nil
+}
+
+// SetDiscardTs sets a timestamp at or below which, any invalid or deleted
+// versions can be discarded from the LSM tree, and thence from the value log to
+// reclaim disk space. Can only be used with managed transactions.
+func (db *DB) SetDiscardTs(ts uint64) {
+ if !db.opt.managedTxns {
+ panic("Cannot use SetDiscardTs with managedDB=false.")
+ }
+ db.orc.setDiscardTs(ts)
+}
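
A minimal sketch of the managed-mode flow defined above, assuming Txn.Set and DB.Close behave as in upstream badger; the path and the read/commit timestamps are placeholders and error handling is abbreviated.

package main

import (
    "github.com/dgraph-io/badger"
)

func main() {
    // Managed mode: the caller supplies read and commit timestamps.
    db, err := badger.OpenManaged(badger.DefaultOptions("/tmp/badger-managed-example"))
    if err != nil {
        panic(err)
    }
    defer db.Close()

    // Read at timestamp 10, write, then commit at timestamp 11.
    txn := db.NewTransactionAt(10, true)
    defer txn.Discard()
    if err := txn.Set([]byte("key"), []byte("value")); err != nil {
        panic(err)
    }
    if err := txn.CommitAt(11, nil); err != nil {
        panic(err)
    }

    // Allow invalid or deleted versions at or below timestamp 11 to be discarded.
    db.SetDiscardTs(11)
}
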
diff --git a/vendor/github.com/dgraph-io/badger/manifest.go b/vendor/github.com/dgraph-io/badger/manifest.go
new file mode 100644
index 000000000..a58188294
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/manifest.go
@@ -0,0 +1,440 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+// Manifest represents the contents of the MANIFEST file in a Badger store.
+//
+// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're
+// at.
+//
+// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically,
+// and contains a sequence of ManifestChange's (file creations/deletions) which we use to
+// reconstruct the manifest at startup.
+type Manifest struct {
+ Levels []levelManifest
+ Tables map[uint64]TableManifest
+
+ // Contains total number of creation and deletion changes in the manifest -- used to compute
+ // whether it'd be useful to rewrite the manifest.
+ Creations int
+ Deletions int
+}
+
+func createManifest() Manifest {
+ levels := make([]levelManifest, 0)
+ return Manifest{
+ Levels: levels,
+ Tables: make(map[uint64]TableManifest),
+ }
+}
+
+// levelManifest contains information about LSM tree levels
+// in the MANIFEST file.
+type levelManifest struct {
+ Tables map[uint64]struct{} // Set of table id's
+}
+
+// TableManifest contains information about a specific table
+// in the LSM tree.
+type TableManifest struct {
+ Level uint8
+ Checksum []byte
+}
+
+// manifestFile holds the file pointer (and other info) about the manifest file, which is a log
+// file we append to.
+type manifestFile struct {
+ fp *os.File
+ directory string
+ // We make this configurable so that unit tests can hit rewrite() code quickly
+ deletionsRewriteThreshold int
+
+ // Guards appends, which includes access to the manifest field.
+ appendLock sync.Mutex
+
+ // Used to track the current state of the manifest, used when rewriting.
+ manifest Manifest
+}
+
+const (
+ // ManifestFilename is the filename for the manifest file.
+ ManifestFilename = "MANIFEST"
+ manifestRewriteFilename = "MANIFEST-REWRITE"
+ manifestDeletionsRewriteThreshold = 10000
+ manifestDeletionsRatio = 10
+)
+
+// asChanges returns a sequence of changes that could be used to recreate the Manifest in its
+// present state.
+func (m *Manifest) asChanges() []*pb.ManifestChange {
+ changes := make([]*pb.ManifestChange, 0, len(m.Tables))
+ for id, tm := range m.Tables {
+ changes = append(changes, newCreateChange(id, int(tm.Level), tm.Checksum))
+ }
+ return changes
+}
+
+func (m *Manifest) clone() Manifest {
+ changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
+ ret := createManifest()
+ y.Check(applyChangeSet(&ret, &changeSet))
+ return ret
+}
+
+// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates one if
+// it doesn't.
+func openOrCreateManifestFile(dir string, readOnly bool) (
+ ret *manifestFile, result Manifest, err error) {
+ return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold)
+}
+
+func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
+ ret *manifestFile, result Manifest, err error) {
+
+ path := filepath.Join(dir, ManifestFilename)
+ var flags uint32
+ if readOnly {
+ flags |= y.ReadOnly
+ }
+ fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock.
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, Manifest{}, err
+ }
+ if readOnly {
+ return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
+ }
+ m := createManifest()
+ fp, netCreations, err := helpRewrite(dir, &m)
+ if err != nil {
+ return nil, Manifest{}, err
+ }
+ y.AssertTrue(netCreations == 0)
+ mf := &manifestFile{
+ fp: fp,
+ directory: dir,
+ manifest: m.clone(),
+ deletionsRewriteThreshold: deletionsThreshold,
+ }
+ return mf, m, nil
+ }
+
+ manifest, truncOffset, err := ReplayManifestFile(fp)
+ if err != nil {
+ _ = fp.Close()
+ return nil, Manifest{}, err
+ }
+
+ if !readOnly {
+ // Truncate file so we don't have a half-written entry at the end.
+ if err := fp.Truncate(truncOffset); err != nil {
+ _ = fp.Close()
+ return nil, Manifest{}, err
+ }
+ }
+ if _, err = fp.Seek(0, io.SeekEnd); err != nil {
+ _ = fp.Close()
+ return nil, Manifest{}, err
+ }
+
+ mf := &manifestFile{
+ fp: fp,
+ directory: dir,
+ manifest: manifest.clone(),
+ deletionsRewriteThreshold: deletionsThreshold,
+ }
+ return mf, manifest, nil
+}
+
+func (mf *manifestFile) close() error {
+ return mf.fp.Close()
+}
+
+// addChanges writes a batch of changes, atomically, to the file. By "atomically" we mean that when
+// we replay the MANIFEST file, we'll either replay all the changes or none of them. (The truth of
+// this depends on the filesystem -- some might append garbage data if a system crash happens at
+// the wrong time.)
+func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error {
+ changes := pb.ManifestChangeSet{Changes: changesParam}
+ buf, err := changes.Marshal()
+ if err != nil {
+ return err
+ }
+
+ // Maybe we could use O_APPEND instead (on certain file systems)
+ mf.appendLock.Lock()
+ if err := applyChangeSet(&mf.manifest, &changes); err != nil {
+ mf.appendLock.Unlock()
+ return err
+ }
+ // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care
+ if mf.manifest.Deletions > mf.deletionsRewriteThreshold &&
+ mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) {
+ if err := mf.rewrite(); err != nil {
+ mf.appendLock.Unlock()
+ return err
+ }
+ } else {
+ var lenCrcBuf [8]byte
+ binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf)))
+ binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable))
+ buf = append(lenCrcBuf[:], buf...)
+ if _, err := mf.fp.Write(buf); err != nil {
+ mf.appendLock.Unlock()
+ return err
+ }
+ }
+
+ mf.appendLock.Unlock()
+ return y.FileSync(mf.fp)
+}
+
+// Has to be 4 bytes. The value can never change, ever, anyway.
+var magicText = [4]byte{'B', 'd', 'g', 'r'}
+
+// The magic version number.
+const magicVersion = 4
+
+func helpRewrite(dir string, m *Manifest) (*os.File, int, error) {
+ rewritePath := filepath.Join(dir, manifestRewriteFilename)
+ // We explicitly sync.
+ fp, err := y.OpenTruncFile(rewritePath, false)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ buf := make([]byte, 8)
+ copy(buf[0:4], magicText[:])
+ binary.BigEndian.PutUint32(buf[4:8], magicVersion)
+
+ netCreations := len(m.Tables)
+ changes := m.asChanges()
+ set := pb.ManifestChangeSet{Changes: changes}
+
+ changeBuf, err := set.Marshal()
+ if err != nil {
+ fp.Close()
+ return nil, 0, err
+ }
+ var lenCrcBuf [8]byte
+ binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf)))
+ binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable))
+ buf = append(buf, lenCrcBuf[:]...)
+ buf = append(buf, changeBuf...)
+ if _, err := fp.Write(buf); err != nil {
+ fp.Close()
+ return nil, 0, err
+ }
+ if err := y.FileSync(fp); err != nil {
+ fp.Close()
+ return nil, 0, err
+ }
+
+ // In Windows the files should be closed before doing a Rename.
+ if err = fp.Close(); err != nil {
+ return nil, 0, err
+ }
+ manifestPath := filepath.Join(dir, ManifestFilename)
+ if err := os.Rename(rewritePath, manifestPath); err != nil {
+ return nil, 0, err
+ }
+ fp, err = y.OpenExistingFile(manifestPath, 0)
+ if err != nil {
+ return nil, 0, err
+ }
+ if _, err := fp.Seek(0, io.SeekEnd); err != nil {
+ fp.Close()
+ return nil, 0, err
+ }
+ if err := syncDir(dir); err != nil {
+ fp.Close()
+ return nil, 0, err
+ }
+
+ return fp, netCreations, nil
+}
+
+// Must be called while appendLock is held.
+func (mf *manifestFile) rewrite() error {
+ // In Windows the files should be closed before doing a Rename.
+ if err := mf.fp.Close(); err != nil {
+ return err
+ }
+ fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest)
+ if err != nil {
+ return err
+ }
+ mf.fp = fp
+ mf.manifest.Creations = netCreations
+ mf.manifest.Deletions = 0
+
+ return nil
+}
+
+type countingReader struct {
+ wrapped *bufio.Reader
+ count int64
+}
+
+func (r *countingReader) Read(p []byte) (n int, err error) {
+ n, err = r.wrapped.Read(p)
+ r.count += int64(n)
+ return
+}
+
+func (r *countingReader) ReadByte() (b byte, err error) {
+ b, err = r.wrapped.ReadByte()
+ if err == nil {
+ r.count++
+ }
+ return
+}
+
+var (
+ errBadMagic = errors.New("manifest has bad magic")
+ errBadChecksum = errors.New("manifest has checksum mismatch")
+)
+
+// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one
+// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.)
+// Also, returns the last offset after a completely read manifest entry -- the file must be
+// truncated at that point before further appends are made (if there is a partial entry after
+// that). In normal conditions, truncOffset is the file size.
+func ReplayManifestFile(fp *os.File) (ret Manifest, truncOffset int64, err error) {
+ r := countingReader{wrapped: bufio.NewReader(fp)}
+
+ var magicBuf [8]byte
+ if _, err := io.ReadFull(&r, magicBuf[:]); err != nil {
+ return Manifest{}, 0, errBadMagic
+ }
+ if !bytes.Equal(magicBuf[0:4], magicText[:]) {
+ return Manifest{}, 0, errBadMagic
+ }
+ version := binary.BigEndian.Uint32(magicBuf[4:8])
+ if version != magicVersion {
+ return Manifest{}, 0,
+ fmt.Errorf("manifest has unsupported version: %d (we support %d)", version, magicVersion)
+ }
+
+ build := createManifest()
+ var offset int64
+ for {
+ offset = r.count
+ var lenCrcBuf [8]byte
+ _, err := io.ReadFull(&r, lenCrcBuf[:])
+ if err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ break
+ }
+ return Manifest{}, 0, err
+ }
+ length := binary.BigEndian.Uint32(lenCrcBuf[0:4])
+ var buf = make([]byte, length)
+ if _, err := io.ReadFull(&r, buf); err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ break
+ }
+ return Manifest{}, 0, err
+ }
+ if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) {
+ return Manifest{}, 0, errBadChecksum
+ }
+
+ var changeSet pb.ManifestChangeSet
+ if err := changeSet.Unmarshal(buf); err != nil {
+ return Manifest{}, 0, err
+ }
+
+ if err := applyChangeSet(&build, &changeSet); err != nil {
+ return Manifest{}, 0, err
+ }
+ }
+
+ return build, offset, err
+}
+
+func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error {
+ switch tc.Op {
+ case pb.ManifestChange_CREATE:
+ if _, ok := build.Tables[tc.Id]; ok {
+ return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id)
+ }
+ build.Tables[tc.Id] = TableManifest{
+ Level: uint8(tc.Level),
+ Checksum: append([]byte{}, tc.Checksum...),
+ }
+ for len(build.Levels) <= int(tc.Level) {
+ build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})})
+ }
+ build.Levels[tc.Level].Tables[tc.Id] = struct{}{}
+ build.Creations++
+ case pb.ManifestChange_DELETE:
+ tm, ok := build.Tables[tc.Id]
+ if !ok {
+ return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id)
+ }
+ delete(build.Levels[tm.Level].Tables, tc.Id)
+ delete(build.Tables, tc.Id)
+ build.Deletions++
+ default:
+ return fmt.Errorf("MANIFEST file has invalid manifestChange op")
+ }
+ return nil
+}
+
+// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is
+// just plain broken.
+func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error {
+ for _, change := range changeSet.Changes {
+ if err := applyManifestChange(build, change); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newCreateChange(id uint64, level int, checksum []byte) *pb.ManifestChange {
+ return &pb.ManifestChange{
+ Id: id,
+ Op: pb.ManifestChange_CREATE,
+ Level: uint32(level),
+ Checksum: checksum,
+ }
+}
+
+func newDeleteChange(id uint64) *pb.ManifestChange {
+ return &pb.ManifestChange{
+ Id: id,
+ Op: pb.ManifestChange_DELETE,
+ }
+}
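
A standalone sketch of the record framing that addChanges writes and ReplayManifestFile verifies: a 4-byte big-endian payload length, a 4-byte big-endian CRC-32 (Castagnoli) of the payload, then the payload itself. crc32.MakeTable(crc32.Castagnoli) stands in for y.CastagnoliCrcTable; this illustrates the on-disk format and is not badger code.

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

// frameRecord prefixes a payload with its length and Castagnoli CRC-32,
// mirroring the framing used for each manifest change set.
func frameRecord(payload []byte) []byte {
    castagnoli := crc32.MakeTable(crc32.Castagnoli)
    var header [8]byte
    binary.BigEndian.PutUint32(header[0:4], uint32(len(payload)))
    binary.BigEndian.PutUint32(header[4:8], crc32.Checksum(payload, castagnoli))
    return append(header[:], payload...)
}

func main() {
    rec := frameRecord([]byte("marshaled ManifestChangeSet bytes would go here"))
    fmt.Printf("framed %d payload bytes into %d total bytes\n", len(rec)-8, len(rec))
}
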
diff --git a/vendor/github.com/dgraph-io/badger/merge.go b/vendor/github.com/dgraph-io/badger/merge.go
new file mode 100644
index 000000000..02ad4bcde
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/merge.go
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "sync"
+ "time"
+
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+// MergeOperator represents a Badger merge operator.
+type MergeOperator struct {
+ sync.RWMutex
+ f MergeFunc
+ db *DB
+ key []byte
+ closer *y.Closer
+}
+
+// MergeFunc accepts two byte slices, one representing an existing value, and
+// another representing a new value that needs to be ‘merged’ into it. MergeFunc
+// contains the logic to perform the ‘merge’ and return an updated value.
+// MergeFunc could perform operations like integer addition, list appends etc.
+// Note that the ordering of the operands is maintained.
+type MergeFunc func(existingVal, newVal []byte) []byte
+
+// GetMergeOperator creates a new MergeOperator for a given key and returns a
+// pointer to it. It also fires off a goroutine that performs a compaction using
+// the merge function that runs periodically, as specified by dur.
+func (db *DB) GetMergeOperator(key []byte,
+ f MergeFunc, dur time.Duration) *MergeOperator {
+ op := &MergeOperator{
+ f: f,
+ db: db,
+ key: key,
+ closer: y.NewCloser(1),
+ }
+
+ go op.runCompactions(dur)
+ return op
+}
+
+var errNoMerge = errors.New("No need for merge")
+
+func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) {
+ txn := op.db.NewTransaction(false)
+ defer txn.Discard()
+ opt := DefaultIteratorOptions
+ opt.AllVersions = true
+ it := txn.NewKeyIterator(op.key, opt)
+ defer it.Close()
+
+ var numVersions int
+ for it.Rewind(); it.Valid(); it.Next() {
+ item := it.Item()
+ numVersions++
+ if numVersions == 1 {
+ // This should be the newVal, considering this is the latest version.
+ newVal, err = item.ValueCopy(newVal)
+ if err != nil {
+ return nil, 0, err
+ }
+ latest = item.Version()
+ } else {
+ if err := item.Value(func(oldVal []byte) error {
+ // The merge should always be on the newVal considering it has the merge result of
+ // the latest version. The value read should be the oldVal.
+ newVal = op.f(oldVal, newVal)
+ return nil
+ }); err != nil {
+ return nil, 0, err
+ }
+ }
+ if item.DiscardEarlierVersions() {
+ break
+ }
+ }
+ if numVersions == 0 {
+ return nil, latest, ErrKeyNotFound
+ } else if numVersions == 1 {
+ return newVal, latest, errNoMerge
+ }
+ return newVal, latest, nil
+}
+
+func (op *MergeOperator) compact() error {
+ op.Lock()
+ defer op.Unlock()
+ val, version, err := op.iterateAndMerge()
+ if err == ErrKeyNotFound || err == errNoMerge {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ entries := []*Entry{
+ {
+ Key: y.KeyWithTs(op.key, version),
+ Value: val,
+ meta: bitDiscardEarlierVersions,
+ },
+ }
+ // Write value back to the DB. It is important that we do not set the bitMergeEntry bit
+ // here. When compaction happens, all the older merged entries will be removed.
+ return op.db.batchSetAsync(entries, func(err error) {
+ if err != nil {
+ op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err)
+ }
+ })
+}
+
+func (op *MergeOperator) runCompactions(dur time.Duration) {
+ ticker := time.NewTicker(dur)
+ defer op.closer.Done()
+ var stop bool
+ for {
+ select {
+ case <-op.closer.HasBeenClosed():
+ stop = true
+ case <-ticker.C: // wait for tick
+ }
+ if err := op.compact(); err != nil {
+ op.db.opt.Errorf("failure while running merge operation: %s", err)
+ }
+ if stop {
+ ticker.Stop()
+ break
+ }
+ }
+}
+
+// Add records a value in Badger which will eventually be merged by a background
+// routine into the values that were recorded by previous invocations to Add().
+func (op *MergeOperator) Add(val []byte) error {
+ return op.db.Update(func(txn *Txn) error {
+ return txn.SetEntry(NewEntry(op.key, val).withMergeBit())
+ })
+}
+
+// Get returns the latest value for the merge operator, which is derived by
+// applying the merge function to all the values added so far.
+//
+// If Add has not been called even once, Get will return ErrKeyNotFound.
+func (op *MergeOperator) Get() ([]byte, error) {
+ op.RLock()
+ defer op.RUnlock()
+ var existing []byte
+ err := op.db.View(func(txn *Txn) (err error) {
+ existing, _, err = op.iterateAndMerge()
+ return err
+ })
+ if err == errNoMerge {
+ return existing, nil
+ }
+ return existing, err
+}
+
+// Stop waits for any pending merge to complete and then stops the background
+// goroutine.
+func (op *MergeOperator) Stop() {
+ op.closer.SignalAndWait()
+}
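
A minimal sketch of the MergeOperator API above, used as a big-endian uint64 counter; the key, directory, tick interval, and encoding are illustrative, and badger.Open/DB.Close are assumed from upstream.

package main

import (
    "encoding/binary"
    "fmt"
    "time"

    "github.com/dgraph-io/badger"
)

// add sums two big-endian uint64 counters; operand order does not matter here.
func add(existing, latest []byte) []byte {
    out := make([]byte, 8)
    binary.BigEndian.PutUint64(out, toUint64(existing)+toUint64(latest))
    return out
}

func toUint64(b []byte) uint64 {
    if len(b) != 8 {
        return 0
    }
    return binary.BigEndian.Uint64(b)
}

func main() {
    db, err := badger.Open(badger.DefaultOptions("/tmp/badger-merge-example"))
    if err != nil {
        panic(err)
    }
    defer db.Close()

    counter := db.GetMergeOperator([]byte("visits"), add, 200*time.Millisecond)
    defer counter.Stop()

    one := make([]byte, 8)
    binary.BigEndian.PutUint64(one, 1)
    for i := 0; i < 3; i++ {
        if err := counter.Add(one); err != nil {
            panic(err)
        }
    }

    total, err := counter.Get()
    if err != nil {
        panic(err)
    }
    fmt.Println("count:", binary.BigEndian.Uint64(total))
}
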
diff --git a/vendor/github.com/dgraph-io/badger/options.go b/vendor/github.com/dgraph-io/badger/options.go
new file mode 100644
index 000000000..b91fdc5e3
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/options.go
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "github.com/dgraph-io/badger/options"
+)
+
+// Note: If you add a new option X make sure you also add a WithX method on Options.
+
+// Options are params for creating DB object.
+//
+// This package provides DefaultOptions which contains options that should
+// work for most applications. Consider using that as a starting point before
+// customizing it for your own needs.
+//
+// Each option X is documented on the WithX method.
+type Options struct {
+ // Required options.
+
+ Dir string
+ ValueDir string
+
+ // Usually modified options.
+
+ SyncWrites bool
+ TableLoadingMode options.FileLoadingMode
+ ValueLogLoadingMode options.FileLoadingMode
+ NumVersionsToKeep int
+ ReadOnly bool
+ Truncate bool
+ Logger Logger
+
+ // Fine tuning options.
+
+ MaxTableSize int64
+ LevelSizeMultiplier int
+ MaxLevels int
+ ValueThreshold int
+ NumMemtables int
+
+ NumLevelZeroTables int
+ NumLevelZeroTablesStall int
+
+ LevelOneSize int64
+ ValueLogFileSize int64
+ ValueLogMaxEntries uint32
+
+ NumCompactors int
+ CompactL0OnClose bool
+ LogRotatesToFlush int32
+
+ // Transaction start and commit timestamps are managed by end-user.
+ // This is only useful for databases built on top of Badger (like Dgraph).
+ // Not recommended for most users.
+ managedTxns bool
+
+ // Flags for testing purposes.
+ maxBatchCount int64 // max entries in batch
+ maxBatchSize int64 // max batch size in bytes
+
+}
+
+// DefaultOptions sets a list of recommended options for good performance.
+// Feel free to modify these to suit your needs with the WithX methods.
+func DefaultOptions(path string) Options {
+ return Options{
+ Dir: path,
+ ValueDir: path,
+ LevelOneSize: 256 << 20,
+ LevelSizeMultiplier: 10,
+ TableLoadingMode: options.MemoryMap,
+ ValueLogLoadingMode: options.MemoryMap,
+ // table.MemoryMap to mmap() the tables.
+ // table.Nothing to not preload the tables.
+ MaxLevels: 7,
+ MaxTableSize: 64 << 20,
+ NumCompactors: 2, // Compactions can be expensive. Only run 2.
+ NumLevelZeroTables: 5,
+ NumLevelZeroTablesStall: 10,
+ NumMemtables: 5,
+ SyncWrites: true,
+ NumVersionsToKeep: 1,
+ CompactL0OnClose: true,
+ // Nothing to read/write value log using standard File I/O
+ // MemoryMap to mmap() the value log files
+ // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
+ // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems.
+ ValueLogFileSize: 1<<30 - 1,
+
+ ValueLogMaxEntries: 1000000,
+ ValueThreshold: 32,
+ Truncate: false,
+ Logger: defaultLogger,
+ LogRotatesToFlush: 2,
+ }
+}
+
+// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold
+// so values would be colocated with the LSM tree, with value log largely acting
+// as a write-ahead log only. These options would reduce the disk usage of value
+// log, and make Badger act more like a typical LSM tree.
+func LSMOnlyOptions(path string) Options {
+ // Max value length which fits in uint16.
+ // Let's not set any other options, because they can cause issues with the
+ // size of key-value a user can pass to Badger. For e.g., if we set
+ // ValueLogFileSize to 64MB, a user can't pass a value more than that.
+ // Setting ValueLogMaxEntries to 1000 can generate too many files.
+ // These options are better configured on a per-usage basis than set broadly here.
+ // The ValueThreshold is the most important setting a user needs to tune to
+ // achieve a heavier usage of the LSM tree.
+ // NOTE: If a user does not want to set 64KB as the ValueThreshold because
+ // of performance reasons, 1KB would be a good option too, allowing
+ // values smaller than 1KB to be colocated with the keys in the LSM tree.
+ return DefaultOptions(path).WithValueThreshold(65500)
+}
+
+// WithDir returns a new Options value with Dir set to the given value.
+//
+// Dir is the path of the directory where key data will be stored in.
+// If it doesn't exist, Badger will try to create it for you.
+// This is set automatically to be the path given to `DefaultOptions`.
+func (opt Options) WithDir(val string) Options {
+ opt.Dir = val
+ return opt
+}
+
+// WithValueDir returns a new Options value with ValueDir set to the given value.
+//
+// ValueDir is the path of the directory where value data will be stored in.
+// If it doesn't exist, Badger will try to create it for you.
+// This is set automatically to be the path given to `DefaultOptions`.
+func (opt Options) WithValueDir(val string) Options {
+ opt.ValueDir = val
+ return opt
+}
+
+// WithSyncWrites returns a new Options value with SyncWrites set to the given value.
+//
+// When SyncWrites is true, all writes are synced to disk. Setting this to false would achieve better
+// performance, but may cause data loss in case of a crash.
+//
+// The default value of SyncWrites is true.
+func (opt Options) WithSyncWrites(val bool) Options {
+ opt.SyncWrites = val
+ return opt
+}
+
+// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value.
+//
+// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
+//
+// The default value of TableLoadingMode is options.MemoryMap.
+func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options {
+ opt.TableLoadingMode = val
+ return opt
+}
+
+// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given
+// value.
+//
+// ValueLogLoadingMode indicates which file loading mode should be used for the value log data
+// files.
+//
+// The default value of ValueLogLoadingMode is options.MemoryMap.
+func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options {
+ opt.ValueLogLoadingMode = val
+ return opt
+}
+
+// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value.
+//
+// NumVersionsToKeep sets how many versions to keep per key at most.
+//
+// The default value of NumVersionsToKeep is 1.
+func (opt Options) WithNumVersionsToKeep(val int) Options {
+ opt.NumVersionsToKeep = val
+ return opt
+}
+
+// WithReadOnly returns a new Options value with ReadOnly set to the given value.
+//
+// When ReadOnly is true the DB will be opened on read-only mode.
+// Multiple processes can open the same Badger DB.
+// Note: if the DB being opened had crashed before and has vlog data to be replayed,
+// ReadOnly will cause Open to fail with an appropriate message.
+//
+// The default value of ReadOnly is false.
+func (opt Options) WithReadOnly(val bool) Options {
+ opt.ReadOnly = val
+ return opt
+}
+
+// WithTruncate returns a new Options value with Truncate set to the given value.
+//
+// Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
+// This option is ignored when ReadOnly is true.
+//
+// The default value of Truncate is false.
+func (opt Options) WithTruncate(val bool) Options {
+ opt.Truncate = val
+ return opt
+}
+
+// WithLogger returns a new Options value with Logger set to the given value.
+//
+// Logger provides a way to configure what logger each value of badger.DB uses.
+//
+// The default value of Logger writes to stderr using the log package from the Go standard library.
+func (opt Options) WithLogger(val Logger) Options {
+ opt.Logger = val
+ return opt
+}
+
+// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
+//
+// MaxTableSize sets the maximum size in bytes for each LSM table or file.
+//
+// The default value of MaxTableSize is 64MB.
+func (opt Options) WithMaxTableSize(val int64) Options {
+ opt.MaxTableSize = val
+ return opt
+}
+
+// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
+// value.
+//
+// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
+// Once a level grows to be larger than this ratio allows, the compaction process will be
+// triggered.
+//
+// The default value of LevelSizeMultiplier is 10.
+func (opt Options) WithLevelSizeMultiplier(val int) Options {
+ opt.LevelSizeMultiplier = val
+ return opt
+}
+
+// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
+//
+// Maximum number of levels of compaction allowed in the LSM.
+//
+// The default value of MaxLevels is 7.
+func (opt Options) WithMaxLevels(val int) Options {
+ opt.MaxLevels = val
+ return opt
+}
+
+// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
+//
+// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
+// tree or separately in the value log files.
+//
+// The default value of ValueThreshold is 32, but LSMOnlyOptions sets it to 65500.
+func (opt Options) WithValueThreshold(val int) Options {
+ opt.ValueThreshold = val
+ return opt
+}
+
+// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
+//
+// NumMemtables sets the maximum number of tables to keep in memory before stalling.
+//
+// The default value of NumMemtables is 5.
+func (opt Options) WithNumMemtables(val int) Options {
+ opt.NumMemtables = val
+ return opt
+}
+
+// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
+// value.
+//
+// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
+//
+// The default value of NumLevelZeroTables is 5.
+func (opt Options) WithNumLevelZeroTables(val int) Options {
+ opt.NumLevelZeroTables = val
+ return opt
+}
+
+// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
+// given value.
+//
+// NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached, causes the DB to
+// stall until compaction succeeds.
+//
+// The default value of NumLevelZeroTablesStall is 10.
+func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
+ opt.NumLevelZeroTablesStall = val
+ return opt
+}
+
+// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
+//
+// LevelOneSize sets the maximum total size for Level 1.
+//
+// The default value of LevelOneSize is 256MB.
+func (opt Options) WithLevelOneSize(val int64) Options {
+ opt.LevelOneSize = val
+ return opt
+}
+
+// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
+//
+// ValueLogFileSize sets the maximum size of a single value log file.
+//
+// The default value of ValueLogFileSize is 1GB.
+func (opt Options) WithValueLogFileSize(val int64) Options {
+ opt.ValueLogFileSize = val
+ return opt
+}
+
+// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
+// value.
+//
+// ValueLogMaxEntries sets the approximate maximum number of entries a value log file can hold.
+// The actual size limit of a value log file is the minimum of ValueLogFileSize and
+// ValueLogMaxEntries.
+//
+// The default value of ValueLogMaxEntries is one million (1000000).
+func (opt Options) WithValueLogMaxEntries(val uint32) Options {
+ opt.ValueLogMaxEntries = val
+ return opt
+}
+
+// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
+//
+// NumCompactors sets the number of compaction workers to run concurrently.
+// Setting this to zero stops compactions, which could eventually cause writes to block forever.
+//
+// The default value of NumCompactors is 2.
+func (opt Options) WithNumCompactors(val int) Options {
+ opt.NumCompactors = val
+ return opt
+}
+
+// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
+//
+// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
+// This ensures that both reads and writes are efficient when the DB is opened later.
+//
+// The default value of CompactL0OnClose is true.
+func (opt Options) WithCompactL0OnClose(val bool) Options {
+ opt.CompactL0OnClose = val
+ return opt
+}
+
+// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
+//
+// LogRotatesToFlush sets the number of value log file rotates after which the Memtables are
+// flushed to disk. This is useful in write loads with fewer keys and larger values. Such a
+// workload would fill up the value logs quickly while not filling up the Memtables. Thus, on a
+// crash and restart, the value log head could cause the replay of a large number of value log
+// files, which can slow things down on startup.
+//
+// The default value of LogRotatesToFlush is 2.
+func (opt Options) WithLogRotatesToFlush(val int32) Options {
+ opt.LogRotatesToFlush = val
+ return opt
+}
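
A minimal sketch of composing Options with the WithX builders above; each builder returns a modified copy, so calls chain. The directory and the particular knobs chosen are illustrative, and badger.Open/DB.Close are assumed from upstream.

package main

import (
    "github.com/dgraph-io/badger"
    "github.com/dgraph-io/badger/options"
)

func main() {
    // Start from DefaultOptions and override a few knobs; each WithX returns a new Options value.
    opts := badger.DefaultOptions("/tmp/badger-options-example").
        WithSyncWrites(false).
        WithValueLogLoadingMode(options.FileIO).
        WithNumCompactors(3)

    db, err := badger.Open(opts)
    if err != nil {
        panic(err)
    }
    defer db.Close()
}
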
diff --git a/vendor/github.com/dgraph-io/badger/options/options.go b/vendor/github.com/dgraph-io/badger/options/options.go
new file mode 100644
index 000000000..06c8b1b7f
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/options/options.go
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package options
+
+// FileLoadingMode specifies how data in LSM table files and value log files should
+// be loaded.
+type FileLoadingMode int
+
+const (
+ // FileIO indicates that files must be loaded using standard I/O
+ FileIO FileLoadingMode = iota
+ // LoadToRAM indicates that the file must be loaded into RAM
+ LoadToRAM
+ // MemoryMap indicates that the file must be memory-mapped
+ MemoryMap
+)
diff --git a/vendor/github.com/dgraph-io/badger/pb/gen.sh b/vendor/github.com/dgraph-io/badger/pb/gen.sh
new file mode 100644
index 000000000..49b44ff4e
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/pb/gen.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# You might need to go get -v github.com/gogo/protobuf/...
+
+protos=${GOPATH-$HOME/go}/src/github.com/dgraph-io/badger/pb
+pushd $protos > /dev/null
+protoc --gofast_out=plugins=grpc:. -I=. pb.proto
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
new file mode 100644
index 000000000..f9a2c6eee
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
@@ -0,0 +1,1313 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pb.proto
+
+package pb
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ io "io"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type ManifestChange_Operation int32
+
+const (
+ ManifestChange_CREATE ManifestChange_Operation = 0
+ ManifestChange_DELETE ManifestChange_Operation = 1
+)
+
+var ManifestChange_Operation_name = map[int32]string{
+ 0: "CREATE",
+ 1: "DELETE",
+}
+
+var ManifestChange_Operation_value = map[string]int32{
+ "CREATE": 0,
+ "DELETE": 1,
+}
+
+func (x ManifestChange_Operation) String() string {
+ return proto.EnumName(ManifestChange_Operation_name, int32(x))
+}
+
+func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_f80abaa17e25ccc8, []int{3, 0}
+}
+
+type KV struct {
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"`
+ Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+ ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
+ Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"`
+ // Stream id is used to identify which stream the KV came from.
+ StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *KV) Reset() { *m = KV{} }
+func (m *KV) String() string { return proto.CompactTextString(m) }
+func (*KV) ProtoMessage() {}
+func (*KV) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f80abaa17e25ccc8, []int{0}
+}
+func (m *KV) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_KV.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *KV) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KV.Merge(m, src)
+}
+func (m *KV) XXX_Size() int {
+ return m.Size()
+}
+func (m *KV) XXX_DiscardUnknown() {
+ xxx_messageInfo_KV.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KV proto.InternalMessageInfo
+
+func (m *KV) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *KV) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *KV) GetUserMeta() []byte {
+ if m != nil {
+ return m.UserMeta
+ }
+ return nil
+}
+
+func (m *KV) GetVersion() uint64 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *KV) GetExpiresAt() uint64 {
+ if m != nil {
+ return m.ExpiresAt
+ }
+ return 0
+}
+
+func (m *KV) GetMeta() []byte {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+func (m *KV) GetStreamId() uint32 {
+ if m != nil {
+ return m.StreamId
+ }
+ return 0
+}
+
+type KVList struct {
+ Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *KVList) Reset() { *m = KVList{} }
+func (m *KVList) String() string { return proto.CompactTextString(m) }
+func (*KVList) ProtoMessage() {}
+func (*KVList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f80abaa17e25ccc8, []int{1}
+}
+func (m *KVList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_KVList.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *KVList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KVList.Merge(m, src)
+}
+func (m *KVList) XXX_Size() int {
+ return m.Size()
+}
+func (m *KVList) XXX_DiscardUnknown() {
+ xxx_messageInfo_KVList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KVList proto.InternalMessageInfo
+
+func (m *KVList) GetKv() []*KV {
+ if m != nil {
+ return m.Kv
+ }
+ return nil
+}
+
+type ManifestChangeSet struct {
+ // A set of changes that are applied atomically.
+ Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} }
+func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) }
+func (*ManifestChangeSet) ProtoMessage() {}
+func (*ManifestChangeSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f80abaa17e25ccc8, []int{2}
+}
+func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ManifestChangeSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ManifestChangeSet.Merge(m, src)
+}
+func (m *ManifestChangeSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *ManifestChangeSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo
+
+func (m *ManifestChangeSet) GetChanges() []*ManifestChange {
+ if m != nil {
+ return m.Changes
+ }
+ return nil
+}
+
+type ManifestChange struct {
+ Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"`
+ Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=pb.ManifestChange_Operation" json:"Op,omitempty"`
+ Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"`
+ Checksum []byte `protobuf:"bytes,4,opt,name=Checksum,proto3" json:"Checksum,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ManifestChange) Reset() { *m = ManifestChange{} }
+func (m *ManifestChange) String() string { return proto.CompactTextString(m) }
+func (*ManifestChange) ProtoMessage() {}
+func (*ManifestChange) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f80abaa17e25ccc8, []int{3}
+}
+func (m *ManifestChange) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ManifestChange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ManifestChange.Merge(m, src)
+}
+func (m *ManifestChange) XXX_Size() int {
+ return m.Size()
+}
+func (m *ManifestChange) XXX_DiscardUnknown() {
+ xxx_messageInfo_ManifestChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManifestChange proto.InternalMessageInfo
+
+func (m *ManifestChange) GetId() uint64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func (m *ManifestChange) GetOp() ManifestChange_Operation {
+ if m != nil {
+ return m.Op
+ }
+ return ManifestChange_CREATE
+}
+
+func (m *ManifestChange) GetLevel() uint32 {
+ if m != nil {
+ return m.Level
+ }
+ return 0
+}
+
+func (m *ManifestChange) GetChecksum() []byte {
+ if m != nil {
+ return m.Checksum
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("pb.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value)
+ proto.RegisterType((*KV)(nil), "pb.KV")
+ proto.RegisterType((*KVList)(nil), "pb.KVList")
+ proto.RegisterType((*ManifestChangeSet)(nil), "pb.ManifestChangeSet")
+ proto.RegisterType((*ManifestChange)(nil), "pb.ManifestChange")
+}
+
+func init() { proto.RegisterFile("pb.proto", fileDescriptor_f80abaa17e25ccc8) }
+
+var fileDescriptor_f80abaa17e25ccc8 = []byte{
+ // 365 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x4f, 0x8a, 0xdb, 0x30,
+ 0x14, 0xc6, 0x47, 0x8a, 0xc7, 0xe3, 0xbc, 0xce, 0x04, 0x57, 0x94, 0x22, 0xfa, 0xc7, 0x18, 0x77,
+ 0xe3, 0xc5, 0xe0, 0xc5, 0xf4, 0x04, 0x69, 0xea, 0x45, 0x48, 0x42, 0x40, 0x0d, 0xd9, 0x06, 0x39,
+ 0x7e, 0x6d, 0x8c, 0x13, 0x5b, 0x58, 0x8a, 0x69, 0x6f, 0xd2, 0x0b, 0xf4, 0x04, 0xbd, 0x44, 0x97,
+ 0x3d, 0x42, 0x49, 0x2f, 0x52, 0xac, 0xfc, 0x81, 0xd0, 0xdd, 0xfb, 0xbe, 0xef, 0xbd, 0x4f, 0xf0,
+ 0x13, 0x78, 0x2a, 0x4b, 0x54, 0x53, 0x9b, 0x9a, 0x51, 0x95, 0x45, 0x3f, 0x09, 0xd0, 0xc9, 0x92,
+ 0xf9, 0xd0, 0x2b, 0xf1, 0x1b, 0x27, 0x21, 0x89, 0xef, 0x45, 0x37, 0xb2, 0x17, 0x70, 0xdb, 0xca,
+ 0xed, 0x1e, 0x39, 0xb5, 0xde, 0x51, 0xb0, 0xd7, 0xd0, 0xdf, 0x6b, 0x6c, 0x56, 0x3b, 0x34, 0x92,
+ 0xf7, 0x6c, 0xe2, 0x75, 0xc6, 0x0c, 0x8d, 0x64, 0x1c, 0xee, 0x5a, 0x6c, 0x74, 0x51, 0x57, 0xdc,
+ 0x09, 0x49, 0xec, 0x88, 0xb3, 0x64, 0x6f, 0x01, 0xf0, 0xab, 0x2a, 0x1a, 0xd4, 0x2b, 0x69, 0xf8,
+ 0xad, 0x0d, 0xfb, 0x27, 0x67, 0x68, 0x18, 0x03, 0xc7, 0x16, 0xba, 0xb6, 0xd0, 0xce, 0xdd, 0x4b,
+ 0xda, 0x34, 0x28, 0x77, 0xab, 0x22, 0xe7, 0x10, 0x92, 0xf8, 0x41, 0x78, 0x47, 0x63, 0x9c, 0x47,
+ 0x21, 0xb8, 0x93, 0xe5, 0xb4, 0xd0, 0x86, 0xbd, 0x04, 0x5a, 0xb6, 0x9c, 0x84, 0xbd, 0xf8, 0xd9,
+ 0x93, 0x9b, 0xa8, 0x2c, 0x99, 0x2c, 0x05, 0x2d, 0xdb, 0x68, 0x08, 0xcf, 0x67, 0xb2, 0x2a, 0x3e,
+ 0xa3, 0x36, 0xa3, 0x8d, 0xac, 0xbe, 0xe0, 0x27, 0x34, 0xec, 0x11, 0xee, 0xd6, 0x56, 0xe8, 0xd3,
+ 0x05, 0xeb, 0x2e, 0xae, 0xf7, 0xc4, 0x79, 0x25, 0xfa, 0x41, 0x60, 0x70, 0x9d, 0xb1, 0x01, 0xd0,
+ 0x71, 0x6e, 0x29, 0x39, 0x82, 0x8e, 0x73, 0xf6, 0x08, 0x74, 0xae, 0x2c, 0xa1, 0xc1, 0xd3, 0x9b,
+ 0xff, 0xbb, 0x92, 0xb9, 0xc2, 0x46, 0x9a, 0xa2, 0xae, 0x04, 0x9d, 0xab, 0x0e, 0xe9, 0x14, 0x5b,
+ 0xdc, 0x5a, 0x70, 0x0f, 0xe2, 0x28, 0xd8, 0x2b, 0xf0, 0x46, 0x1b, 0x5c, 0x97, 0x7a, 0xbf, 0xb3,
+ 0xd8, 0xee, 0xc5, 0x45, 0x47, 0xef, 0xa0, 0x7f, 0xa9, 0x60, 0x00, 0xee, 0x48, 0xa4, 0xc3, 0x45,
+ 0xea, 0xdf, 0x74, 0xf3, 0xc7, 0x74, 0x9a, 0x2e, 0x52, 0x9f, 0x7c, 0xf0, 0x7f, 0x1d, 0x02, 0xf2,
+ 0xfb, 0x10, 0x90, 0x3f, 0x87, 0x80, 0x7c, 0xff, 0x1b, 0xdc, 0x64, 0xae, 0xfd, 0xdf, 0xf7, 0xff,
+ 0x02, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x28, 0x5d, 0xcf, 0xeb, 0x01, 0x00, 0x00,
+}
+
+func (m *KV) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KV) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ if len(m.UserMeta) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(len(m.UserMeta)))
+ i += copy(dAtA[i:], m.UserMeta)
+ }
+ if m.Version != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.Version))
+ }
+ if m.ExpiresAt != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.ExpiresAt))
+ }
+ if len(m.Meta) > 0 {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(len(m.Meta)))
+ i += copy(dAtA[i:], m.Meta)
+ }
+ if m.StreamId != 0 {
+ dAtA[i] = 0x50
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.StreamId))
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *KVList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KVList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Kv) > 0 {
+ for _, msg := range m.Kv {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Changes) > 0 {
+ for _, msg := range m.Changes {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ManifestChange) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Id != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.Id))
+ }
+ if m.Op != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.Op))
+ }
+ if m.Level != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(m.Level))
+ }
+ if len(m.Checksum) > 0 {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintPb(dAtA, i, uint64(len(m.Checksum)))
+ i += copy(dAtA[i:], m.Checksum)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintPb(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *KV) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovPb(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovPb(uint64(l))
+ }
+ l = len(m.UserMeta)
+ if l > 0 {
+ n += 1 + l + sovPb(uint64(l))
+ }
+ if m.Version != 0 {
+ n += 1 + sovPb(uint64(m.Version))
+ }
+ if m.ExpiresAt != 0 {
+ n += 1 + sovPb(uint64(m.ExpiresAt))
+ }
+ l = len(m.Meta)
+ if l > 0 {
+ n += 1 + l + sovPb(uint64(l))
+ }
+ if m.StreamId != 0 {
+ n += 1 + sovPb(uint64(m.StreamId))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *KVList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Kv) > 0 {
+ for _, e := range m.Kv {
+ l = e.Size()
+ n += 1 + l + sovPb(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ManifestChangeSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Changes) > 0 {
+ for _, e := range m.Changes {
+ l = e.Size()
+ n += 1 + l + sovPb(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ManifestChange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Id != 0 {
+ n += 1 + sovPb(uint64(m.Id))
+ }
+ if m.Op != 0 {
+ n += 1 + sovPb(uint64(m.Op))
+ }
+ if m.Level != 0 {
+ n += 1 + sovPb(uint64(m.Level))
+ }
+ l = len(m.Checksum)
+ if l > 0 {
+ n += 1 + l + sovPb(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovPb(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozPb(x uint64) (n int) {
+ return sovPb(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KV) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KV: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...)
+ if m.UserMeta == nil {
+ m.UserMeta = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Version |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType)
+ }
+ m.ExpiresAt = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ExpiresAt |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...)
+ if m.Meta == nil {
+ m.Meta = []byte{}
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType)
+ }
+ m.StreamId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StreamId |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPb(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KVList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KVList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kv = append(m.Kv, &KV{})
+ if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPb(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Changes = append(m.Changes, &ManifestChange{})
+ if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPb(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ManifestChange) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ m.Id = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Id |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
+ }
+ m.Op = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Op |= ManifestChange_Operation(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+ }
+ m.Level = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Level |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthPb
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPb
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...)
+ if m.Checksum == nil {
+ m.Checksum = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPb(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPb
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPb(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthPb
+ }
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthPb
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPb
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipPb(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthPb
+ }
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthPb = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPb = fmt.Errorf("proto: integer overflow")
+)
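
The generated Marshal/Unmarshal pair above follows the usual gogo/protobuf contract, so a KV round-trips straight through it. A minimal sketch (the key and value contents here are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/pb"
)

func main() {
	in := &pb.KV{
		Key:      []byte("answer"),
		Value:    []byte("42"),
		Version:  7,
		StreamId: 3,
	}

	buf, err := in.Marshal() // Size() + MarshalTo() as defined above
	if err != nil {
		panic(err)
	}

	out := &pb.KV{}
	if err := out.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Printf("key=%s value=%s version=%d stream=%d\n",
		out.Key, out.Value, out.Version, out.StreamId)
}
```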
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.proto b/vendor/github.com/dgraph-io/badger/pb/pb.proto
new file mode 100644
index 000000000..c6e7f413d
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/pb/pb.proto
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Use protos/gen.sh to generate .pb.go files.
+syntax = "proto3";
+
+package pb;
+
+message KV {
+ bytes key = 1;
+ bytes value = 2;
+ bytes user_meta = 3;
+ uint64 version = 4;
+ uint64 expires_at = 5;
+ bytes meta = 6;
+
+ // Stream id is used to identify which stream the KV came from.
+ uint32 stream_id = 10;
+}
+
+message KVList {
+ repeated KV kv = 1;
+}
+
+message ManifestChangeSet {
+ // A set of changes that are applied atomically.
+ repeated ManifestChange changes = 1;
+}
+
+message ManifestChange {
+ uint64 Id = 1;
+ enum Operation {
+ CREATE = 0;
+ DELETE = 1;
+ }
+ Operation Op = 2;
+ uint32 Level = 3; // Only used for CREATE
+ bytes Checksum = 4; // Only used for CREATE
+}
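
The literal tag bytes in the generated MarshalTo above (0x0a, 0x12, 0x1a, 0x20, 0x28, 0x32, 0x50) come straight from these field numbers: a protobuf tag is field_number<<3 | wire_type, with wire type 2 for length-delimited fields and 0 for varints. Spelled out:

```go
// Tag byte = field_number<<3 | wire_type (2 = length-delimited, 0 = varint).
const (
	tagKey       = 1<<3 | 2  // 0x0a
	tagValue     = 2<<3 | 2  // 0x12
	tagUserMeta  = 3<<3 | 2  // 0x1a
	tagVersion   = 4<<3 | 0  // 0x20
	tagExpiresAt = 5<<3 | 0  // 0x28
	tagMeta      = 6<<3 | 2  // 0x32
	tagStreamId  = 10<<3 | 0 // 0x50
)
```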
diff --git a/vendor/github.com/dgraph-io/badger/publisher.go b/vendor/github.com/dgraph-io/badger/publisher.go
new file mode 100644
index 000000000..24588f5c6
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/publisher.go
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "sync"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/y"
+)
+
+type subscriber struct {
+ prefixes [][]byte
+ sendCh chan<- *pb.KVList
+ subCloser *y.Closer
+}
+
+type publisher struct {
+ sync.Mutex
+ pubCh chan requests
+ subscribers map[uint64]subscriber
+ nextID uint64
+}
+
+func newPublisher() *publisher {
+ return &publisher{
+ pubCh: make(chan requests, 1000),
+ subscribers: make(map[uint64]subscriber),
+ nextID: 0,
+ }
+}
+
+func (p *publisher) listenForUpdates(c *y.Closer) {
+ defer func() {
+ p.cleanSubscribers()
+ c.Done()
+ }()
+ slurp := func(batch []*request) {
+ for {
+ select {
+ case reqs := <-p.pubCh:
+ batch = append(batch, reqs...)
+ default:
+ p.publishUpdates(batch)
+ return
+ }
+ }
+ }
+ for {
+ select {
+ case <-c.HasBeenClosed():
+ return
+ case reqs := <-p.pubCh:
+ slurp(reqs)
+ }
+ }
+}
+
+func (p *publisher) publishUpdates(reqs requests) {
+ kvs := &pb.KVList{}
+ p.Lock()
+ defer func() {
+ p.Unlock()
+ // Release all the requests.
+ reqs.DecrRef()
+ }()
+
+ // TODO: Optimize this, so we can figure out key -> subscriber quickly, without iterating over
+ // all the prefixes.
+ // TODO: Use trie to find subscribers.
+ for _, s := range p.subscribers {
+ // BUG: This would send out the same entry multiple times on multiple matches for the same
+ // subscriber.
+ for _, prefix := range s.prefixes {
+ for _, req := range reqs {
+ for _, e := range req.Entries {
+ if bytes.HasPrefix(e.Key, prefix) {
+ // TODO: Maybe we can optimize this by creating the KV once and sending it
+ // over to multiple subscribers.
+ k := y.SafeCopy(nil, e.Key)
+ kv := &pb.KV{
+ Key: y.ParseKey(k),
+ Value: y.SafeCopy(nil, e.Value),
+ UserMeta: []byte{e.UserMeta},
+ ExpiresAt: e.ExpiresAt,
+ Version: y.ParseTs(k),
+ }
+ kvs.Kv = append(kvs.Kv, kv)
+ }
+ }
+ }
+ }
+ if len(kvs.GetKv()) > 0 {
+ s.sendCh <- kvs
+ }
+ }
+}
+
+func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) {
+ p.Lock()
+ defer p.Unlock()
+ ch := make(chan *pb.KVList, 1000)
+ id := p.nextID
+ // Increment next ID.
+ p.nextID++
+ p.subscribers[id] = subscriber{
+ prefixes: prefixes,
+ sendCh: ch,
+ subCloser: c,
+ }
+ return ch, id
+}
+
+// cleanSubscribers stops all the subscribers. Ideally, it should be called while closing the DB.
+func (p *publisher) cleanSubscribers() {
+ p.Lock()
+ defer p.Unlock()
+ for id, s := range p.subscribers {
+ delete(p.subscribers, id)
+ s.subCloser.SignalAndWait()
+ }
+}
+
+func (p *publisher) deleteSubscriber(id uint64) {
+ p.Lock()
+ defer p.Unlock()
+ if _, ok := p.subscribers[id]; !ok {
+ return
+ }
+ delete(p.subscribers, id)
+}
+
+func (p *publisher) sendUpdates(reqs []*request) {
+ // TODO: Prefix check before pushing into pubCh.
+ if p.noOfSubscribers() != 0 {
+ p.pubCh <- reqs
+ }
+}
+
+func (p *publisher) noOfSubscribers() int {
+ p.Lock()
+ defer p.Unlock()
+ return len(p.subscribers)
+}
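
The fan-out in publishUpdates boils down to a prefix match per subscriber. The helper below is a hypothetical, stand-alone restatement of that check (it is not part of the patch and skips the channel plumbing), breaking on the first match to avoid the duplicate-send issue flagged in the BUG comment above:

```go
package main

import (
	"bytes"
	"fmt"
)

// keysForSubscriber returns the keys a subscriber registered for the given
// prefixes would be sent. Purely illustrative.
func keysForSubscriber(keys, prefixes [][]byte) [][]byte {
	var out [][]byte
	for _, k := range keys {
		for _, p := range prefixes {
			if bytes.HasPrefix(k, p) {
				out = append(out, k)
				break // one send per key, even if several prefixes match
			}
		}
	}
	return out
}

func main() {
	keys := [][]byte{[]byte("user/1"), []byte("user/2"), []byte("order/9")}
	prefixes := [][]byte{[]byte("user/")}
	for _, k := range keysForSubscriber(keys, prefixes) {
		fmt.Println(string(k))
	}
}
```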
diff --git a/vendor/github.com/dgraph-io/badger/skl/README.md b/vendor/github.com/dgraph-io/badger/skl/README.md
new file mode 100644
index 000000000..e22e4590b
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/skl/README.md
@@ -0,0 +1,113 @@
+This skiplist implementation is much better than `skiplist` and `slist`.
+
+```
+BenchmarkReadWrite/frac_0-8 3000000 537 ns/op
+BenchmarkReadWrite/frac_1-8 3000000 503 ns/op
+BenchmarkReadWrite/frac_2-8 3000000 492 ns/op
+BenchmarkReadWrite/frac_3-8 3000000 475 ns/op
+BenchmarkReadWrite/frac_4-8 3000000 440 ns/op
+BenchmarkReadWrite/frac_5-8 5000000 442 ns/op
+BenchmarkReadWrite/frac_6-8 5000000 380 ns/op
+BenchmarkReadWrite/frac_7-8 5000000 338 ns/op
+BenchmarkReadWrite/frac_8-8 5000000 294 ns/op
+BenchmarkReadWrite/frac_9-8 10000000 268 ns/op
+BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op
+```
+
+And even better than a simple map with read-write lock:
+
+```
+BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op
+BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op
+BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op
+BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op
+BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op
+BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op
+BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op
+BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op
+BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op
+BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op
+BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op
+```
+
+# Node Pooling
+
+Command used
+
+```
+rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10
+```
+
+For pprof results, we run without using /usr/bin/time. There are four runs below.
+
+Results seem to vary quite a bit between runs.
+
+## Before node pooling
+
+```
+1311.53MB of 1338.69MB total (97.97%)
+Dropped 30 nodes (cum <= 6.69MB)
+Showing top 10 nodes out of 37 (cum >= 12.50MB)
+ flat flat% sum% cum cum%
+ 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put
+ 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte
+ 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put
+ 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E
+ 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice
+ 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue
+ 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
+ 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next
+ 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read
+ 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode
+
+ 128.31 real 329.37 user 17.11 sys
+3355660288 maximum resident set size
+ 0 average shared memory size
+ 0 average unshared data size
+ 0 average unshared stack size
+ 2203080 page reclaims
+ 764 page faults
+ 0 swaps
+ 275 block input operations
+ 76 block output operations
+ 0 messages sent
+ 0 messages received
+ 0 signals received
+ 49173 voluntary context switches
+ 599922 involuntary context switches
+```
+
+## After node pooling
+
+```
+1963.13MB of 2026.09MB total (96.89%)
+Dropped 29 nodes (cum <= 10.13MB)
+Showing top 10 nodes out of 41 (cum >= 185.62MB)
+ flat flat% sum% cum cum%
+ 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1
+ 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E
+ 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte
+ 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put
+ 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice
+ 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode
+ 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue
+ 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
+ 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read
+ 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next
+
+ 135.58 real 374.29 user 17.65 sys
+3740614656 maximum resident set size
+ 0 average shared memory size
+ 0 average unshared data size
+ 0 average unshared stack size
+ 2276566 page reclaims
+ 770 page faults
+ 0 swaps
+ 128 block input operations
+ 90 block output operations
+ 0 messages sent
+ 0 messages received
+ 0 signals received
+ 46434 voluntary context switches
+ 597049 involuntary context switches
+```
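
In the tables above, frac_N is the read fraction of the mixed workload (N/10 reads, the rest writes). A rough sketch of how such a benchmark can be driven against this skiplist; the arena sizing and key layout (y.KeyWithTs, y.ValueStruct with a Value field) are assumptions here, and the package's real benchmark may differ:

```go
package skl_test

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/dgraph-io/badger/skl"
	"github.com/dgraph-io/badger/y"
)

func BenchmarkReadWrite(b *testing.B) {
	value := []byte("00042")
	for i := 0; i <= 10; i++ {
		readFrac := float32(i) / 10.0
		b.Run(fmt.Sprintf("frac_%d", i), func(b *testing.B) {
			// Over-provision the arena so a worst-case all-write run still fits.
			list := skl.NewSkiplist(int64((b.N + 2) * (skl.MaxNodeSize + 64)))
			defer list.DecrRef()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				rng := rand.New(rand.NewSource(time.Now().UnixNano()))
				for pb.Next() {
					key := y.KeyWithTs([]byte(fmt.Sprintf("%08d", rng.Intn(1<<16))), 0)
					if rng.Float32() < readFrac {
						list.Get(key)
					} else {
						list.Put(key, y.ValueStruct{Value: value})
					}
				}
			})
		})
	}
}
```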
diff --git a/vendor/github.com/dgraph-io/badger/skl/arena.go b/vendor/github.com/dgraph-io/badger/skl/arena.go
new file mode 100644
index 000000000..def550712
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/skl/arena.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package skl
+
+import (
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/dgraph-io/badger/y"
+)
+
+const (
+ offsetSize = int(unsafe.Sizeof(uint32(0)))
+
+ // Always align nodes on 64-bit boundaries, even on 32-bit architectures,
+ // so that the node.value field is 64-bit aligned. This is necessary because
+ // node.getValueOffset uses atomic.LoadUint64, which expects its input
+ // pointer to be 64-bit aligned.
+ nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1
+)
+
+// Arena should be lock-free.
+type Arena struct {
+ n uint32
+ buf []byte
+}
+
+// newArena returns a new arena.
+func newArena(n int64) *Arena {
+ // Don't store data at position 0 in order to reserve offset=0 as a kind
+ // of nil pointer.
+ out := &Arena{
+ n: 1,
+ buf: make([]byte, n),
+ }
+ return out
+}
+
+func (s *Arena) size() int64 {
+ return int64(atomic.LoadUint32(&s.n))
+}
+
+func (s *Arena) reset() {
+ atomic.StoreUint32(&s.n, 0)
+}
+
+// putNode allocates a node in the arena. The node is aligned on a pointer-sized
+// boundary. The arena offset of the node is returned.
+func (s *Arena) putNode(height int) uint32 {
+ // Compute the amount of the tower that will never be used, since the height
+ // is less than maxHeight.
+ unusedSize := (maxHeight - height) * offsetSize
+
+ // Pad the allocation with enough bytes to ensure pointer alignment.
+ l := uint32(MaxNodeSize - unusedSize + nodeAlign)
+ n := atomic.AddUint32(&s.n, l)
+ y.AssertTruef(int(n) <= len(s.buf),
+ "Arena too small, toWrite:%d newTotal:%d limit:%d",
+ l, n, len(s.buf))
+
+ // Return the aligned offset.
+ m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
+ return m
+}
+
+// Put will *copy* val into arena. To make better use of this, reuse your input
+// val buffer. Returns an offset into buf. User is responsible for remembering
+// size of val. We could also store this size inside arena but the encoding and
+// decoding will incur some overhead.
+func (s *Arena) putVal(v y.ValueStruct) uint32 {
+ l := uint32(v.EncodedSize())
+ n := atomic.AddUint32(&s.n, l)
+ y.AssertTruef(int(n) <= len(s.buf),
+ "Arena too small, toWrite:%d newTotal:%d limit:%d",
+ l, n, len(s.buf))
+ m := n - l
+ v.Encode(s.buf[m:])
+ return m
+}
+
+func (s *Arena) putKey(key []byte) uint32 {
+ l := uint32(len(key))
+ n := atomic.AddUint32(&s.n, l)
+ y.AssertTruef(int(n) <= len(s.buf),
+ "Arena too small, toWrite:%d newTotal:%d limit:%d",
+ l, n, len(s.buf))
+ m := n - l
+ y.AssertTrue(len(key) == copy(s.buf[m:n], key))
+ return m
+}
+
+// getNode returns a pointer to the node located at offset. If the offset is
+// zero, then the nil node pointer is returned.
+func (s *Arena) getNode(offset uint32) *node {
+ if offset == 0 {
+ return nil
+ }
+
+ return (*node)(unsafe.Pointer(&s.buf[offset]))
+}
+
+// getKey returns byte slice at offset.
+func (s *Arena) getKey(offset uint32, size uint16) []byte {
+ return s.buf[offset : offset+uint32(size)]
+}
+
+// getVal returns byte slice at offset. The given size should be just the value
+// size and should NOT include the meta bytes.
+func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) {
+ ret.Decode(s.buf[offset : offset+uint32(size)])
+ return
+}
+
+// getNodeOffset returns the offset of node in the arena. If the node pointer is
+// nil, then the zero offset is returned.
+func (s *Arena) getNodeOffset(nd *node) uint32 {
+ if nd == nil {
+ return 0
+ }
+
+ return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0])))
+}
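
The only subtle line in putNode above is the final mask: because l already includes nodeAlign bytes of padding, (n - l + nodeAlign) &^ nodeAlign rounds the raw start of the allocation up to the next 8-byte boundary while still fitting inside the reserved span. A tiny worked example with made-up numbers:

```go
package main

import "fmt"

const nodeAlign = 7 // same as skl: sizeof(uint64) - 1

func main() {
	oldN := uint32(13)    // arena position before this allocation (unaligned)
	need := uint32(80)    // truncated node size for this tower height
	l := need + nodeAlign // reserve padding so an aligned start always fits: 87
	n := oldN + l         // what atomic.AddUint32 would return: 100

	m := (n - l + nodeAlign) &^ nodeAlign
	fmt.Println(m) // 16: first multiple of 8 at or after 13; node spans [16, 96)
}
```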
diff --git a/vendor/github.com/dgraph-io/badger/skl/skl.go b/vendor/github.com/dgraph-io/badger/skl/skl.go
new file mode 100644
index 000000000..fc2eff982
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/skl/skl.go
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+Adapted from RocksDB inline skiplist.
+
+Key differences:
+- No optimization for sequential inserts (no "prev").
+- No custom comparator.
+- Support overwrites. This requires care when we see the same key when inserting.
+ For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so
+ there is no need for values. We don't intend to support versioning. In-place updates of values
+ would be more efficient.
+- We discard all non-concurrent code.
+- We do not support Splices. This simplifies the code a lot.
+- No AllocateNode or other pointer arithmetic.
+- We combine the findLessThan, findGreaterOrEqual, etc into one function.
+*/
+
+package skl
+
+import (
+ "math"
+ "math/rand"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/dgraph-io/badger/y"
+)
+
+const (
+ maxHeight = 20
+ heightIncrease = math.MaxUint32 / 3
+)
+
+// MaxNodeSize is the memory footprint of a node of maximum height.
+const MaxNodeSize = int(unsafe.Sizeof(node{}))
+
+type node struct {
+ // Multiple parts of the value are encoded as a single uint64 so that it
+ // can be atomically loaded and stored:
+ // value offset: uint32 (bits 0-31)
+ // value size : uint16 (bits 32-47)
+ value uint64
+
+ // A byte slice is 24 bytes. We are trying to save space here.
+ keyOffset uint32 // Immutable. No need to lock to access key.
+ keySize uint16 // Immutable. No need to lock to access key.
+
+ // Height of the tower.
+ height uint16
+
+ // Most nodes do not need to use the full height of the tower, since the
+ // probability of each successive level decreases exponentially. Because
+ // these elements are never accessed, they do not need to be allocated.
+ // Therefore, when a node is allocated in the arena, its memory footprint
+ // is deliberately truncated to not include unneeded tower elements.
+ //
+ // All accesses to elements should use CAS operations, with no need to lock.
+ tower [maxHeight]uint32
+}
+
+// Skiplist maps keys to values (in memory)
+type Skiplist struct {
+ height int32 // Current height. 1 <= height <= kMaxHeight. CAS.
+ head *node
+ ref int32
+ arena *Arena
+}
+
+// IncrRef increases the refcount
+func (s *Skiplist) IncrRef() {
+ atomic.AddInt32(&s.ref, 1)
+}
+
+// DecrRef decrements the refcount, deallocating the Skiplist when done using it
+func (s *Skiplist) DecrRef() {
+ newRef := atomic.AddInt32(&s.ref, -1)
+ if newRef > 0 {
+ return
+ }
+
+ s.arena.reset()
+ // Indicate we are closed. Good for testing. Also, lets GC reclaim memory. Race condition
+ // here would suggest we are accessing skiplist when we are supposed to have no reference!
+ s.arena = nil
+ // Since the head references the arena's buf, as long as the head is kept around
+ // GC can't release the buf.
+ s.head = nil
+}
+
+func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node {
+ // The base level is already allocated in the node struct.
+ offset := arena.putNode(height)
+ node := arena.getNode(offset)
+ node.keyOffset = arena.putKey(key)
+ node.keySize = uint16(len(key))
+ node.height = uint16(height)
+ node.value = encodeValue(arena.putVal(v), v.EncodedSize())
+ return node
+}
+
+func encodeValue(valOffset uint32, valSize uint16) uint64 {
+ return uint64(valSize)<<32 | uint64(valOffset)
+}
+
+func decodeValue(value uint64) (valOffset uint32, valSize uint16) {
+ valOffset = uint32(value)
+ valSize = uint16(value >> 32)
+ return
+}
+
+// NewSkiplist makes a new empty skiplist, with a given arena size
+func NewSkiplist(arenaSize int64) *Skiplist {
+ arena := newArena(arenaSize)
+ head := newNode(arena, nil, y.ValueStruct{}, maxHeight)
+ return &Skiplist{
+ height: 1,
+ head: head,
+ arena: arena,
+ ref: 1,
+ }
+}
+
+func (s *node) getValueOffset() (uint32, uint16) {
+ value := atomic.LoadUint64(&s.value)
+ return decodeValue(value)
+}
+
+func (s *node) key(arena *Arena) []byte {
+ return arena.getKey(s.keyOffset, s.keySize)
+}
+
+func (s *node) setValue(arena *Arena, v y.ValueStruct) {
+ valOffset := arena.putVal(v)
+ value := encodeValue(valOffset, v.EncodedSize())
+ atomic.StoreUint64(&s.value, value)
+}
+
+func (s *node) getNextOffset(h int) uint32 {
+ return atomic.LoadUint32(&s.tower[h])
+}
+
+func (s *node) casNextOffset(h int, old, val uint32) bool {
+ return atomic.CompareAndSwapUint32(&s.tower[h], old, val)
+}
+
+// Returns true if key is strictly > n.key.
+// If n is nil, this is an "end" marker and we return false.
+//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool {
+// y.AssertTrue(n != s.head)
+// return n != nil && y.CompareKeys(key, n.key) > 0
+//}
+
+func randomHeight() int {
+ h := 1
+ for h < maxHeight && rand.Uint32() <= heightIncrease {
+ h++
+ }
+ return h
+}
+
+func (s *Skiplist) getNext(nd *node, height int) *node {
+ return s.arena.getNode(nd.getNextOffset(height))
+}
+
+// findNear finds the node near to key.
+// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or
+// node.key <= key (if allowEqual=true).
+// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or
+// node.key >= key (if allowEqual=true).
+// Returns the node found. The bool returned is true if the node has key equal to given key.
+func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) {
+ x := s.head
+ level := int(s.getHeight() - 1)
+ for {
+ // Assume x.key < key.
+ next := s.getNext(x, level)
+ if next == nil {
+ // x.key < key < END OF LIST
+ if level > 0 {
+ // Can descend further to iterate closer to the end.
+ level--
+ continue
+ }
+ // Level=0. Cannot descend further. Let's return something that makes sense.
+ if !less {
+ return nil, false
+ }
+ // Try to return x. Make sure it is not a head node.
+ if x == s.head {
+ return nil, false
+ }
+ return x, false
+ }
+
+ nextKey := next.key(s.arena)
+ cmp := y.CompareKeys(key, nextKey)
+ if cmp > 0 {
+ // x.key < next.key < key. We can continue to move right.
+ x = next
+ continue
+ }
+ if cmp == 0 {
+ // x.key < key == next.key.
+ if allowEqual {
+ return next, true
+ }
+ if !less {
+ // We want >, so go to base level to grab the next bigger node.
+ return s.getNext(next, 0), false
+ }
+ // We want <. If not base level, we should go closer in the next level.
+ if level > 0 {
+ level--
+ continue
+ }
+ // On base level. Return x.
+ if x == s.head {
+ return nil, false
+ }
+ return x, false
+ }
+ // cmp < 0. In other words, x.key < key < next.
+ if level > 0 {
+ level--
+ continue
+ }
+ // At base level. Need to return something.
+ if !less {
+ return next, false
+ }
+ // Try to return x. Make sure it is not a head node.
+ if x == s.head {
+ return nil, false
+ }
+ return x, false
+ }
+}
+
+// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key.
+// The input "before" tells us where to start looking.
+// If we found a node with the same key, then we return outBefore = outAfter.
+// Otherwise, outBefore.key < key < outAfter.key.
+func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) {
+ for {
+ // Assume before.key < key.
+ next := s.getNext(before, level)
+ if next == nil {
+ return before, next
+ }
+ nextKey := next.key(s.arena)
+ cmp := y.CompareKeys(key, nextKey)
+ if cmp == 0 {
+ // Equality case.
+ return next, next
+ }
+ if cmp < 0 {
+ // before.key < key < next.key. We are done for this level.
+ return before, next
+ }
+ before = next // Keep moving right on this level.
+ }
+}
+
+func (s *Skiplist) getHeight() int32 {
+ return atomic.LoadInt32(&s.height)
+}
+
+// Put inserts the key-value pair.
+func (s *Skiplist) Put(key []byte, v y.ValueStruct) {
+ // Since we allow overwrite, we may not need to create a new node. We might not even need to
+ // increase the height. Let's defer these actions.
+
+ listHeight := s.getHeight()
+ var prev [maxHeight + 1]*node
+ var next [maxHeight + 1]*node
+ prev[listHeight] = s.head
+ next[listHeight] = nil
+ for i := int(listHeight) - 1; i >= 0; i-- {
+ // Use higher level to speed up for current level.
+ prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i)
+ if prev[i] == next[i] {
+ prev[i].setValue(s.arena, v)
+ return
+ }
+ }
+
+ // We do need to create a new node.
+ height := randomHeight()
+ x := newNode(s.arena, key, v, height)
+
+ // Try to increase s.height via CAS.
+ listHeight = s.getHeight()
+ for height > int(listHeight) {
+ if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) {
+ // Successfully increased skiplist.height.
+ break
+ }
+ listHeight = s.getHeight()
+ }
+
+ // We always insert from the base level and up. After you add a node in base level, we cannot
+ // create a node in the level above because it would have discovered the node in the base level.
+ for i := 0; i < height; i++ {
+ for {
+ if prev[i] == nil {
+ y.AssertTrue(i > 1) // This cannot happen in base level.
+ // We haven't computed prev, next for this level because height exceeds old listHeight.
+ // For these levels, we expect the lists to be sparse, so we can just search from head.
+ prev[i], next[i] = s.findSpliceForLevel(key, s.head, i)
+ // Someone adds the exact same key before we are able to do so. This can only happen on
+ // the base level. But we know we are not on the base level.
+ y.AssertTrue(prev[i] != next[i])
+ }
+ nextOffset := s.arena.getNodeOffset(next[i])
+ x.tower[i] = nextOffset
+ if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) {
+ // Managed to insert x between prev[i] and next[i]. Go to the next level.
+ break
+ }
+ // CAS failed. We need to recompute prev and next.
+ // It is unlikely to be helpful to try to use a different level as we redo the search,
+ // because it is unlikely that lots of nodes are inserted between prev[i] and next[i].
+ prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i)
+ if prev[i] == next[i] {
+ y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i)
+ prev[i].setValue(s.arena, v)
+ return
+ }
+ }
+ }
+}
+
+// Empty returns if the Skiplist is empty.
+func (s *Skiplist) Empty() bool {
+ return s.findLast() == nil
+}
+
+// findLast returns the last element. If head (empty list), we return nil. All the find functions
+// will NEVER return the head nodes.
+func (s *Skiplist) findLast() *node {
+ n := s.head
+ level := int(s.getHeight()) - 1
+ for {
+ next := s.getNext(n, level)
+ if next != nil {
+ n = next
+ continue
+ }
+ if level == 0 {
+ if n == s.head {
+ return nil
+ }
+ return n
+ }
+ level--
+ }
+}
+
+// Get gets the value associated with the key. It returns a valid value if it finds an equal or earlier
+// version of the same key.
+func (s *Skiplist) Get(key []byte) y.ValueStruct {
+ n, _ := s.findNear(key, false, true) // findGreaterOrEqual.
+ if n == nil {
+ return y.ValueStruct{}
+ }
+
+ nextKey := s.arena.getKey(n.keyOffset, n.keySize)
+ if !y.SameKey(key, nextKey) {
+ return y.ValueStruct{}
+ }
+
+ valOffset, valSize := n.getValueOffset()
+ vs := s.arena.getVal(valOffset, valSize)
+ vs.Version = y.ParseTs(nextKey)
+ return vs
+}
+
+// NewIterator returns a skiplist iterator. You have to Close() the iterator.
+func (s *Skiplist) NewIterator() *Iterator {
+ s.IncrRef()
+ return &Iterator{list: s}
+}
+
+// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal
+// arena.
+func (s *Skiplist) MemSize() int64 { return s.arena.size() }
+
+// Iterator is an iterator over a skiplist object. For new objects, you just
+// need to initialize Iterator.list.
+type Iterator struct {
+ list *Skiplist
+ n *node
+}
+
+// Close frees the resources held by the iterator
+func (s *Iterator) Close() error {
+ s.list.DecrRef()
+ return nil
+}
+
+// Valid returns true iff the iterator is positioned at a valid node.
+func (s *Iterator) Valid() bool { return s.n != nil }
+
+// Key returns the key at the current position.
+func (s *Iterator) Key() []byte {
+ return s.list.arena.getKey(s.n.keyOffset, s.n.keySize)
+}
+
+// Value returns value.
+func (s *Iterator) Value() y.ValueStruct {
+ valOffset, valSize := s.n.getValueOffset()
+ return s.list.arena.getVal(valOffset, valSize)
+}
+
+// Next advances to the next position.
+func (s *Iterator) Next() {
+ y.AssertTrue(s.Valid())
+ s.n = s.list.getNext(s.n, 0)
+}
+
+// Prev advances to the previous position.
+func (s *Iterator) Prev() {
+ y.AssertTrue(s.Valid())
+ s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed.
+}
+
+// Seek advances to the first entry with a key >= target.
+func (s *Iterator) Seek(target []byte) {
+ s.n, _ = s.list.findNear(target, false, true) // find >=.
+}
+
+// SeekForPrev finds an entry with key <= target.
+func (s *Iterator) SeekForPrev(target []byte) {
+ s.n, _ = s.list.findNear(target, true, true) // find <=.
+}
+
+// SeekToFirst seeks position at the first entry in list.
+// Final state of iterator is Valid() iff list is not empty.
+func (s *Iterator) SeekToFirst() {
+ s.n = s.list.getNext(s.list.head, 0)
+}
+
+// SeekToLast seeks position at the last entry in list.
+// Final state of iterator is Valid() iff list is not empty.
+func (s *Iterator) SeekToLast() {
+ s.n = s.list.findLast()
+}
+
+// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around
+// Iterator. We like to keep Iterator as before, because it is more powerful and
+// we might support bidirectional iterators in the future.
+type UniIterator struct {
+ iter *Iterator
+ reversed bool
+}
+
+// NewUniIterator returns a UniIterator.
+func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator {
+ return &UniIterator{
+ iter: s.NewIterator(),
+ reversed: reversed,
+ }
+}
+
+// Next implements y.Interface
+func (s *UniIterator) Next() {
+ if !s.reversed {
+ s.iter.Next()
+ } else {
+ s.iter.Prev()
+ }
+}
+
+// Rewind implements y.Interface
+func (s *UniIterator) Rewind() {
+ if !s.reversed {
+ s.iter.SeekToFirst()
+ } else {
+ s.iter.SeekToLast()
+ }
+}
+
+// Seek implements y.Interface
+func (s *UniIterator) Seek(key []byte) {
+ if !s.reversed {
+ s.iter.Seek(key)
+ } else {
+ s.iter.SeekForPrev(key)
+ }
+}
+
+// Key implements y.Interface
+func (s *UniIterator) Key() []byte { return s.iter.Key() }
+
+// Value implements y.Interface
+func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() }
+
+// Valid implements y.Interface
+func (s *UniIterator) Valid() bool { return s.iter.Valid() }
+
+// Close implements y.Interface (and frees up the iter's resources)
+func (s *UniIterator) Close() error { return s.iter.Close() }
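
A minimal usage sketch for the skiplist as a whole. It assumes the badger key convention used elsewhere in this patch (y.KeyWithTs appends the version timestamp that y.CompareKeys and y.ParseTs expect) and that y.ValueStruct exposes the payload as its Value field:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/skl"
	"github.com/dgraph-io/badger/y"
)

func main() {
	list := skl.NewSkiplist(1 << 20) // 1 MB arena
	defer list.DecrRef()

	// Keys carry a timestamp suffix so y.CompareKeys can order versions.
	list.Put(y.KeyWithTs([]byte("alpha"), 1), y.ValueStruct{Value: []byte("a")})
	list.Put(y.KeyWithTs([]byte("beta"), 1), y.ValueStruct{Value: []byte("b")})

	vs := list.Get(y.KeyWithTs([]byte("alpha"), 1))
	fmt.Printf("alpha -> %s (version %d)\n", vs.Value, vs.Version)

	it := list.NewIterator()
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		fmt.Printf("%s -> %s\n", y.ParseKey(it.Key()), it.Value().Value)
	}
}
```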
diff --git a/vendor/github.com/dgraph-io/badger/stream.go b/vendor/github.com/dgraph-io/badger/stream.go
new file mode 100644
index 000000000..f0841a6a4
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/stream.go
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2018 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "context"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/y"
+ humanize "github.com/dustin/go-humanize"
+)
+
+const pageSize = 4 << 20 // 4MB
+
+// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up
+// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key
+// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted
+// order, use Iterator.
+type Stream struct {
+ // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would
+ // iterate over the entire DB.
+ Prefix []byte
+
+ // Number of goroutines to use for iterating over key ranges. Defaults to 16.
+ NumGo int
+
+ // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can
+ // be used to help differentiate them from other activities. Default is "Badger.Stream".
+ LogPrefix string
+
+ // ChooseKey is invoked each time a new key is encountered. Note that this is not called
+ // on every version of the value, only the first encountered version (i.e. the highest version
+ // of the value a key has). ChooseKey can be left nil to select all keys.
+ //
+ // Note: Calls to ChooseKey are concurrent.
+ ChooseKey func(item *Item) bool
+
+ // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It
+// is up to the caller to iterate over the versions and generate zero, one or more KVs. It
+ // is expected that the user would advance the iterator to go through the versions of the
+ // values. However, the user MUST immediately return from this function on the first encounter
+ // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList
+ // function by default.
+ //
+ // Note: Calls to KeyToList are concurrent.
+ KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error)
+
+ // This is the method where Stream sends the final output. All calls to Send are done by a
+ // single goroutine, i.e. logic within Send method can expect single threaded execution.
+ Send func(*pb.KVList) error
+
+ readTs uint64
+ db *DB
+ rangeCh chan keyRange
+ kvChan chan *pb.KVList
+ nextStreamId uint32
+}
+
+// ToList is a default implementation of KeyToList. It picks up all valid versions of the key,
+// skipping over deleted or expired keys.
+func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) {
+ list := &pb.KVList{}
+ for ; itr.Valid(); itr.Next() {
+ item := itr.Item()
+ if item.IsDeletedOrExpired() {
+ break
+ }
+ if !bytes.Equal(key, item.Key()) {
+ // Break out on the first encounter with another key.
+ break
+ }
+
+ valCopy, err := item.ValueCopy(nil)
+ if err != nil {
+ return nil, err
+ }
+ kv := &pb.KV{
+ Key: item.KeyCopy(nil),
+ Value: valCopy,
+ UserMeta: []byte{item.UserMeta()},
+ Version: item.Version(),
+ ExpiresAt: item.ExpiresAt(),
+ }
+ list.Kv = append(list.Kv, kv)
+ if st.db.opt.NumVersionsToKeep == 1 {
+ break
+ }
+
+ if item.DiscardEarlierVersions() {
+ break
+ }
+ }
+ return list, nil
+}
+
+// keyRange is [start, end), including start, excluding end. Do ensure that the start,
+// end byte slices are owned by keyRange struct.
+func (st *Stream) produceRanges(ctx context.Context) {
+ splits := st.db.KeySplits(st.Prefix)
+
+ // We don't need to create more key ranges than NumGo goroutines. This way, we will have limited
+ // number of "streams" coming out, which then helps limit the memory used by SSWriter.
+ {
+ pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo)))
+ if pickEvery < 1 {
+ pickEvery = 1
+ }
+ filtered := splits[:0]
+ for i, split := range splits {
+ if (i+1)%pickEvery == 0 {
+ filtered = append(filtered, split)
+ }
+ }
+ splits = filtered
+ }
+
+ start := y.SafeCopy(nil, st.Prefix)
+ for _, key := range splits {
+ st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))}
+ start = y.SafeCopy(nil, []byte(key))
+ }
+ // Edge case: prefix is empty and no splits exist. In that case, we should have at least one
+ // keyRange output.
+ st.rangeCh <- keyRange{left: start}
+ close(st.rangeCh)
+}
+
+// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan.
+func (st *Stream) produceKVs(ctx context.Context) error {
+ var size int
+ var txn *Txn
+ if st.readTs > 0 {
+ txn = st.db.NewTransactionAt(st.readTs, false)
+ } else {
+ txn = st.db.NewTransaction(false)
+ }
+ defer txn.Discard()
+
+ iterate := func(kr keyRange) error {
+ iterOpts := DefaultIteratorOptions
+ iterOpts.AllVersions = true
+ iterOpts.Prefix = st.Prefix
+ iterOpts.PrefetchValues = false
+ itr := txn.NewIterator(iterOpts)
+ defer itr.Close()
+
+ // This unique stream id is used to identify all the keys from this iteration.
+ streamId := atomic.AddUint32(&st.nextStreamId, 1)
+
+ outList := new(pb.KVList)
+ var prevKey []byte
+ for itr.Seek(kr.left); itr.Valid(); {
+ // it.Valid would only return true for keys with the provided Prefix in iterOpts.
+ item := itr.Item()
+ if bytes.Equal(item.Key(), prevKey) {
+ itr.Next()
+ continue
+ }
+ prevKey = append(prevKey[:0], item.Key()...)
+
+ // Check if we reached the end of the key range.
+ if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 {
+ break
+ }
+ // Check if we should pick this key.
+ if st.ChooseKey != nil && !st.ChooseKey(item) {
+ continue
+ }
+
+ // Now convert to key value.
+ list, err := st.KeyToList(item.KeyCopy(nil), itr)
+ if err != nil {
+ return err
+ }
+ if list == nil || len(list.Kv) == 0 {
+ continue
+ }
+ outList.Kv = append(outList.Kv, list.Kv...)
+ size += list.Size()
+ if size >= pageSize {
+ for _, kv := range outList.Kv {
+ kv.StreamId = streamId
+ }
+ select {
+ case st.kvChan <- outList:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ outList = new(pb.KVList)
+ size = 0
+ }
+ }
+ if len(outList.Kv) > 0 {
+ for _, kv := range outList.Kv {
+ kv.StreamId = streamId
+ }
+ // TODO: Think of a way to indicate that a stream is over.
+ select {
+ case st.kvChan <- outList:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+ }
+
+ for {
+ select {
+ case kr, ok := <-st.rangeCh:
+ if !ok {
+ // Done with the keys.
+ return nil
+ }
+ if err := iterate(kr); err != nil {
+ return err
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+func (st *Stream) streamKVs(ctx context.Context) error {
+ var count int
+ var bytesSent uint64
+ t := time.NewTicker(time.Second)
+ defer t.Stop()
+ now := time.Now()
+
+ slurp := func(batch *pb.KVList) error {
+ loop:
+ for {
+ select {
+ case kvs, ok := <-st.kvChan:
+ if !ok {
+ break loop
+ }
+ y.AssertTrue(kvs != nil)
+ batch.Kv = append(batch.Kv, kvs.Kv...)
+ default:
+ break loop
+ }
+ }
+ sz := uint64(batch.Size())
+ bytesSent += sz
+ count += len(batch.Kv)
+ t := time.Now()
+ if err := st.Send(batch); err != nil {
+ return err
+ }
+ st.db.opt.Infof("%s Created batch of size: %s in %s.\n",
+ st.LogPrefix, humanize.Bytes(sz), time.Since(t))
+ return nil
+ }
+
+outer:
+ for {
+ var batch *pb.KVList
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+
+ case <-t.C:
+ dur := time.Since(now)
+ durSec := uint64(dur.Seconds())
+ if durSec == 0 {
+ continue
+ }
+ speed := bytesSent / durSec
+ st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix,
+ y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed))
+
+ case kvs, ok := <-st.kvChan:
+ if !ok {
+ break outer
+ }
+ y.AssertTrue(kvs != nil)
+ batch = kvs
+ if err := slurp(batch); err != nil {
+ return err
+ }
+ }
+ }
+
+ st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count)
+ return nil
+}
+
+// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo number of
+// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single
+// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also
+// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send
+// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and
+// return that error. Orchestrate can be called multiple times, but in serial order.
+func (st *Stream) Orchestrate(ctx context.Context) error {
+ st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists.
+
+ // kvChan should only have a small capacity to ensure that we don't buffer up too much data if
+ // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each
+ // KVList. To get 128MB buffer, we can set the channel size to 32.
+ st.kvChan = make(chan *pb.KVList, 32)
+
+ if st.KeyToList == nil {
+ st.KeyToList = st.ToList
+ }
+
+ // Picks up ranges from Badger, and sends them to rangeCh.
+ go st.produceRanges(ctx)
+
+ errCh := make(chan error, 1) // Stores the first error from the produceKVs goroutines.
+ var wg sync.WaitGroup
+ for i := 0; i < st.NumGo; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan.
+ if err := st.produceKVs(ctx); err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }()
+ }
+
+ // Pick up key-values from kvChan and send to stream.
+ kvErr := make(chan error, 1)
+ go func() {
+ // Picks up KV lists from kvChan, and sends them to Output.
+ kvErr <- st.streamKVs(ctx)
+ }()
+ wg.Wait() // Wait for produceKVs to be over.
+ close(st.kvChan) // Now we can close kvChan.
+
+ select {
+ case err := <-errCh: // Check error from produceKVs.
+ return err
+ default:
+ }
+
+ // Wait for key streaming to be over.
+ err := <-kvErr
+ return err
+}
+
+func (db *DB) newStream() *Stream {
+ return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"}
+}
+
+// NewStream creates a new Stream.
+func (db *DB) NewStream() *Stream {
+ if db.opt.managedTxns {
+ panic("This API can not be called in managed mode.")
+ }
+ return db.newStream()
+}
+
+// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB.
+func (db *DB) NewStreamAt(readTs uint64) *Stream {
+ if !db.opt.managedTxns {
+ panic("This API can only be called in managed mode.")
+ }
+ stream := db.newStream()
+ stream.readTs = readTs
+ return stream
+}
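
To tie the Stream pieces together, here is a hedged sketch of counting every key-value under a prefix against an already-open DB. It only uses the exported surface shown above (NewStream, Prefix, LogPrefix, Send, Orchestrate); the counting in Send is purely illustrative:

```go
package example

import (
	"context"

	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/pb"
)

// countKVs streams every key-value whose key starts with prefix and counts them.
func countKVs(ctx context.Context, db *badger.DB, prefix []byte) (int, error) {
	var total int
	stream := db.NewStream() // NumGo defaults to 16, LogPrefix to "Badger.Stream"
	stream.Prefix = prefix
	stream.LogPrefix = "Example.Count"
	stream.Send = func(list *pb.KVList) error {
		total += len(list.Kv) // Send is invoked from a single goroutine
		return nil
	}
	if err := stream.Orchestrate(ctx); err != nil {
		return 0, err
	}
	return total, nil
}
```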
diff --git a/vendor/github.com/dgraph-io/badger/stream_writer.go b/vendor/github.com/dgraph-io/badger/stream_writer.go
new file mode 100644
index 000000000..3d2a7992e
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/stream_writer.go
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "math"
+
+ "github.com/dgraph-io/badger/pb"
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+ humanize "github.com/dustin/go-humanize"
+ "github.com/pkg/errors"
+)
+
+const headStreamId uint32 = math.MaxUint32
+
+// StreamWriter is used to write data coming from multiple streams. The streams must not have any
+// overlapping key ranges, and within each stream the keys must be sorted. The Badger Stream
+// framework is capable of generating such output. StreamWriter can therefore be used at the other
+// end to build a BadgerDB at a much faster pace, by writing SSTables (and value logs) directly to
+// LSM tree levels without causing any compactions at all. This is much faster than using a batched
+// writer or transactions, but it is only applicable when the keys are pre-sorted and the DB is
+// being bootstrapped. Existing data is deleted when using this writer, so it is only useful when
+// restoring from a backup or replicating a DB across servers.
+//
+// StreamWriter should not be used on in-use DB instances. It is designed only to bootstrap new
+// DBs.
+type StreamWriter struct {
+ db *DB
+ done func()
+ throttle *y.Throttle
+ maxVersion uint64
+ writers map[uint32]*sortedWriter
+ closer *y.Closer
+}
+
+// NewStreamWriter creates a StreamWriter. Right after creating a StreamWriter, Prepare must be
+// called. The memory usage of a StreamWriter is directly proportional to the number of streams
+// possible, so efforts must be made to keep the number of streams low. The Stream framework
+// typically uses 16 goroutines and hence creates 16 streams.
+func (db *DB) NewStreamWriter() *StreamWriter {
+ return &StreamWriter{
+ db: db,
+ // throttle shouldn't make much difference. Memory consumption is based on the number of
+ // concurrent streams being processed.
+ throttle: y.NewThrottle(16),
+ writers: make(map[uint32]*sortedWriter),
+ closer: y.NewCloser(0),
+ }
+}
+
+// Prepare should be called before writing any entry to StreamWriter. It deletes all data present
+// in the existing DB, and stops compactions and any writes being done by other means. Be very
+// careful when calling Prepare, because it could result in permanent data loss. Not calling
+// Prepare would result in a corrupt Badger instance.
+func (sw *StreamWriter) Prepare() error {
+ var err error
+ sw.done, err = sw.db.dropAll()
+ return err
+}
+
+// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter
+// would use to demux the writes. Write is not thread safe and it should NOT be called concurrently.
+func (sw *StreamWriter) Write(kvs *pb.KVList) error {
+ if len(kvs.GetKv()) == 0 {
+ return nil
+ }
+ streamReqs := make(map[uint32]*request)
+ for _, kv := range kvs.Kv {
+ var meta, userMeta byte
+ if len(kv.Meta) > 0 {
+ meta = kv.Meta[0]
+ }
+ if len(kv.UserMeta) > 0 {
+ userMeta = kv.UserMeta[0]
+ }
+ if sw.maxVersion < kv.Version {
+ sw.maxVersion = kv.Version
+ }
+ e := &Entry{
+ Key: y.KeyWithTs(kv.Key, kv.Version),
+ Value: kv.Value,
+ UserMeta: userMeta,
+ ExpiresAt: kv.ExpiresAt,
+ meta: meta,
+ }
+ // If the value can be colocated with the key in LSM tree, we can skip
+ // writing the value to value log.
+ e.skipVlog = sw.db.shouldWriteValueToLSM(*e)
+ req := streamReqs[kv.StreamId]
+ if req == nil {
+ req = &request{}
+ streamReqs[kv.StreamId] = req
+ }
+ req.Entries = append(req.Entries, e)
+ }
+ var all []*request
+ for _, req := range streamReqs {
+ all = append(all, req)
+ }
+ if err := sw.db.vlog.write(all); err != nil {
+ return err
+ }
+
+ for streamId, req := range streamReqs {
+ writer, ok := sw.writers[streamId]
+ if !ok {
+ writer = sw.newWriter(streamId)
+ sw.writers[streamId] = writer
+ }
+ writer.reqCh <- req
+ }
+ return nil
+}
+
+// Flush is called once we are done writing all the entries. It syncs DB directories. It also
+// updates Oracle with maxVersion found in all entries (if DB is not managed).
+func (sw *StreamWriter) Flush() error {
+ defer sw.done()
+
+ sw.closer.SignalAndWait()
+ var maxHead valuePointer
+ for _, writer := range sw.writers {
+ if err := writer.Done(); err != nil {
+ return err
+ }
+ if maxHead.Less(writer.head) {
+ maxHead = writer.head
+ }
+ }
+
+ // Encode and write the value log head into a new table.
+ data := make([]byte, vptrSize)
+ maxHead.Encode(data)
+ headWriter := sw.newWriter(headStreamId)
+ if err := headWriter.Add(
+ y.KeyWithTs(head, sw.maxVersion),
+ y.ValueStruct{Value: data}); err != nil {
+ return err
+ }
+ if err := headWriter.Done(); err != nil {
+ return err
+ }
+
+ if !sw.db.opt.managedTxns {
+ if sw.db.orc != nil {
+ sw.db.orc.Stop()
+ }
+ sw.db.orc = newOracle(sw.db.opt)
+ sw.db.orc.nextTxnTs = sw.maxVersion
+ sw.db.orc.txnMark.Done(sw.maxVersion)
+ sw.db.orc.readMark.Done(sw.maxVersion)
+ sw.db.orc.incrementNextTs()
+ }
+
+ // Wait for all files to be written.
+ if err := sw.throttle.Finish(); err != nil {
+ return err
+ }
+
+ // Now sync the directories, so all the files are registered.
+ if sw.db.opt.ValueDir != sw.db.opt.Dir {
+ if err := syncDir(sw.db.opt.ValueDir); err != nil {
+ return err
+ }
+ }
+ if err := syncDir(sw.db.opt.Dir); err != nil {
+ return err
+ }
+ return sw.db.lc.validate()
+}
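+
+// Putting Prepare, Write and Flush together: a minimal bootstrap sketch. The kvLists source is
+// assumed (it would typically be the Stream framework running on another instance); error
+// handling is abbreviated.
+//
+//	sw := db.NewStreamWriter()
+//	if err := sw.Prepare(); err != nil { // Drops all existing data; see Prepare above.
+//		return err
+//	}
+//	for _, kvs := range kvLists { // kvLists: sorted, non-overlapping *pb.KVList batches.
+//		if err := sw.Write(kvs); err != nil { // Not safe for concurrent callers.
+//			return err
+//		}
+//	}
+//	return sw.Flush() // Writes the value log head, syncs directories, validates levels.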
+
+type sortedWriter struct {
+ db *DB
+ throttle *y.Throttle
+
+ builder *table.Builder
+ lastKey []byte
+ streamId uint32
+ reqCh chan *request
+ head valuePointer
+}
+
+func (sw *StreamWriter) newWriter(streamId uint32) *sortedWriter {
+ w := &sortedWriter{
+ db: sw.db,
+ streamId: streamId,
+ throttle: sw.throttle,
+ builder: table.NewTableBuilder(),
+ reqCh: make(chan *request, 3),
+ }
+ sw.closer.AddRunning(1)
+ go w.handleRequests(sw.closer)
+ return w
+}
+
+// ErrUnsortedKey is returned when an out-of-order key arrives at a sortedWriter during a call to Add.
+var ErrUnsortedKey = errors.New("Keys not in sorted order")
+
+func (w *sortedWriter) handleRequests(closer *y.Closer) {
+ defer closer.Done()
+
+ process := func(req *request) {
+ for i, e := range req.Entries {
+ vptr := req.Ptrs[i]
+ if !vptr.IsZero() {
+ y.AssertTrue(w.head.Less(vptr))
+ w.head = vptr
+ }
+
+ var vs y.ValueStruct
+ if e.skipVlog {
+ vs = y.ValueStruct{
+ Value: e.Value,
+ Meta: e.meta,
+ UserMeta: e.UserMeta,
+ ExpiresAt: e.ExpiresAt,
+ }
+ } else {
+ vbuf := make([]byte, vptrSize)
+ vs = y.ValueStruct{
+ Value: vptr.Encode(vbuf),
+ Meta: e.meta | bitValuePointer,
+ UserMeta: e.UserMeta,
+ ExpiresAt: e.ExpiresAt,
+ }
+ }
+ if err := w.Add(e.Key, vs); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ for {
+ select {
+ case req := <-w.reqCh:
+ process(req)
+ case <-closer.HasBeenClosed():
+ close(w.reqCh)
+ for req := range w.reqCh {
+ process(req)
+ }
+ return
+ }
+ }
+}
+
+// Add adds key and vs to sortedWriter.
+func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error {
+ if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 {
+ return ErrUnsortedKey
+ }
+
+ sameKey := y.SameKey(key, w.lastKey)
+ // Same keys should go into the same SSTable.
+ if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) {
+ if err := w.send(); err != nil {
+ return err
+ }
+ }
+
+ w.lastKey = y.SafeCopy(w.lastKey, key)
+ return w.builder.Add(key, vs)
+}
+
+func (w *sortedWriter) send() error {
+ if err := w.throttle.Do(); err != nil {
+ return err
+ }
+ go func(builder *table.Builder) {
+ data := builder.Finish()
+ err := w.createTable(data)
+ w.throttle.Done(err)
+ }(w.builder)
+ w.builder = table.NewTableBuilder()
+ return nil
+}
+
+// Done is called once we are done writing all keys and valueStructs
+// to sortedWriter. It completes writing the current SST to disk.
+func (w *sortedWriter) Done() error {
+ if w.builder.Empty() {
+ return nil
+ }
+ return w.send()
+}
+
+func (w *sortedWriter) createTable(data []byte) error {
+ if len(data) == 0 {
+ return nil
+ }
+ fileID := w.db.lc.reserveFileID()
+ fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true)
+ if err != nil {
+ return err
+ }
+ if _, err := fd.Write(data); err != nil {
+ return err
+ }
+ tbl, err := table.OpenTable(fd, w.db.opt.TableLoadingMode, nil)
+ if err != nil {
+ return err
+ }
+ lc := w.db.lc
+
+ var lhandler *levelHandler
+ // We should start the levels from 1, because we need level 0 to set the !badger!head key. We
+ // cannot mix up this key with other keys from the DB, otherwise we would introduce a range
+ // overlap violation.
+ y.AssertTrue(len(lc.levels) > 1)
+ for _, l := range lc.levels[1:] {
+ ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize)
+ if ratio < 1.0 {
+ lhandler = l
+ break
+ }
+ }
+ if lhandler == nil {
+ // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do
+ // better than that.
+ lhandler = lc.levels[len(lc.levels)-1]
+ }
+ if w.streamId == headStreamId {
+ // This is a special !badger!head key. We should store it at level 0, separate from all the
+ // other keys to avoid an overlap.
+ lhandler = lc.levels[0]
+ }
+	// Now that the table can be opened successfully, let's add it to the MANIFEST.
+ change := &pb.ManifestChange{
+ Id: tbl.ID(),
+ Op: pb.ManifestChange_CREATE,
+ Level: uint32(lhandler.level),
+ Checksum: tbl.Checksum,
+ }
+ if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil {
+ return err
+ }
+ if err := lhandler.replaceTables([]*table.Table{}, []*table.Table{tbl}); err != nil {
+ return err
+ }
+ w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n",
+ fileID, lhandler.level, w.streamId, humanize.Bytes(uint64(tbl.Size())))
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/structs.go b/vendor/github.com/dgraph-io/badger/structs.go
new file mode 100644
index 000000000..51d16cdb2
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/structs.go
@@ -0,0 +1,186 @@
+package badger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "time"
+
+ "github.com/dgraph-io/badger/y"
+)
+
+type valuePointer struct {
+ Fid uint32
+ Len uint32
+ Offset uint32
+}
+
+func (p valuePointer) Less(o valuePointer) bool {
+ if p.Fid != o.Fid {
+ return p.Fid < o.Fid
+ }
+ if p.Offset != o.Offset {
+ return p.Offset < o.Offset
+ }
+ return p.Len < o.Len
+}
+
+func (p valuePointer) IsZero() bool {
+ return p.Fid == 0 && p.Offset == 0 && p.Len == 0
+}
+
+const vptrSize = 12
+
+// Encode encodes Pointer into byte buffer.
+func (p valuePointer) Encode(b []byte) []byte {
+ binary.BigEndian.PutUint32(b[:4], p.Fid)
+ binary.BigEndian.PutUint32(b[4:8], p.Len)
+ binary.BigEndian.PutUint32(b[8:12], p.Offset)
+ return b[:vptrSize]
+}
+
+func (p *valuePointer) Decode(b []byte) {
+ p.Fid = binary.BigEndian.Uint32(b[:4])
+ p.Len = binary.BigEndian.Uint32(b[4:8])
+ p.Offset = binary.BigEndian.Uint32(b[8:12])
+}
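+
+// Encode and Decode are exact inverses over the fixed 12-byte layout above (Fid, Len, Offset,
+// each a big-endian uint32). A small round-trip sketch:
+//
+//	var buf [vptrSize]byte
+//	p := valuePointer{Fid: 3, Len: 42, Offset: 1024}
+//	p.Encode(buf[:])
+//	var q valuePointer
+//	q.Decode(buf[:])
+//	// q is now equal to p.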
+
+// header is used in value log as a header before Entry.
+type header struct {
+ klen uint32
+ vlen uint32
+ expiresAt uint64
+ meta byte
+ userMeta byte
+}
+
+const (
+ headerBufSize = 18
+)
+
+func (h header) Encode(out []byte) {
+ y.AssertTrue(len(out) >= headerBufSize)
+ binary.BigEndian.PutUint32(out[0:4], h.klen)
+ binary.BigEndian.PutUint32(out[4:8], h.vlen)
+ binary.BigEndian.PutUint64(out[8:16], h.expiresAt)
+ out[16] = h.meta
+ out[17] = h.userMeta
+}
+
+// Decodes h from buf.
+func (h *header) Decode(buf []byte) {
+ h.klen = binary.BigEndian.Uint32(buf[0:4])
+ h.vlen = binary.BigEndian.Uint32(buf[4:8])
+ h.expiresAt = binary.BigEndian.Uint64(buf[8:16])
+ h.meta = buf[16]
+ h.userMeta = buf[17]
+}
+
+// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
+// the user to set data.
+type Entry struct {
+ Key []byte
+ Value []byte
+ UserMeta byte
+ ExpiresAt uint64 // time.Unix
+ meta byte
+
+ // Fields maintained internally.
+ offset uint32
+ skipVlog bool
+}
+
+func (e *Entry) estimateSize(threshold int) int {
+ if len(e.Value) < threshold {
+ return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta
+ }
+ return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas.
+}
+
+// Encodes e to buf. Returns number of bytes written.
+func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) {
+ h := header{
+ klen: uint32(len(e.Key)),
+ vlen: uint32(len(e.Value)),
+ expiresAt: e.ExpiresAt,
+ meta: e.meta,
+ userMeta: e.UserMeta,
+ }
+
+ var headerEnc [headerBufSize]byte
+ h.Encode(headerEnc[:])
+
+ hash := crc32.New(y.CastagnoliCrcTable)
+
+ buf.Write(headerEnc[:])
+ if _, err := hash.Write(headerEnc[:]); err != nil {
+ return 0, err
+ }
+
+ buf.Write(e.Key)
+ if _, err := hash.Write(e.Key); err != nil {
+ return 0, err
+ }
+
+ buf.Write(e.Value)
+ if _, err := hash.Write(e.Value); err != nil {
+ return 0, err
+ }
+
+ var crcBuf [crc32.Size]byte
+ binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32())
+ buf.Write(crcBuf[:])
+
+ return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil
+}
+
+func (e Entry) print(prefix string) {
+ fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d",
+ prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value))
+}
+
+// NewEntry creates a new entry with the key and value passed as arguments. The newly created
+// entry can be set in a transaction by calling txn.SetEntry(). All other properties of Entry can
+// be set by calling the WithMeta, WithDiscard, and WithTTL methods on it.
+// This function keeps references to the key and value slices, hence users must
+// not modify them until the end of the transaction.
+func NewEntry(key, value []byte) *Entry {
+ return &Entry{
+ Key: key,
+ Value: value,
+ }
+}
+
+// WithMeta adds metadata to Entry e. This byte is stored alongside the key
+// and can be used as an aid to interpret the value or store other contextual
+// bits corresponding to the key-value pair of the entry.
+func (e *Entry) WithMeta(meta byte) *Entry {
+ e.UserMeta = meta
+ return e
+}
+
+// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the
+// Entry) will be eligible for garbage collection.
+// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The
+// default setting is 1, in which case this function doesn't add any benefit. If, however, you
+// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this
+// method to indicate that all the older versions can be discarded and removed during compactions.
+func (e *Entry) WithDiscard() *Entry {
+ e.meta = bitDiscardEarlierVersions
+ return e
+}
+
+// WithTTL adds a time-to-live duration to Entry e. An Entry stored with a TTL automatically
+// expires once the duration has elapsed, and becomes eligible for garbage collection.
+func (e *Entry) WithTTL(dur time.Duration) *Entry {
+ e.ExpiresAt = uint64(time.Now().Add(dur).Unix())
+ return e
+}
+
+// withMergeBit sets merge bit in entry's metadata. This
+// function is called by MergeOperator's Add method.
+func (e *Entry) withMergeBit() *Entry {
+ e.meta = bitMergeEntry
+ return e
+}
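+
+// The Entry helpers above form a small builder-style API. A hedged usage sketch, assuming an
+// update transaction txn obtained elsewhere (per the NewEntry comment, entries are applied with
+// txn.SetEntry); the key, value and metadata are illustrative:
+//
+//	e := NewEntry([]byte("answer"), []byte("42")).
+//		WithMeta(0x01).    // Stored as the entry's UserMeta byte, alongside the key.
+//		WithTTL(time.Hour) // Expires an hour from now, then becomes eligible for GC.
+//	if err := txn.SetEntry(e); err != nil {
+//		return err // e.g. ErrTxnTooBig; key and value must not be modified until the txn ends.
+//	}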
diff --git a/vendor/github.com/dgraph-io/badger/table/README.md b/vendor/github.com/dgraph-io/badger/table/README.md
new file mode 100644
index 000000000..a784f1268
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/table/README.md
@@ -0,0 +1,69 @@
+Size of table is 122,173,606 bytes for all benchmarks.
+
+# BenchmarkRead
+```
+$ go test -bench ^BenchmarkRead$ -run ^$ -count 3
+goos: linux
+goarch: amd64
+pkg: github.com/dgraph-io/badger/table
+BenchmarkRead-16 10 153281932 ns/op
+BenchmarkRead-16 10 153454443 ns/op
+BenchmarkRead-16 10 155349696 ns/op
+PASS
+ok github.com/dgraph-io/badger/table 23.549s
+```
+
+Size of table is 122,173,606 bytes, which is ~117MB.
+
+The rate is ~750MB/s using LoadToRAM (when table is in RAM).
+
+To read a 64MB table, this would take ~0.0853s, which is negligible.
+
+# BenchmarkReadAndBuild
+```
+$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3
+goos: linux
+goarch: amd64
+pkg: github.com/dgraph-io/badger/table
+BenchmarkReadAndBuild-16 2 945041628 ns/op
+BenchmarkReadAndBuild-16 2 947120893 ns/op
+BenchmarkReadAndBuild-16 2 954909506 ns/op
+PASS
+ok github.com/dgraph-io/badger/table 26.856s
+```
+
+The rate is ~122MB/s. To build a 64MB table, this would take ~0.52s. Note that this
+does NOT include flushing the table to disk. All we are doing above is
+reading one table (which is in RAM) and writing one table in memory.
+
+So table building alone takes 0.52s - 0.0853s ~ 0.4347s.
+
+# BenchmarkReadMerged
+Below, we merge 5 tables. The total size remains unchanged at ~122M.
+
+```
+$ go test -bench ReadMerged -run ^$ -count 3
+BenchmarkReadMerged-16 2 954475788 ns/op
+BenchmarkReadMerged-16 2 955252462 ns/op
+BenchmarkReadMerged-16 2 956857353 ns/op
+PASS
+ok github.com/dgraph-io/badger/table 33.327s
+```
+
+The rate is ~122MB/s. To read a 64MB table using merge iterator, this would take ~0.52s.
+
+# BenchmarkRandomRead
+
+```
+$ go test -bench BenchmarkRandomRead$ -run ^$ -count 3
+goos: linux
+goarch: amd64
+pkg: github.com/dgraph-io/badger/table
+BenchmarkRandomRead-16 300000 3596 ns/op
+BenchmarkRandomRead-16 300000 3621 ns/op
+BenchmarkRandomRead-16 300000 3596 ns/op
+PASS
+ok github.com/dgraph-io/badger/table 44.727s
+```
+
+For random read benchmarking, we are randomly reading a key and verifying its value.
diff --git a/vendor/github.com/dgraph-io/badger/table/builder.go b/vendor/github.com/dgraph-io/badger/table/builder.go
new file mode 100644
index 000000000..0657cbca1
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/table/builder.go
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package table
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "math"
+
+ "github.com/AndreasBriese/bbloom"
+ "github.com/dgraph-io/badger/y"
+)
+
+var (
+ restartInterval = 100 // Might want to change this to be based on total size instead of numKeys.
+)
+
+func newBuffer(sz int) *bytes.Buffer {
+ b := new(bytes.Buffer)
+ b.Grow(sz)
+ return b
+}
+
+type header struct {
+ plen uint16 // Overlap with base key.
+ klen uint16 // Length of the diff.
+ vlen uint16 // Length of value.
+ prev uint32 // Offset for the previous key-value pair. The offset is relative to block base offset.
+}
+
+// Encode encodes the header.
+func (h header) Encode(b []byte) {
+ binary.BigEndian.PutUint16(b[0:2], h.plen)
+ binary.BigEndian.PutUint16(b[2:4], h.klen)
+ binary.BigEndian.PutUint16(b[4:6], h.vlen)
+ binary.BigEndian.PutUint32(b[6:10], h.prev)
+}
+
+// Decode decodes the header.
+func (h *header) Decode(buf []byte) int {
+ h.plen = binary.BigEndian.Uint16(buf[0:2])
+ h.klen = binary.BigEndian.Uint16(buf[2:4])
+ h.vlen = binary.BigEndian.Uint16(buf[4:6])
+ h.prev = binary.BigEndian.Uint32(buf[6:10])
+ return h.Size()
+}
+
+// Size returns size of the header. Currently it's just a constant.
+func (h header) Size() int { return 10 }
+
+// Builder is used in building a table.
+type Builder struct {
+ counter int // Number of keys written for the current block.
+
+ // Typically tens or hundreds of meg. This is for one single file.
+ buf *bytes.Buffer
+
+ baseKey []byte // Base key for the current block.
+ baseOffset uint32 // Offset for the current block.
+
+ restarts []uint32 // Base offsets of every block.
+
+ // Tracks offset for the previous key-value pair. Offset is relative to block base offset.
+ prevOffset uint32
+
+ keyBuf *bytes.Buffer
+ keyCount int
+}
+
+// NewTableBuilder makes a new TableBuilder.
+func NewTableBuilder() *Builder {
+ return &Builder{
+ keyBuf: newBuffer(1 << 20),
+ buf: newBuffer(1 << 20),
+ prevOffset: math.MaxUint32, // Used for the first element!
+ }
+}
+
+// Close closes the TableBuilder.
+func (b *Builder) Close() {}
+
+// Empty returns whether it's empty.
+func (b *Builder) Empty() bool { return b.buf.Len() == 0 }
+
+// keyDiff returns a suffix of newKey that is different from b.baseKey.
+func (b Builder) keyDiff(newKey []byte) []byte {
+ var i int
+ for i = 0; i < len(newKey) && i < len(b.baseKey); i++ {
+ if newKey[i] != b.baseKey[i] {
+ break
+ }
+ }
+ return newKey[i:]
+}
+
+func (b *Builder) addHelper(key []byte, v y.ValueStruct) {
+ // Add key to bloom filter.
+ if len(key) > 0 {
+ var klen [2]byte
+ keyNoTs := y.ParseKey(key)
+ binary.BigEndian.PutUint16(klen[:], uint16(len(keyNoTs)))
+ b.keyBuf.Write(klen[:])
+ b.keyBuf.Write(keyNoTs)
+ b.keyCount++
+ }
+
+ // diffKey stores the difference of key with baseKey.
+ var diffKey []byte
+ if len(b.baseKey) == 0 {
+ // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful
+ // and will have to make copies of keys every time they add to builder, which is even worse.
+ b.baseKey = append(b.baseKey[:0], key...)
+ diffKey = key
+ } else {
+ diffKey = b.keyDiff(key)
+ }
+
+ h := header{
+ plen: uint16(len(key) - len(diffKey)),
+ klen: uint16(len(diffKey)),
+ vlen: uint16(v.EncodedSize()),
+ prev: b.prevOffset, // prevOffset is the location of the last key-value added.
+ }
+ b.prevOffset = uint32(b.buf.Len()) - b.baseOffset // Remember current offset for the next Add call.
+
+ // Layout: header, diffKey, value.
+ var hbuf [10]byte
+ h.Encode(hbuf[:])
+ b.buf.Write(hbuf[:])
+ b.buf.Write(diffKey) // We only need to store the key difference.
+
+ v.EncodeTo(b.buf)
+ b.counter++ // Increment number of keys added for this current block.
+}
+
+func (b *Builder) finishBlock() {
+ // When we are at the end of the block and Valid=false, and the user wants to do a Prev,
+ // we need a dummy header to tell us the offset of the previous key-value pair.
+ b.addHelper([]byte{}, y.ValueStruct{})
+}
+
+// Add adds a key-value pair to the block. Once the current block has accumulated
+// restartInterval entries, it is finished and a new block is started.
+func (b *Builder) Add(key []byte, value y.ValueStruct) error {
+ if b.counter >= restartInterval {
+ b.finishBlock()
+ // Start a new block. Initialize the block.
+ b.restarts = append(b.restarts, uint32(b.buf.Len()))
+ b.counter = 0
+ b.baseKey = []byte{}
+ b.baseOffset = uint32(b.buf.Len())
+ b.prevOffset = math.MaxUint32 // First key-value pair of block has header.prev=MaxInt.
+ }
+ b.addHelper(key, value)
+ return nil // Currently, there is no meaningful error.
+}
+
+// ReachedCapacity returns true if the builder's *rough* estimate of the final table size
+// (the data written so far plus the yet-to-be-written block index) exceeds cap.
+// TODO: Look into why there is a discrepancy between this estimate and the real final size.
+// I suspect it is because of the addHelper([]byte{}, y.ValueStruct{}) call at the end of each
+// block; the diff can vary.
+func (b *Builder) ReachedCapacity(cap int64) bool {
+ estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) +
+ 8 /* 8 = end of buf offset + len(restarts) */
+ return int64(estimateSz) > cap
+}
+
+// blockIndex generates the block index for the table.
+// It is mainly a list of all the block base offsets.
+func (b *Builder) blockIndex() []byte {
+ // Store the end offset, so we know the length of the final block.
+ b.restarts = append(b.restarts, uint32(b.buf.Len()))
+
+ // Add 4 because we want to write out number of restarts at the end.
+ sz := 4*len(b.restarts) + 4
+ out := make([]byte, sz)
+ buf := out
+ for _, r := range b.restarts {
+ binary.BigEndian.PutUint32(buf[:4], r)
+ buf = buf[4:]
+ }
+ binary.BigEndian.PutUint32(buf[:4], uint32(len(b.restarts)))
+ return out
+}
+
+// Finish finishes the table by appending the index.
+func (b *Builder) Finish() []byte {
+ bf := bbloom.New(float64(b.keyCount), 0.01)
+ var klen [2]byte
+ key := make([]byte, 1024)
+ for {
+ if _, err := b.keyBuf.Read(klen[:]); err == io.EOF {
+ break
+ } else if err != nil {
+ y.Check(err)
+ }
+ kl := int(binary.BigEndian.Uint16(klen[:]))
+ if cap(key) < kl {
+ key = make([]byte, 2*int(kl)) // 2 * uint16 will overflow
+ }
+ key = key[:kl]
+ y.Check2(b.keyBuf.Read(key))
+ bf.Add(key)
+ }
+
+ b.finishBlock() // This will never start a new block.
+ index := b.blockIndex()
+ b.buf.Write(index)
+
+ // Write bloom filter.
+ bdata := bf.JSONMarshal()
+ n, err := b.buf.Write(bdata)
+ y.Check(err)
+ var buf [4]byte
+ binary.BigEndian.PutUint32(buf[:], uint32(n))
+ b.buf.Write(buf[:])
+
+ return b.buf.Bytes()
+}
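+
+// A rough sketch of the Builder lifecycle as used by the stream writer and compactions: keys are
+// timestamp-suffixed (y.KeyWithTs) and added in sorted order, and the finished byte slice is what
+// gets written out as an .sst file. sortedKVs and maxTableSize are assumed inputs.
+//
+//	b := NewTableBuilder()
+//	defer b.Close()
+//	for _, kv := range sortedKVs {
+//		if b.ReachedCapacity(maxTableSize) {
+//			break // Start a new table once the rough size estimate exceeds the cap.
+//		}
+//		if err := b.Add(kv.key, kv.vs); err != nil {
+//			return err
+//		}
+//	}
+//	data := b.Finish() // Blocks, then block index, then bloom filter.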
diff --git a/vendor/github.com/dgraph-io/badger/table/iterator.go b/vendor/github.com/dgraph-io/badger/table/iterator.go
new file mode 100644
index 000000000..0eb5ed01a
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/table/iterator.go
@@ -0,0 +1,539 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package table
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+type blockIterator struct {
+ data []byte
+ pos uint32
+ err error
+ baseKey []byte
+
+ key []byte
+ val []byte
+ init bool
+
+ last header // The last header we saw.
+}
+
+func (itr *blockIterator) Reset() {
+ itr.pos = 0
+ itr.err = nil
+ itr.baseKey = []byte{}
+ itr.key = []byte{}
+ itr.val = []byte{}
+ itr.init = false
+ itr.last = header{}
+}
+
+func (itr *blockIterator) Init() {
+ if !itr.init {
+ itr.Next()
+ }
+}
+
+func (itr *blockIterator) Valid() bool {
+ return itr != nil && itr.err == nil
+}
+
+func (itr *blockIterator) Error() error {
+ return itr.err
+}
+
+func (itr *blockIterator) Close() {}
+
+var (
+ origin = 0
+ current = 1
+)
+
+// Seek brings us to the first block element that is >= input key.
+func (itr *blockIterator) Seek(key []byte, whence int) {
+ itr.err = nil
+
+ switch whence {
+ case origin:
+ itr.Reset()
+ case current:
+ }
+
+ var done bool
+ for itr.Init(); itr.Valid(); itr.Next() {
+ k := itr.Key()
+ if y.CompareKeys(k, key) >= 0 {
+ // We are done as k is >= key.
+ done = true
+ break
+ }
+ }
+ if !done {
+ itr.err = io.EOF
+ }
+}
+
+func (itr *blockIterator) SeekToFirst() {
+ itr.err = nil
+ itr.Init()
+}
+
+// SeekToLast brings us to the last element. Valid should return true.
+func (itr *blockIterator) SeekToLast() {
+ itr.err = nil
+ for itr.Init(); itr.Valid(); itr.Next() {
+ }
+ itr.Prev()
+}
+
+// parseKV would allocate a new byte slice for key and for value.
+func (itr *blockIterator) parseKV(h header) {
+ if cap(itr.key) < int(h.plen+h.klen) {
+ sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow.
+ itr.key = make([]byte, 2*sz)
+ }
+ itr.key = itr.key[:h.plen+h.klen]
+ copy(itr.key, itr.baseKey[:h.plen])
+ copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)])
+ itr.pos += uint32(h.klen)
+
+ if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) {
+ itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v",
+ itr.pos, h.klen, h.vlen, len(itr.data), h)
+ return
+ }
+ itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)])
+ itr.pos += uint32(h.vlen)
+}
+
+func (itr *blockIterator) Next() {
+ itr.init = true
+ itr.err = nil
+ if itr.pos >= uint32(len(itr.data)) {
+ itr.err = io.EOF
+ return
+ }
+
+ var h header
+ itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
+ itr.last = h // Store the last header.
+
+ if h.klen == 0 && h.plen == 0 {
+ // Last entry in the table.
+ itr.err = io.EOF
+ return
+ }
+
+ // Populate baseKey if it isn't set yet. This would only happen for the first Next.
+ if len(itr.baseKey) == 0 {
+ // This should be the first Next() for this block. Hence, prefix length should be zero.
+ y.AssertTrue(h.plen == 0)
+ itr.baseKey = itr.data[itr.pos : itr.pos+uint32(h.klen)]
+ }
+ itr.parseKV(h)
+}
+
+func (itr *blockIterator) Prev() {
+ if !itr.init {
+ return
+ }
+ itr.err = nil
+ if itr.last.prev == math.MaxUint32 {
+ // This is the first element of the block!
+ itr.err = io.EOF
+ itr.pos = 0
+ return
+ }
+
+ // Move back using current header's prev.
+ itr.pos = itr.last.prev
+
+ var h header
+ y.AssertTruef(itr.pos < uint32(len(itr.data)), "%d %d", itr.pos, len(itr.data))
+ itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
+ itr.parseKV(h)
+ itr.last = h
+}
+
+func (itr *blockIterator) Key() []byte {
+ if itr.err != nil {
+ return nil
+ }
+ return itr.key
+}
+
+func (itr *blockIterator) Value() []byte {
+ if itr.err != nil {
+ return nil
+ }
+ return itr.val
+}
+
+// Iterator is an iterator for a Table.
+type Iterator struct {
+ t *Table
+ bpos int
+ bi *blockIterator
+ err error
+
+ // Internally, Iterator is bidirectional. However, we only expose the
+ // unidirectional functionality for now.
+ reversed bool
+}
+
+// NewIterator returns a new iterator of the Table
+func (t *Table) NewIterator(reversed bool) *Iterator {
+ t.IncrRef() // Important.
+ ti := &Iterator{t: t, reversed: reversed}
+ ti.next()
+ return ti
+}
+
+// Close closes the iterator (and it must be called).
+func (itr *Iterator) Close() error {
+ return itr.t.DecrRef()
+}
+
+func (itr *Iterator) reset() {
+ itr.bpos = 0
+ itr.err = nil
+}
+
+// Valid follows the y.Iterator interface
+func (itr *Iterator) Valid() bool {
+ return itr.err == nil
+}
+
+func (itr *Iterator) seekToFirst() {
+ numBlocks := len(itr.t.blockIndex)
+ if numBlocks == 0 {
+ itr.err = io.EOF
+ return
+ }
+ itr.bpos = 0
+ block, err := itr.t.block(itr.bpos)
+ if err != nil {
+ itr.err = err
+ return
+ }
+ itr.bi = block.NewIterator()
+ itr.bi.SeekToFirst()
+ itr.err = itr.bi.Error()
+}
+
+func (itr *Iterator) seekToLast() {
+ numBlocks := len(itr.t.blockIndex)
+ if numBlocks == 0 {
+ itr.err = io.EOF
+ return
+ }
+ itr.bpos = numBlocks - 1
+ block, err := itr.t.block(itr.bpos)
+ if err != nil {
+ itr.err = err
+ return
+ }
+ itr.bi = block.NewIterator()
+ itr.bi.SeekToLast()
+ itr.err = itr.bi.Error()
+}
+
+func (itr *Iterator) seekHelper(blockIdx int, key []byte) {
+ itr.bpos = blockIdx
+ block, err := itr.t.block(blockIdx)
+ if err != nil {
+ itr.err = err
+ return
+ }
+ itr.bi = block.NewIterator()
+ itr.bi.Seek(key, origin)
+ itr.err = itr.bi.Error()
+}
+
+// seekFrom brings us to a key that is >= input key.
+func (itr *Iterator) seekFrom(key []byte, whence int) {
+ itr.err = nil
+ switch whence {
+ case origin:
+ itr.reset()
+ case current:
+ }
+
+ idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool {
+ ko := itr.t.blockIndex[idx]
+ return y.CompareKeys(ko.key, key) > 0
+ })
+ if idx == 0 {
+ // The smallest key in our table is already strictly > key. We can return that.
+ // This is like a SeekToFirst.
+ itr.seekHelper(0, key)
+ return
+ }
+
+ // block[idx].smallest is > key.
+ // Since idx>0, we know block[idx-1].smallest is <= key.
+ // There are two cases.
+ // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first
+ // element of block[idx].
+ // 2) Some element in block[idx-1] is >= key. We should go to that element.
+ itr.seekHelper(idx-1, key)
+ if itr.err == io.EOF {
+ // Case 1. Need to visit block[idx].
+ if idx == len(itr.t.blockIndex) {
+ // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table.
+ // There's nothing we can do. Valid() should return false as we seek to end of table.
+ return
+ }
+ // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst.
+ itr.seekHelper(idx, key)
+ }
+ // Case 2: No need to do anything. We already did the seek in block[idx-1].
+}
+
+// seek will reset iterator and seek to >= key.
+func (itr *Iterator) seek(key []byte) {
+ itr.seekFrom(key, origin)
+}
+
+// seekForPrev will reset iterator and seek to <= key.
+func (itr *Iterator) seekForPrev(key []byte) {
+ // TODO: Optimize this. We shouldn't have to take a Prev step.
+ itr.seekFrom(key, origin)
+ if !bytes.Equal(itr.Key(), key) {
+ itr.prev()
+ }
+}
+
+func (itr *Iterator) next() {
+ itr.err = nil
+
+ if itr.bpos >= len(itr.t.blockIndex) {
+ itr.err = io.EOF
+ return
+ }
+
+ if itr.bi == nil {
+ block, err := itr.t.block(itr.bpos)
+ if err != nil {
+ itr.err = err
+ return
+ }
+ itr.bi = block.NewIterator()
+ itr.bi.SeekToFirst()
+ itr.err = itr.bi.Error()
+ return
+ }
+
+ itr.bi.Next()
+ if !itr.bi.Valid() {
+ itr.bpos++
+ itr.bi = nil
+ itr.next()
+ return
+ }
+}
+
+func (itr *Iterator) prev() {
+ itr.err = nil
+ if itr.bpos < 0 {
+ itr.err = io.EOF
+ return
+ }
+
+ if itr.bi == nil {
+ block, err := itr.t.block(itr.bpos)
+ if err != nil {
+ itr.err = err
+ return
+ }
+ itr.bi = block.NewIterator()
+ itr.bi.SeekToLast()
+ itr.err = itr.bi.Error()
+ return
+ }
+
+ itr.bi.Prev()
+ if !itr.bi.Valid() {
+ itr.bpos--
+ itr.bi = nil
+ itr.prev()
+ return
+ }
+}
+
+// Key follows the y.Iterator interface
+func (itr *Iterator) Key() []byte {
+ return itr.bi.Key()
+}
+
+// Value follows the y.Iterator interface
+func (itr *Iterator) Value() (ret y.ValueStruct) {
+ ret.Decode(itr.bi.Value())
+ return
+}
+
+// Next follows the y.Iterator interface
+func (itr *Iterator) Next() {
+ if !itr.reversed {
+ itr.next()
+ } else {
+ itr.prev()
+ }
+}
+
+// Rewind follows the y.Iterator interface
+func (itr *Iterator) Rewind() {
+ if !itr.reversed {
+ itr.seekToFirst()
+ } else {
+ itr.seekToLast()
+ }
+}
+
+// Seek follows the y.Iterator interface
+func (itr *Iterator) Seek(key []byte) {
+ if !itr.reversed {
+ itr.seek(key)
+ } else {
+ itr.seekForPrev(key)
+ }
+}
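+
+// The exported methods above satisfy the y.Iterator interface, so a table can be scanned with the
+// usual Rewind/Valid/Next loop. A minimal sketch (t is an open *Table):
+//
+//	it := t.NewIterator(false) // false = forward iteration.
+//	defer it.Close()           // Required: releases the reference taken by NewIterator.
+//	for it.Rewind(); it.Valid(); it.Next() {
+//		key := it.Key()  // Key with its timestamp suffix.
+//		vs := it.Value() // Decoded y.ValueStruct.
+//		_, _ = key, vs
+//	}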
+
+// ConcatIterator concatenates the sequences defined by several iterators. (It only works with
+// TableIterators, probably just because it's faster to not be so generic.)
+type ConcatIterator struct {
+ idx int // Which iterator is active now.
+ cur *Iterator
+ iters []*Iterator // Corresponds to tables.
+ tables []*Table // Disregarding reversed, this is in ascending order.
+ reversed bool
+}
+
+// NewConcatIterator creates a new concatenated iterator
+func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator {
+ iters := make([]*Iterator, len(tbls))
+ for i := 0; i < len(tbls); i++ {
+ iters[i] = tbls[i].NewIterator(reversed)
+ }
+ return &ConcatIterator{
+ reversed: reversed,
+ iters: iters,
+ tables: tbls,
+ idx: -1, // Not really necessary because s.it.Valid()=false, but good to have.
+ }
+}
+
+func (s *ConcatIterator) setIdx(idx int) {
+ s.idx = idx
+ if idx < 0 || idx >= len(s.iters) {
+ s.cur = nil
+ } else {
+ s.cur = s.iters[s.idx]
+ }
+}
+
+// Rewind implements y.Interface
+func (s *ConcatIterator) Rewind() {
+ if len(s.iters) == 0 {
+ return
+ }
+ if !s.reversed {
+ s.setIdx(0)
+ } else {
+ s.setIdx(len(s.iters) - 1)
+ }
+ s.cur.Rewind()
+}
+
+// Valid implements y.Interface
+func (s *ConcatIterator) Valid() bool {
+ return s.cur != nil && s.cur.Valid()
+}
+
+// Key implements y.Interface
+func (s *ConcatIterator) Key() []byte {
+ return s.cur.Key()
+}
+
+// Value implements y.Interface
+func (s *ConcatIterator) Value() y.ValueStruct {
+ return s.cur.Value()
+}
+
+// Seek brings us to element >= key if reversed is false. Otherwise, <= key.
+func (s *ConcatIterator) Seek(key []byte) {
+ var idx int
+ if !s.reversed {
+ idx = sort.Search(len(s.tables), func(i int) bool {
+ return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
+ })
+ } else {
+ n := len(s.tables)
+ idx = n - 1 - sort.Search(n, func(i int) bool {
+ return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0
+ })
+ }
+ if idx >= len(s.tables) || idx < 0 {
+ s.setIdx(-1)
+ return
+ }
+ // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the
+ // previous table cannot possibly contain key.
+ s.setIdx(idx)
+ s.cur.Seek(key)
+}
+
+// Next advances our concat iterator.
+func (s *ConcatIterator) Next() {
+ s.cur.Next()
+ if s.cur.Valid() {
+ // Nothing to do. Just stay with the current table.
+ return
+ }
+ for { // In case there are empty tables.
+ if !s.reversed {
+ s.setIdx(s.idx + 1)
+ } else {
+ s.setIdx(s.idx - 1)
+ }
+ if s.cur == nil {
+ // End of list. Valid will become false.
+ return
+ }
+ s.cur.Rewind()
+ if s.cur.Valid() {
+ break
+ }
+ }
+}
+
+// Close implements y.Interface.
+func (s *ConcatIterator) Close() error {
+ for _, it := range s.iters {
+ if err := it.Close(); err != nil {
+ return errors.Wrap(err, "ConcatIterator")
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/table/table.go b/vendor/github.com/dgraph-io/badger/table/table.go
new file mode 100644
index 000000000..0a1f42d46
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/table/table.go
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package table
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/AndreasBriese/bbloom"
+ "github.com/dgraph-io/badger/options"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+const fileSuffix = ".sst"
+
+type keyOffset struct {
+ key []byte
+ offset int
+ len int
+}
+
+// TableInterface is useful for testing.
+type TableInterface interface {
+ Smallest() []byte
+ Biggest() []byte
+ DoesNotHave(key []byte) bool
+}
+
+// Table represents a loaded table file with the info we have about it
+type Table struct {
+ sync.Mutex
+
+ fd *os.File // Own fd.
+ tableSize int // Initialized in OpenTable, using fd.Stat().
+
+ blockIndex []keyOffset
+ ref int32 // For file garbage collection. Atomic.
+
+ loadingMode options.FileLoadingMode
+ mmap []byte // Memory mapped.
+
+ // The following are initialized once and const.
+ smallest, biggest []byte // Smallest and largest keys.
+ id uint64 // file id, part of filename
+
+ bf bbloom.Bloom
+
+ Checksum []byte
+}
+
+// IncrRef increments the refcount (having to do with whether the file should be deleted)
+func (t *Table) IncrRef() {
+ atomic.AddInt32(&t.ref, 1)
+}
+
+// DecrRef decrements the refcount and possibly deletes the table
+func (t *Table) DecrRef() error {
+ newRef := atomic.AddInt32(&t.ref, -1)
+ if newRef == 0 {
+ // We can safely delete this file, because for all the current files, we always have
+ // at least one reference pointing to them.
+
+		// On Windows, the file must be unmapped before it can be deleted.
+ if t.loadingMode == options.MemoryMap {
+ if err := y.Munmap(t.mmap); err != nil {
+ return err
+ }
+ }
+ if err := t.fd.Truncate(0); err != nil {
+ // This is very important to let the FS know that the file is deleted.
+ return err
+ }
+ filename := t.fd.Name()
+ if err := t.fd.Close(); err != nil {
+ return err
+ }
+ if err := os.Remove(filename); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type block struct {
+ offset int
+ data []byte
+}
+
+func (b block) NewIterator() *blockIterator {
+ return &blockIterator{data: b.data}
+}
+
+// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function
+// entry. Returns a table with one reference count on it (decrementing which may delete the file!
+// -- consider t.Close() instead). The fd has to be writeable because we call Truncate on it
+// before deleting.
+func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) {
+ fileInfo, err := fd.Stat()
+ if err != nil {
+ // It's OK to ignore fd.Close() errs in this function because we have only read
+ // from the file.
+ _ = fd.Close()
+ return nil, y.Wrap(err)
+ }
+
+ filename := fileInfo.Name()
+ id, ok := ParseFileID(filename)
+ if !ok {
+ _ = fd.Close()
+ return nil, errors.Errorf("Invalid filename: %s", filename)
+ }
+ t := &Table{
+ fd: fd,
+ ref: 1, // Caller is given one reference.
+ id: id,
+ loadingMode: mode,
+ }
+
+ t.tableSize = int(fileInfo.Size())
+
+ // We first load to RAM, so we can read the index and do checksum.
+ if err := t.loadToRAM(); err != nil {
+ return nil, err
+ }
+ // Enforce checksum before we read index. Otherwise, if the file was
+ // truncated, we'd end up with panics in readIndex.
+ if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) {
+ return nil, fmt.Errorf(
+ "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+
+ " NOT including table %s. This would lead to missing data."+
+ "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum)
+ }
+ if err := t.readIndex(); err != nil {
+ return nil, y.Wrap(err)
+ }
+
+ it := t.NewIterator(false)
+ defer it.Close()
+ it.Rewind()
+ if it.Valid() {
+ t.smallest = it.Key()
+ }
+
+ it2 := t.NewIterator(true)
+ defer it2.Close()
+ it2.Rewind()
+ if it2.Valid() {
+ t.biggest = it2.Key()
+ }
+
+ switch mode {
+ case options.LoadToRAM:
+ // No need to do anything. t.mmap is already filled.
+ case options.MemoryMap:
+ t.mmap, err = y.Mmap(fd, false, fileInfo.Size())
+ if err != nil {
+ _ = fd.Close()
+ return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name())
+ }
+ case options.FileIO:
+ t.mmap = nil
+ default:
+ panic(fmt.Sprintf("Invalid loading mode: %v", mode))
+ }
+ return t, nil
+}
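+
+// A hedged sketch of how a freshly built table ends up back in a *Table, mirroring the stream
+// writer's createTable path; fileID, dir and data (from Builder.Finish) are assumed inputs.
+//
+//	fd, err := y.CreateSyncedFile(NewFilename(fileID, dir), true)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := fd.Write(data); err != nil {
+//		return err
+//	}
+//	tbl, err := OpenTable(fd, options.LoadToRAM, nil) // nil checksum: skip the MANIFEST check.
+//	if err != nil {
+//		return err
+//	}
+//	defer tbl.DecrRef() // Drops the reference handed out by OpenTable; may delete the file.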
+
+// Close closes the open table. (Releases resources back to the OS.)
+func (t *Table) Close() error {
+ if t.loadingMode == options.MemoryMap {
+ if err := y.Munmap(t.mmap); err != nil {
+ return err
+ }
+ }
+
+ return t.fd.Close()
+}
+
+func (t *Table) read(off, sz int) ([]byte, error) {
+ if len(t.mmap) > 0 {
+ if len(t.mmap[off:]) < sz {
+ return nil, y.ErrEOF
+ }
+ return t.mmap[off : off+sz], nil
+ }
+
+ res := make([]byte, sz)
+ nbr, err := t.fd.ReadAt(res, int64(off))
+ y.NumReads.Add(1)
+ y.NumBytesRead.Add(int64(nbr))
+ return res, err
+}
+
+func (t *Table) readNoFail(off, sz int) []byte {
+ res, err := t.read(off, sz)
+ y.Check(err)
+ return res
+}
+
+func (t *Table) readIndex() error {
+ if len(t.mmap) != t.tableSize {
+ panic("Table size does not match the read bytes")
+ }
+ readPos := t.tableSize
+
+ // Read bloom filter.
+ readPos -= 4
+ buf := t.readNoFail(readPos, 4)
+ bloomLen := int(binary.BigEndian.Uint32(buf))
+ readPos -= bloomLen
+ data := t.readNoFail(readPos, bloomLen)
+ t.bf = bbloom.JSONUnmarshal(data)
+
+ readPos -= 4
+ buf = t.readNoFail(readPos, 4)
+ restartsLen := int(binary.BigEndian.Uint32(buf))
+
+ readPos -= 4 * restartsLen
+ buf = t.readNoFail(readPos, 4*restartsLen)
+
+ offsets := make([]int, restartsLen)
+ for i := 0; i < restartsLen; i++ {
+ offsets[i] = int(binary.BigEndian.Uint32(buf[:4]))
+ buf = buf[4:]
+ }
+
+ // The last offset stores the end of the last block.
+ for i := 0; i < len(offsets); i++ {
+ var o int
+ if i == 0 {
+ o = 0
+ } else {
+ o = offsets[i-1]
+ }
+
+ ko := keyOffset{
+ offset: o,
+ len: offsets[i] - o,
+ }
+ t.blockIndex = append(t.blockIndex, ko)
+ }
+
+ // Execute this index read serially, because we already have table data in memory.
+ var h header
+ for idx := range t.blockIndex {
+ ko := &t.blockIndex[idx]
+
+ hbuf := t.readNoFail(ko.offset, h.Size())
+ h.Decode(hbuf)
+ y.AssertTrue(h.plen == 0)
+
+ key := t.readNoFail(ko.offset+len(hbuf), int(h.klen))
+ ko.key = append([]byte{}, key...)
+ }
+
+ return nil
+}
+
+func (t *Table) block(idx int) (block, error) {
+ y.AssertTruef(idx >= 0, "idx=%d", idx)
+ if idx >= len(t.blockIndex) {
+ return block{}, errors.New("block out of index")
+ }
+
+ ko := t.blockIndex[idx]
+ blk := block{
+ offset: ko.offset,
+ }
+ var err error
+ blk.data, err = t.read(blk.offset, ko.len)
+ return blk, err
+}
+
+// Size is its file size in bytes
+func (t *Table) Size() int64 { return int64(t.tableSize) }
+
+// Smallest is its smallest key, or nil if there are none
+func (t *Table) Smallest() []byte { return t.smallest }
+
+// Biggest is its biggest key, or nil if there are none
+func (t *Table) Biggest() []byte { return t.biggest }
+
+// Filename is NOT the file name. Just kidding, it is.
+func (t *Table) Filename() string { return t.fd.Name() }
+
+// ID is the table's ID number (used to make the file name).
+func (t *Table) ID() uint64 { return t.id }
+
+// DoesNotHave returns true if (but not "only if") the table does not have the key. It does a
+// bloom filter lookup.
+func (t *Table) DoesNotHave(key []byte) bool { return !t.bf.Has(key) }
+
+// ParseFileID reads the file id out of a filename.
+func ParseFileID(name string) (uint64, bool) {
+ name = path.Base(name)
+ if !strings.HasSuffix(name, fileSuffix) {
+ return 0, false
+ }
+ // suffix := name[len(fileSuffix):]
+ name = strings.TrimSuffix(name, fileSuffix)
+ id, err := strconv.Atoi(name)
+ if err != nil {
+ return 0, false
+ }
+ y.AssertTrue(id >= 0)
+ return uint64(id), true
+}
+
+// IDToFilename does the inverse of ParseFileID
+func IDToFilename(id uint64) string {
+ return fmt.Sprintf("%06d", id) + fileSuffix
+}
+
+// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table
+// filepath.
+func NewFilename(id uint64, dir string) string {
+ return filepath.Join(dir, IDToFilename(id))
+}
+
+func (t *Table) loadToRAM() error {
+ if _, err := t.fd.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ t.mmap = make([]byte, t.tableSize)
+ sum := sha256.New()
+ tee := io.TeeReader(t.fd, sum)
+ read, err := tee.Read(t.mmap)
+ if err != nil || read != t.tableSize {
+ return y.Wrapf(err, "Unable to load file in memory. Table file: %s", t.Filename())
+ }
+ t.Checksum = sum.Sum(nil)
+ y.NumReads.Add(1)
+ y.NumBytesRead.Add(int64(read))
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/test.sh b/vendor/github.com/dgraph-io/badger/test.sh
new file mode 100644
index 000000000..5b14bfd8f
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/test.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -e
+
+# Ensure that we can compile the binary.
+pushd badger
+go build -v .
+popd
+
+# Run the memory intensive tests first.
+go test -v --manual=true -run='TestBigKeyValuePairs$'
+go test -v --manual=true -run='TestPushValueLogLimit'
+
+# Run the special Truncate test.
+rm -rf p
+go test -v --manual=true -run='TestTruncateVlogNoClose$' .
+truncate --size=4096 p/000000.vlog
+go test -v --manual=true -run='TestTruncateVlogNoClose2$' .
+go test -v --manual=true -run='TestTruncateVlogNoClose3$' .
+rm -rf p
+
+# Then the normal tests.
+echo
+echo "==> Starting tests with value log mmapped..."
+sleep 5
+go test -v --vlog_mmap=true -race ./...
+
+echo
+echo "==> Starting tests with value log not mmapped..."
+sleep 5
+go test -v --vlog_mmap=false -race ./...
diff --git a/vendor/github.com/dgraph-io/badger/txn.go b/vendor/github.com/dgraph-io/badger/txn.go
new file mode 100644
index 000000000..67411a8f5
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/txn.go
@@ -0,0 +1,701 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "math"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+
+ "github.com/dgraph-io/badger/y"
+ farm "github.com/dgryski/go-farm"
+ "github.com/pkg/errors"
+)
+
+type oracle struct {
+ // A 64-bit integer must be at the top for memory alignment. See issue #311.
+ refCount int64
+ isManaged bool // Does not change value, so no locking required.
+
+ sync.Mutex // For nextTxnTs and commits.
+ // writeChLock lock is for ensuring that transactions go to the write
+ // channel in the same order as their commit timestamps.
+ writeChLock sync.Mutex
+ nextTxnTs uint64
+
+ // Used to block NewTransaction, so all previous commits are visible to a new read.
+ txnMark *y.WaterMark
+
+ // Either of these is used to determine which versions can be permanently
+ // discarded during compaction.
+ discardTs uint64 // Used by ManagedDB.
+ readMark *y.WaterMark // Used by DB.
+
+ // commits stores a key fingerprint and latest commit counter for it.
+ // refCount is used to clear out commits map to avoid a memory blowup.
+ commits map[uint64]uint64
+
+ // closer is used to stop watermarks.
+ closer *y.Closer
+}
+
+func newOracle(opt Options) *oracle {
+ orc := &oracle{
+ isManaged: opt.managedTxns,
+ commits: make(map[uint64]uint64),
+ // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open.
+ //
+ // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here.
+ // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ readMark: &y.WaterMark{Name: "badger.PendingReads"},
+ txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"},
+ closer: y.NewCloser(2),
+ }
+ orc.readMark.Init(orc.closer)
+ orc.txnMark.Init(orc.closer)
+ return orc
+}
+
+func (o *oracle) Stop() {
+ o.closer.SignalAndWait()
+}
+
+func (o *oracle) addRef() {
+ atomic.AddInt64(&o.refCount, 1)
+}
+
+func (o *oracle) decrRef() {
+ if atomic.AddInt64(&o.refCount, -1) != 0 {
+ return
+ }
+
+ // Clear out commits maps to release memory.
+ o.Lock()
+ defer o.Unlock()
+ // Avoids the race where something new is added to commitsMap
+ // after we check refCount and before we take Lock.
+ if atomic.LoadInt64(&o.refCount) != 0 {
+ return
+ }
+ if len(o.commits) >= 1000 { // If the map is still small, let it slide.
+ o.commits = make(map[uint64]uint64)
+ }
+}
+
+func (o *oracle) readTs() uint64 {
+ if o.isManaged {
+ panic("ReadTs should not be retrieved for managed DB")
+ }
+
+ var readTs uint64
+ o.Lock()
+ readTs = o.nextTxnTs - 1
+ o.readMark.Begin(readTs)
+ o.Unlock()
+
+ // Wait for all txns which have no conflicts, have been assigned a commit
+ // timestamp and are going through the write to value log and LSM tree
+ // process. Not waiting here could mean that some txns which have been
+ // committed would not be read.
+ y.Check(o.txnMark.WaitForMark(context.Background(), readTs))
+ return readTs
+}
+
+func (o *oracle) nextTs() uint64 {
+ o.Lock()
+ defer o.Unlock()
+ return o.nextTxnTs
+}
+
+func (o *oracle) incrementNextTs() {
+ o.Lock()
+ defer o.Unlock()
+ o.nextTxnTs++
+}
+
+// Any deleted or invalid versions at or below ts would be discarded during
+// compaction to reclaim disk space in LSM tree and thence value log.
+func (o *oracle) setDiscardTs(ts uint64) {
+ o.Lock()
+ defer o.Unlock()
+ o.discardTs = ts
+}
+
+func (o *oracle) discardAtOrBelow() uint64 {
+ if o.isManaged {
+ o.Lock()
+ defer o.Unlock()
+ return o.discardTs
+ }
+ return o.readMark.DoneUntil()
+}
+
+// hasConflict must be called while having a lock.
+func (o *oracle) hasConflict(txn *Txn) bool {
+ if len(txn.reads) == 0 {
+ return false
+ }
+ for _, ro := range txn.reads {
+ // A commit at the read timestamp is expected.
+ // But, any commit after the read timestamp should cause a conflict.
+ if ts, has := o.commits[ro]; has && ts > txn.readTs {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *oracle) newCommitTs(txn *Txn) uint64 {
+ o.Lock()
+ defer o.Unlock()
+
+ if o.hasConflict(txn) {
+ return 0
+ }
+
+ var ts uint64
+ if !o.isManaged {
+ // This is the general case, when user doesn't specify the read and commit ts.
+ ts = o.nextTxnTs
+ o.nextTxnTs++
+ o.txnMark.Begin(ts)
+
+ } else {
+ // If commitTs is set, use it instead.
+ ts = txn.commitTs
+ }
+
+ for _, w := range txn.writes {
+ o.commits[w] = ts // Update the commitTs.
+ }
+ return ts
+}
+
+func (o *oracle) doneCommit(cts uint64) {
+ if o.isManaged {
+ // No need to update anything.
+ return
+ }
+ o.txnMark.Done(cts)
+}
+
+// Txn represents a Badger transaction.
+type Txn struct {
+ readTs uint64
+ commitTs uint64
+
+ update bool // update is used to conditionally keep track of reads.
+ reads []uint64 // contains fingerprints of keys read.
+ writes []uint64 // contains fingerprints of keys written.
+
+ pendingWrites map[string]*Entry // cache stores any writes done by txn.
+
+ db *DB
+ discarded bool
+
+ size int64
+ count int64
+ numIterators int32
+}
+
+type pendingWritesIterator struct {
+ entries []*Entry
+ nextIdx int
+ readTs uint64
+ reversed bool
+}
+
+func (pi *pendingWritesIterator) Next() {
+ pi.nextIdx++
+}
+
+func (pi *pendingWritesIterator) Rewind() {
+ pi.nextIdx = 0
+}
+
+func (pi *pendingWritesIterator) Seek(key []byte) {
+ key = y.ParseKey(key)
+ pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool {
+ cmp := bytes.Compare(pi.entries[idx].Key, key)
+ if !pi.reversed {
+ return cmp >= 0
+ }
+ return cmp <= 0
+ })
+}
+
+func (pi *pendingWritesIterator) Key() []byte {
+ y.AssertTrue(pi.Valid())
+ entry := pi.entries[pi.nextIdx]
+ return y.KeyWithTs(entry.Key, pi.readTs)
+}
+
+func (pi *pendingWritesIterator) Value() y.ValueStruct {
+ y.AssertTrue(pi.Valid())
+ entry := pi.entries[pi.nextIdx]
+ return y.ValueStruct{
+ Value: entry.Value,
+ Meta: entry.meta,
+ UserMeta: entry.UserMeta,
+ ExpiresAt: entry.ExpiresAt,
+ Version: pi.readTs,
+ }
+}
+
+func (pi *pendingWritesIterator) Valid() bool {
+ return pi.nextIdx < len(pi.entries)
+}
+
+func (pi *pendingWritesIterator) Close() error {
+ return nil
+}
+
+func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator {
+ if !txn.update || len(txn.pendingWrites) == 0 {
+ return nil
+ }
+ entries := make([]*Entry, 0, len(txn.pendingWrites))
+ for _, e := range txn.pendingWrites {
+ entries = append(entries, e)
+ }
+ // Number of pending writes per transaction shouldn't be too big in general.
+ sort.Slice(entries, func(i, j int) bool {
+ cmp := bytes.Compare(entries[i].Key, entries[j].Key)
+ if !reversed {
+ return cmp < 0
+ }
+ return cmp > 0
+ })
+ return &pendingWritesIterator{
+ readTs: txn.readTs,
+ entries: entries,
+ reversed: reversed,
+ }
+}
+
+func (txn *Txn) checkSize(e *Entry) error {
+ count := txn.count + 1
+ // Extra bytes for version in key.
+ size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10
+ if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize {
+ return ErrTxnTooBig
+ }
+ txn.count, txn.size = count, size
+ return nil
+}
+
+func exceedsSize(prefix string, max int64, key []byte) error {
+ return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s",
+ prefix, len(key), max, prefix, hex.Dump(key[:1<<10]))
+}
+
+func (txn *Txn) modify(e *Entry) error {
+ const maxKeySize = 65000
+
+ switch {
+ case !txn.update:
+ return ErrReadOnlyTxn
+ case txn.discarded:
+ return ErrDiscardedTxn
+ case len(e.Key) == 0:
+ return ErrEmptyKey
+ case bytes.HasPrefix(e.Key, badgerPrefix):
+ return ErrInvalidKey
+ case len(e.Key) > maxKeySize:
+		// Key length can't be more than uint16, as determined by table::header. To keep things
+		// safe and to leave room for the badger move prefix and a timestamp suffix, we cut it
+		// down to 65000 instead of using 65536.
+ return exceedsSize("Key", maxKeySize, e.Key)
+ case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize:
+ return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value)
+ }
+
+ if err := txn.checkSize(e); err != nil {
+ return err
+ }
+ fp := farm.Fingerprint64(e.Key) // Avoid dealing with byte arrays.
+ txn.writes = append(txn.writes, fp)
+ txn.pendingWrites[string(e.Key)] = e
+ return nil
+}
+
+// Set adds a key-value pair to the database.
+// It will return ErrReadOnlyTxn if update flag was set to false when creating the transaction.
+//
+// The current transaction keeps a reference to the key and val byte slice
+// arguments. Users must not modify key and val until the end of the transaction.
+func (txn *Txn) Set(key, val []byte) error {
+ return txn.SetEntry(NewEntry(key, val))
+}
+
+// SetEntry takes an Entry struct and adds the key-value pair in the struct,
+// along with other metadata to the database.
+//
+// The current transaction keeps a reference to the entry passed in argument.
+// Users must not modify the entry until the end of the transaction.
+func (txn *Txn) SetEntry(e *Entry) error {
+ return txn.modify(e)
+}
+
+// Delete deletes a key.
+//
+// This is done by adding a delete marker for the key at commit timestamp. Any
+// reads happening before this timestamp would be unaffected. Any reads after
+// this commit would see the deletion.
+//
+// The current transaction keeps a reference to the key byte slice argument.
+// Users must not modify the key until the end of the transaction.
+func (txn *Txn) Delete(key []byte) error {
+ e := &Entry{
+ Key: key,
+ meta: bitDelete,
+ }
+ return txn.modify(e)
+}
+
+// Get looks for key and returns corresponding Item.
+// If key is not found, ErrKeyNotFound is returned.
+func (txn *Txn) Get(key []byte) (item *Item, rerr error) {
+ if len(key) == 0 {
+ return nil, ErrEmptyKey
+ } else if txn.discarded {
+ return nil, ErrDiscardedTxn
+ }
+
+ item = new(Item)
+ if txn.update {
+ if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) {
+ if isDeletedOrExpired(e.meta, e.ExpiresAt) {
+ return nil, ErrKeyNotFound
+ }
+ // Fulfill from cache.
+ item.meta = e.meta
+ item.val = e.Value
+ item.userMeta = e.UserMeta
+ item.key = key
+ item.status = prefetched
+ item.version = txn.readTs
+ item.expiresAt = e.ExpiresAt
+ // We probably don't need to set db on item here.
+ return item, nil
+ }
+ // Only track reads if this is an update txn. No need to track the read if the
+ // txn serviced it internally.
+ txn.addReadKey(key)
+ }
+
+ seek := y.KeyWithTs(key, txn.readTs)
+ vs, err := txn.db.get(seek)
+ if err != nil {
+ return nil, errors.Wrapf(err, "DB::Get key: %q", key)
+ }
+ if vs.Value == nil && vs.Meta == 0 {
+ return nil, ErrKeyNotFound
+ }
+ if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
+ return nil, ErrKeyNotFound
+ }
+
+ item.key = key
+ item.version = vs.Version
+ item.meta = vs.Meta
+ item.userMeta = vs.UserMeta
+ item.db = txn.db
+ item.vptr = vs.Value // TODO: Do we need to copy this over?
+ item.txn = txn
+ item.expiresAt = vs.ExpiresAt
+ return item, nil
+}
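+
+// Illustrative sketch (not part of upstream badger): a typical read fetches the
+// Item and copies the value out before the transaction ends. The key "answer"
+// is hypothetical, and Item.ValueCopy is assumed from the Item API defined
+// elsewhere in this package:
+//
+//   err := db.View(func(txn *Txn) error {
+//       item, err := txn.Get([]byte("answer"))
+//       if err != nil {
+//           return err
+//       }
+//       val, err := item.ValueCopy(nil) // copy out; item is only valid inside the txn
+//       if err != nil {
+//           return err
+//       }
+//       _ = val
+//       return nil
+//   })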
+
+func (txn *Txn) addReadKey(key []byte) {
+ if txn.update {
+ fp := farm.Fingerprint64(key)
+ txn.reads = append(txn.reads, fp)
+ }
+}
+
+// Discard discards a created transaction. This method is very important and must be called. Commit
+// method calls this internally, however, calling this multiple times doesn't cause any issues. So,
+// this can safely be called via a defer right when transaction is created.
+//
+// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned.
+func (txn *Txn) Discard() {
+ if txn.discarded { // Avoid a re-run.
+ return
+ }
+ if atomic.LoadInt32(&txn.numIterators) > 0 {
+ panic("Unclosed iterator at time of Txn.Discard.")
+ }
+ txn.discarded = true
+ if !txn.db.orc.isManaged {
+ txn.db.orc.readMark.Done(txn.readTs)
+ }
+ if txn.update {
+ txn.db.orc.decrRef()
+ }
+}
+
+func (txn *Txn) commitAndSend() (func() error, error) {
+ orc := txn.db.orc
+ // Ensure that the order in which we get the commit timestamp is the same as
+ // the order in which we push these updates to the write channel. So, we
+ // acquire a writeChLock before getting a commit timestamp, and only release
+ // it after pushing the entries to it.
+ orc.writeChLock.Lock()
+ defer orc.writeChLock.Unlock()
+
+ commitTs := orc.newCommitTs(txn)
+ if commitTs == 0 {
+ return nil, ErrConflict
+ }
+
+ // The following debug information is what led to determining the cause of the
+ // bank txn violation bug, and it took a whole bunch of effort to narrow it
+ // down to here. So, keep this around for at least a couple of months.
+ // var b strings.Builder
+ // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ",
+ // txn.readTs, commitTs, txn.reads, txn.writes)
+ entries := make([]*Entry, 0, len(txn.pendingWrites)+1)
+ for _, e := range txn.pendingWrites {
+ // fmt.Fprintf(&b, "[%q : %q], ", e.Key, e.Value)
+
+ // Suffix the keys with commit ts, so the key versions are sorted in
+ // descending order of commit timestamp.
+ e.Key = y.KeyWithTs(e.Key, commitTs)
+ e.meta |= bitTxn
+ entries = append(entries, e)
+ }
+ // log.Printf("%s\n", b.String())
+ e := &Entry{
+ Key: y.KeyWithTs(txnKey, commitTs),
+ Value: []byte(strconv.FormatUint(commitTs, 10)),
+ meta: bitFinTxn,
+ }
+ entries = append(entries, e)
+
+ req, err := txn.db.sendToWriteCh(entries)
+ if err != nil {
+ orc.doneCommit(commitTs)
+ return nil, err
+ }
+ ret := func() error {
+ err := req.Wait()
+ // Wait before marking commitTs as done.
+ // We can't defer doneCommit above, because it is being called from a
+ // callback here.
+ orc.doneCommit(commitTs)
+ return err
+ }
+ return ret, nil
+}
+
+func (txn *Txn) commitPrecheck() {
+ if txn.commitTs == 0 && txn.db.opt.managedTxns {
+ panic("Commit cannot be called with managedDB=true. Use CommitAt.")
+ }
+ if txn.discarded {
+ panic("Trying to commit a discarded txn")
+ }
+}
+
+// Commit commits the transaction, following these steps:
+//
+// 1. If there are no writes, return immediately.
+//
+// 2. Check if read rows were updated since txn started. If so, return ErrConflict.
+//
+// 3. If no conflict, generate a commit timestamp and update written rows' commit ts.
+//
+// 4. Batch up all writes, write them to value log and LSM tree.
+//
+// 5. If callback is provided, Badger will return immediately after checking
+// for conflicts. Writes to the database will happen in the background. If
+// there is a conflict, an error will be returned and the callback will not
+// run. If there are no conflicts, the callback will be called in the
+// background upon successful completion of writes or any error during write.
+//
+// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM
+// tree won't be updated, so there's no need for any rollback.
+func (txn *Txn) Commit() error {
+ txn.commitPrecheck() // Precheck before discarding txn.
+ defer txn.Discard()
+
+ if len(txn.writes) == 0 {
+ return nil // Nothing to do.
+ }
+
+ txnCb, err := txn.commitAndSend()
+ if err != nil {
+ return err
+ }
+ // If batchSet failed, LSM would not have been updated. So, no need to rollback anything.
+
+ // TODO: What if some of the txns successfully make it to value log, but others fail.
+ // Nothing gets updated to LSM, until a restart happens.
+ return txnCb()
+}
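+
+// Illustrative sketch (not part of upstream badger): when managing a transaction
+// by hand, Commit errors such as ErrConflict are surfaced to the caller, who
+// decides whether to retry. The key/value pair below is hypothetical:
+//
+//   txn := db.NewTransaction(true)
+//   defer txn.Discard() // safe even though Commit also discards
+//   if err := txn.Set([]byte("k"), []byte("v")); err != nil {
+//       return err
+//   }
+//   if err := txn.Commit(); err == ErrConflict {
+//       // A read key was modified concurrently; retry with a fresh transaction.
+//   }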
+
+type txnCb struct {
+ commit func() error
+ user func(error)
+ err error
+}
+
+func runTxnCallback(cb *txnCb) {
+ switch {
+ case cb == nil:
+ panic("txn callback is nil")
+ case cb.user == nil:
+ panic("Must have caught a nil callback for txn.CommitWith")
+ case cb.err != nil:
+ cb.user(cb.err)
+ case cb.commit != nil:
+ err := cb.commit()
+ cb.user(err)
+ default:
+ cb.user(nil)
+ }
+}
+
+// CommitWith acts like Commit, but takes a callback, which gets run via a
+// goroutine to avoid blocking this function. The callback is guaranteed to run,
+// so it is safe to increment a sync.WaitGroup before calling CommitWith and
+// decrement it in the callback, in order to block until all callbacks have run.
+func (txn *Txn) CommitWith(cb func(error)) {
+ txn.commitPrecheck() // Precheck before discarding txn.
+ defer txn.Discard()
+
+ if cb == nil {
+ panic("Nil callback provided to CommitWith")
+ }
+
+ if len(txn.writes) == 0 {
+ // Do not run these callbacks from here, because the CommitWith and the
+ // callback might be acquiring the same locks. Instead run the callback
+ // from another goroutine.
+ go runTxnCallback(&txnCb{user: cb, err: nil})
+ return
+ }
+
+ commitCb, err := txn.commitAndSend()
+ if err != nil {
+ go runTxnCallback(&txnCb{user: cb, err: err})
+ return
+ }
+
+ go runTxnCallback(&txnCb{user: cb, commit: commitCb})
+}
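+
+// Illustrative sketch (not part of upstream badger): because the callback is
+// guaranteed to run, a sync.WaitGroup can be used to wait for asynchronous
+// commits, as the doc comment above describes:
+//
+//   var wg sync.WaitGroup
+//   wg.Add(1)
+//   txn.CommitWith(func(err error) {
+//       defer wg.Done()
+//       if err != nil {
+//           // handle the commit/write error
+//       }
+//   })
+//   wg.Wait()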
+
+// ReadTs returns the read timestamp of the transaction.
+func (txn *Txn) ReadTs() uint64 {
+ return txn.readTs
+}
+
+// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions,
+// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking
+// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by
+// another transaction.
+//
+// For read-only transactions, set update to false. In this mode, we don't track the rows read for
+// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead.
+//
+// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and
+// should only be run serially. It doesn't matter if a transaction is created by one goroutine and
+// passed down to another, as long as the Txn APIs are called serially.
+//
+// When you create a new transaction, it is absolutely essential to call
+// Discard(). This should be done irrespective of what the update param is set
+// to. Commit API internally runs Discard, but running it twice wouldn't cause
+// any issues.
+//
+// txn := db.NewTransaction(false)
+// defer txn.Discard()
+// // Call various APIs.
+func (db *DB) NewTransaction(update bool) *Txn {
+ return db.newTransaction(update, false)
+}
+
+func (db *DB) newTransaction(update, isManaged bool) *Txn {
+ if db.opt.ReadOnly && update {
+ // DB is read-only, force read-only transaction.
+ update = false
+ }
+
+ txn := &Txn{
+ update: update,
+ db: db,
+ count: 1, // One extra entry for BitFin.
+ size: int64(len(txnKey) + 10), // Some buffer for the extra entry.
+ }
+ if update {
+ txn.pendingWrites = make(map[string]*Entry)
+ txn.db.orc.addRef()
+ }
+ // It is important that the oracle addRef happens BEFORE we retrieve a read
+ // timestamp. Otherwise, it is possible that the oracle commit map would
+ // become nil after we get the read timestamp.
+ // The sequence of events can be:
+ // 1. This txn gets a read timestamp.
+ // 2. Another txn working on the same keyset commits them, and decrements
+ // the reference to oracle.
+ // 3. Oracle ref reaches zero, resetting commit map.
+ // 4. This txn increments the oracle reference.
+ // 5. Now this txn would go on to commit the keyset, and no conflicts
+ // would be detected.
+ // See issue: https://github.com/dgraph-io/badger/issues/574
+ if !isManaged {
+ txn.readTs = db.orc.readTs()
+ }
+ return txn
+}
+
+// View executes a function creating and managing a read-only transaction for the user. Error
+// returned by the function is relayed by the View method.
+// If View is used with managed transactions, it would assume a read timestamp of MaxUint64.
+func (db *DB) View(fn func(txn *Txn) error) error {
+ var txn *Txn
+ if db.opt.managedTxns {
+ txn = db.NewTransactionAt(math.MaxUint64, false)
+ } else {
+ txn = db.NewTransaction(false)
+ }
+ defer txn.Discard()
+
+ return fn(txn)
+}
+
+// Update executes a function, creating and managing a read-write transaction
+// for the user. Error returned by the function is relayed by the Update method.
+// Update cannot be used with managed transactions.
+func (db *DB) Update(fn func(txn *Txn) error) error {
+ if db.opt.managedTxns {
+ panic("Update can only be used with managedDB=false.")
+ }
+ txn := db.NewTransaction(true)
+ defer txn.Discard()
+
+ if err := fn(txn); err != nil {
+ return err
+ }
+
+ return txn.Commit()
+}
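+
+// Illustrative sketch (not part of upstream badger): View and Update wrap the
+// transaction lifecycle, so most callers never touch Txn directly. The
+// "hello"/"world" pair is hypothetical:
+//
+//   err := db.Update(func(txn *Txn) error {
+//       return txn.Set([]byte("hello"), []byte("world"))
+//   })
+//   if err == nil {
+//       err = db.View(func(txn *Txn) error {
+//           _, gerr := txn.Get([]byte("hello"))
+//           return gerr
+//       })
+//   }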
diff --git a/vendor/github.com/dgraph-io/badger/util.go b/vendor/github.com/dgraph-io/badger/util.go
new file mode 100644
index 000000000..c5173e26c
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/util.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "encoding/hex"
+ "io/ioutil"
+ "math/rand"
+ "sync/atomic"
+ "time"
+
+ "github.com/dgraph-io/badger/table"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+)
+
+func (s *levelsController) validate() error {
+ for _, l := range s.levels {
+ if err := l.validate(); err != nil {
+ return errors.Wrap(err, "Levels Controller")
+ }
+ }
+ return nil
+}
+
+// validate does some sanity checks on one level of data or in-memory index.
+func (s *levelHandler) validate() error {
+ if s.level == 0 {
+ return nil
+ }
+
+ s.RLock()
+ defer s.RUnlock()
+ numTables := len(s.tables)
+ for j := 1; j < numTables; j++ {
+ if j >= len(s.tables) {
+ return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables)
+ }
+
+ if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 {
+ return errors.Errorf(
+ "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d",
+ hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()),
+ s.level, j, numTables)
+ }
+
+ if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 {
+ return errors.Errorf(
+ "Intra: %q vs %q: level=%d j=%d numTables=%d",
+ s.tables[j].Smallest(), s.tables[j].Biggest(), s.level, j, numTables)
+ }
+ }
+ return nil
+}
+
+// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() }
+
+// // debugPrintMore shows key ranges of each level.
+// func (s *levelsController) debugPrintMore() {
+// s.Lock()
+// defer s.Unlock()
+// for i := 0; i < s.kv.opt.MaxLevels; i++ {
+// s.levels[i].debugPrintMore()
+// }
+// }
+
+// func (s *levelHandler) debugPrintMore() {
+// s.RLock()
+// defer s.RUnlock()
+// s.elog.Printf("Level %d:", s.level)
+// for _, t := range s.tables {
+// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest())
+// }
+// y.Printf("\n")
+// }
+
+// reserveFileID reserves a unique file id.
+func (s *levelsController) reserveFileID() uint64 {
+ id := atomic.AddUint64(&s.nextFileID, 1)
+ return id - 1
+}
+
+func getIDMap(dir string) map[uint64]struct{} {
+ fileInfos, err := ioutil.ReadDir(dir)
+ y.Check(err)
+ idMap := make(map[uint64]struct{})
+ for _, info := range fileInfos {
+ if info.IsDir() {
+ continue
+ }
+ fileID, ok := table.ParseFileID(info.Name())
+ if !ok {
+ continue
+ }
+ idMap[fileID] = struct{}{}
+ }
+ return idMap
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
diff --git a/vendor/github.com/dgraph-io/badger/value.go b/vendor/github.com/dgraph-io/badger/value.go
new file mode 100644
index 000000000..f57f1b3ba
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/value.go
@@ -0,0 +1,1455 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "math"
+ "math/rand"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/dgraph-io/badger/options"
+ "github.com/dgraph-io/badger/y"
+ "github.com/pkg/errors"
+ "golang.org/x/net/trace"
+)
+
+// Entry meta bytes use the bit flags below. Among other things, they help us distinguish between
+// a key that has never been seen and a key that has been explicitly deleted.
+const (
+ bitDelete byte = 1 << 0 // Set if the key has been deleted.
+ bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
+ bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded.
+ // Set if item shouldn't be discarded via compactions (used by merge operator)
+ bitMergeEntry byte = 1 << 3
+ // The MSB 2 bits are for transactions.
+ bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
+ bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
+
+ mi int64 = 1 << 20
+
+ // The number of updates after which discard map should be flushed into badger.
+ discardStatsFlushThreshold = 100
+)
+
+type logFile struct {
+ path string
+ // This is a lock on the log file. It guards the fd’s value, the file’s
+ // existence and the file’s memory map.
+ //
+ // Use shared ownership when reading/writing the file or memory map, use
+ // exclusive ownership to open/close the descriptor, unmap or remove the file.
+ lock sync.RWMutex
+ fd *os.File
+ fid uint32
+ fmap []byte
+ size uint32
+ loadingMode options.FileLoadingMode
+}
+
+// openReadOnly assumes that we have a write lock on logFile.
+func (lf *logFile) openReadOnly() error {
+ var err error
+ lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
+ if err != nil {
+ return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
+ }
+
+ fi, err := lf.fd.Stat()
+ if err != nil {
+ return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
+ }
+ y.AssertTrue(fi.Size() <= math.MaxUint32)
+ lf.size = uint32(fi.Size())
+
+ if err = lf.mmap(fi.Size()); err != nil {
+ _ = lf.fd.Close()
+ return y.Wrapf(err, "Unable to map file: %q", fi.Name())
+ }
+
+ return nil
+}
+
+func (lf *logFile) mmap(size int64) (err error) {
+ if lf.loadingMode != options.MemoryMap {
+ // Nothing to do
+ return nil
+ }
+ lf.fmap, err = y.Mmap(lf.fd, false, size)
+ if err == nil {
+ err = y.Madvise(lf.fmap, false) // Disable readahead
+ }
+ return err
+}
+
+func (lf *logFile) munmap() (err error) {
+ if lf.loadingMode != options.MemoryMap {
+ // Nothing to do
+ return nil
+ }
+ if err := y.Munmap(lf.fmap); err != nil {
+ return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path)
+ }
+ return nil
+}
+
+// Acquire lock on mmap/file if you are calling this
+func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) {
+ var nbr int64
+ offset := p.Offset
+ if lf.loadingMode == options.FileIO {
+ buf = s.Resize(int(p.Len))
+ var n int
+ n, err = lf.fd.ReadAt(buf, int64(offset))
+ nbr = int64(n)
+ } else {
+ // Do not convert size to uint32, because the lf.fmap can be of size
+ // 4GB, which overflows the uint32 during conversion to make the size 0,
+ // causing the read to fail with ErrEOF. See issue #585.
+ size := int64(len(lf.fmap))
+ valsz := p.Len
+ if int64(offset) >= size || int64(offset+valsz) > size {
+ err = y.ErrEOF
+ } else {
+ buf = lf.fmap[offset : offset+valsz]
+ nbr = int64(valsz)
+ }
+ }
+ y.NumReads.Add(1)
+ y.NumBytesRead.Add(nbr)
+ return buf, err
+}
+
+func (lf *logFile) doneWriting(offset uint32) error {
+ // Sync before acquiring lock. (We call this from write() and thus know we have shared access
+ // to the fd.)
+ if err := y.FileSync(lf.fd); err != nil {
+ return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
+ }
+ // Close and reopen the file read-only. Acquire lock because fd will become invalid for a bit.
+ // Acquiring the lock is bad because, while we don't hold the lock for a long time, it forces
+ // one batch of readers to wait for the preceding batch of readers to finish.
+ //
+ // If there's a benefit to reopening the file read-only, it might be on Windows. I don't know
+ // what the benefit is. Consider keeping the file read-write, or use fcntl to change
+ // permissions.
+ lf.lock.Lock()
+ defer lf.lock.Unlock()
+ if err := lf.munmap(); err != nil {
+ return err
+ }
+ // TODO: Confirm if we need to run a file sync after truncation.
+ // Truncation must run after unmapping, otherwise Windows would crap itself.
+ if err := lf.fd.Truncate(int64(offset)); err != nil {
+ return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
+ }
+ if err := lf.fd.Close(); err != nil {
+ return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
+ }
+
+ return lf.openReadOnly()
+}
+
+// You must hold lf.lock to sync()
+func (lf *logFile) sync() error {
+ return y.FileSync(lf.fd)
+}
+
+var errStop = errors.New("Stop iteration")
+var errTruncate = errors.New("Do truncate")
+var errDeleteVlogFile = errors.New("Delete vlog file")
+
+type logEntry func(e Entry, vp valuePointer) error
+
+type safeRead struct {
+ k []byte
+ v []byte
+
+ recordOffset uint32
+}
+
+func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
+ var hbuf [headerBufSize]byte
+ var err error
+
+ hash := crc32.New(y.CastagnoliCrcTable)
+ tee := io.TeeReader(reader, hash)
+ if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
+ return nil, err
+ }
+
+ var h header
+ h.Decode(hbuf[:])
+ if h.klen > uint32(1<<16) { // Key length must be below uint16.
+ return nil, errTruncate
+ }
+ kl := int(h.klen)
+ if cap(r.k) < kl {
+ r.k = make([]byte, 2*kl)
+ }
+ vl := int(h.vlen)
+ if cap(r.v) < vl {
+ r.v = make([]byte, 2*vl)
+ }
+
+ e := &Entry{}
+ e.offset = r.recordOffset
+ e.Key = r.k[:kl]
+ e.Value = r.v[:vl]
+
+ if _, err = io.ReadFull(tee, e.Key); err != nil {
+ if err == io.EOF {
+ err = errTruncate
+ }
+ return nil, err
+ }
+ if _, err = io.ReadFull(tee, e.Value); err != nil {
+ if err == io.EOF {
+ err = errTruncate
+ }
+ return nil, err
+ }
+ var crcBuf [4]byte
+ if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
+ if err == io.EOF {
+ err = errTruncate
+ }
+ return nil, err
+ }
+ crc := binary.BigEndian.Uint32(crcBuf[:])
+ if crc != hash.Sum32() {
+ return nil, errTruncate
+ }
+ e.meta = h.meta
+ e.UserMeta = h.userMeta
+ e.ExpiresAt = h.expiresAt
+ return e, nil
+}
+
+// iterate iterates over the log file. It doesn't allocate new memory for every kv pair.
+// Therefore, the kv pair is only valid for the duration of the fn call.
+func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
+ fi, err := lf.fd.Stat()
+ if err != nil {
+ return 0, err
+ }
+ if int64(offset) == fi.Size() {
+ // We're at the end of the file already. No need to do anything.
+ return offset, nil
+ }
+ if vlog.opt.ReadOnly {
+ // We're not at the end of the file. We'd need to replay the entries, or
+ // possibly truncate the file.
+ return 0, ErrReplayNeeded
+ }
+
+ // We're not at the end of the file. Let's Seek to the offset and start reading.
+ if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil {
+ return 0, errFile(err, lf.path, "Unable to seek")
+ }
+
+ reader := bufio.NewReader(lf.fd)
+ read := &safeRead{
+ k: make([]byte, 10),
+ v: make([]byte, 10),
+ recordOffset: offset,
+ }
+
+ var lastCommit uint64
+ var validEndOffset uint32
+ for {
+ e, err := read.Entry(reader)
+ if err == io.EOF {
+ break
+ } else if err == io.ErrUnexpectedEOF || err == errTruncate {
+ break
+ } else if err != nil {
+ return 0, err
+ } else if e == nil {
+ continue
+ }
+
+ var vp valuePointer
+ vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + crc32.Size)
+ read.recordOffset += vp.Len
+
+ vp.Offset = e.offset
+ vp.Fid = lf.fid
+
+ if e.meta&bitTxn > 0 {
+ txnTs := y.ParseTs(e.Key)
+ if lastCommit == 0 {
+ lastCommit = txnTs
+ }
+ if lastCommit != txnTs {
+ break
+ }
+
+ } else if e.meta&bitFinTxn > 0 {
+ txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
+ if err != nil || lastCommit != txnTs {
+ break
+ }
+ // Got the end of txn. Now we can store them.
+ lastCommit = 0
+ validEndOffset = read.recordOffset
+
+ } else {
+ if lastCommit != 0 {
+ // This is most likely an entry which was moved as part of GC.
+ // We shouldn't get this entry in the middle of a transaction.
+ break
+ }
+ validEndOffset = read.recordOffset
+ }
+
+ if err := fn(*e, vp); err != nil {
+ if err == errStop {
+ break
+ }
+ return 0, errFile(err, lf.path, "Iteration function")
+ }
+ }
+ return validEndOffset, nil
+}
+
+func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error {
+ maxFid := atomic.LoadUint32(&vlog.maxFid)
+ y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid)
+ tr.LazyPrintf("Rewriting fid: %d", f.fid)
+
+ wb := make([]*Entry, 0, 1000)
+ var size int64
+
+ y.AssertTrue(vlog.db != nil)
+ var count, moved int
+ fe := func(e Entry) error {
+ count++
+ if count%100000 == 0 {
+ tr.LazyPrintf("Processing entry %d", count)
+ }
+
+ vs, err := vlog.db.get(e.Key)
+ if err != nil {
+ return err
+ }
+ if discardEntry(e, vs) {
+ return nil
+ }
+
+ // Value is still present in value log.
+ if len(vs.Value) == 0 {
+ return errors.Errorf("Empty value: %+v", vs)
+ }
+ var vp valuePointer
+ vp.Decode(vs.Value)
+
+ if vp.Fid > f.fid {
+ return nil
+ }
+ if vp.Offset > e.offset {
+ return nil
+ }
+ if vp.Fid == f.fid && vp.Offset == e.offset {
+ moved++
+ // This new entry only contains the key, and a pointer to the value.
+ ne := new(Entry)
+ ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits.
+ ne.UserMeta = e.UserMeta
+
+ // Create a new key in a separate keyspace, prefixed by moveKey. We are not
+ // allowed to rewrite an older version of key in the LSM tree, because then this older
+ // version would be at the top of the LSM tree. To work correctly, reads expect the
+ // latest versions to be at the top, and the older versions at the bottom.
+ if bytes.HasPrefix(e.Key, badgerMove) {
+ ne.Key = append([]byte{}, e.Key...)
+ } else {
+ ne.Key = make([]byte, len(badgerMove)+len(e.Key))
+ n := copy(ne.Key, badgerMove)
+ copy(ne.Key[n:], e.Key)
+ }
+
+ ne.Value = append([]byte{}, e.Value...)
+ wb = append(wb, ne)
+ size += int64(e.estimateSize(vlog.opt.ValueThreshold))
+ if size >= 64*mi {
+ tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
+ if err := vlog.db.batchSet(wb); err != nil {
+ return err
+ }
+ size = 0
+ wb = wb[:0]
+ }
+ } else {
+ vlog.db.opt.Warningf("This entry should have been caught. %+v\n", e)
+ }
+ return nil
+ }
+
+ _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error {
+ return fe(e)
+ })
+ if err != nil {
+ return err
+ }
+
+ tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
+ batchSize := 1024
+ var loops int
+ for i := 0; i < len(wb); {
+ loops++
+ if batchSize == 0 {
+ vlog.db.opt.Warningf("We shouldn't reach batch size of zero.")
+ return ErrNoRewrite
+ }
+ end := i + batchSize
+ if end > len(wb) {
+ end = len(wb)
+ }
+ if err := vlog.db.batchSet(wb[i:end]); err != nil {
+ if err == ErrTxnTooBig {
+ // Decrease the batch size to half.
+ batchSize = batchSize / 2
+ tr.LazyPrintf("Dropped batch size to %d", batchSize)
+ continue
+ }
+ return err
+ }
+ i += batchSize
+ }
+ tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops)
+ tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved)
+ tr.LazyPrintf("Removing fid: %d", f.fid)
+ var deleteFileNow bool
+ // Entries written to LSM. Remove the older file now.
+ {
+ vlog.filesLock.Lock()
+ // Just a sanity-check.
+ if _, ok := vlog.filesMap[f.fid]; !ok {
+ vlog.filesLock.Unlock()
+ return errors.Errorf("Unable to find fid: %d", f.fid)
+ }
+ if vlog.iteratorCount() == 0 {
+ delete(vlog.filesMap, f.fid)
+ deleteFileNow = true
+ } else {
+ vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid)
+ }
+ vlog.filesLock.Unlock()
+ }
+
+ if deleteFileNow {
+ if err := vlog.deleteLogFile(f); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error {
+ db := vlog.db
+ var result []*Entry
+ var count, pointers uint64
+ tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid)
+ err := db.View(func(txn *Txn) error {
+ opt := DefaultIteratorOptions
+ opt.InternalAccess = true
+ opt.PrefetchValues = false
+ itr := txn.NewIterator(opt)
+ defer itr.Close()
+
+ for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() {
+ count++
+ item := itr.Item()
+ if item.meta&bitValuePointer == 0 {
+ continue
+ }
+ pointers++
+ var vp valuePointer
+ vp.Decode(item.vptr)
+ if vp.Fid == fid {
+ e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete}
+ result = append(result, e)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ tr.LazyPrintf("Got error while iterating move keys: %v", err)
+ tr.SetError()
+ return err
+ }
+ tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers)
+ tr.LazyPrintf("Number of invalid move keys found: %d", len(result))
+ batchSize := 10240
+ for i := 0; i < len(result); {
+ end := i + batchSize
+ if end > len(result) {
+ end = len(result)
+ }
+ if err := db.batchSet(result[i:end]); err != nil {
+ if err == ErrTxnTooBig {
+ batchSize /= 2
+ tr.LazyPrintf("Dropped batch size to %d", batchSize)
+ continue
+ }
+ tr.LazyPrintf("Error while doing batchSet: %v", err)
+ tr.SetError()
+ return err
+ }
+ i += batchSize
+ }
+ tr.LazyPrintf("Move keys deletion done.")
+ return nil
+}
+
+func (vlog *valueLog) incrIteratorCount() {
+ atomic.AddInt32(&vlog.numActiveIterators, 1)
+}
+
+func (vlog *valueLog) iteratorCount() int {
+ return int(atomic.LoadInt32(&vlog.numActiveIterators))
+}
+
+func (vlog *valueLog) decrIteratorCount() error {
+ num := atomic.AddInt32(&vlog.numActiveIterators, -1)
+ if num != 0 {
+ return nil
+ }
+
+ vlog.filesLock.Lock()
+ lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted))
+ for _, id := range vlog.filesToBeDeleted {
+ lfs = append(lfs, vlog.filesMap[id])
+ delete(vlog.filesMap, id)
+ }
+ vlog.filesToBeDeleted = nil
+ vlog.filesLock.Unlock()
+
+ for _, lf := range lfs {
+ if err := vlog.deleteLogFile(lf); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (vlog *valueLog) deleteLogFile(lf *logFile) error {
+ if lf == nil {
+ return nil
+ }
+ path := vlog.fpath(lf.fid)
+ if err := lf.munmap(); err != nil {
+ _ = lf.fd.Close()
+ return err
+ }
+ if err := lf.fd.Close(); err != nil {
+ return err
+ }
+ return os.Remove(path)
+}
+
+func (vlog *valueLog) dropAll() (int, error) {
+ // We don't want to block dropAll on any pending transactions. So, don't worry about iterator
+ // count.
+ var count int
+ deleteAll := func() error {
+ vlog.filesLock.Lock()
+ defer vlog.filesLock.Unlock()
+ for _, lf := range vlog.filesMap {
+ if err := vlog.deleteLogFile(lf); err != nil {
+ return err
+ }
+ count++
+ }
+ vlog.filesMap = make(map[uint32]*logFile)
+ return nil
+ }
+ if err := deleteAll(); err != nil {
+ return count, err
+ }
+
+ vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0")
+ if _, err := vlog.createVlogFile(0); err != nil {
+ return count, err
+ }
+ atomic.StoreUint32(&vlog.maxFid, 0)
+ return count, nil
+}
+
+// lfDiscardStats keeps track of the amount of data that could be discarded for
+// a given logfile.
+type lfDiscardStats struct {
+ sync.Mutex
+ m map[uint32]int64
+ updatesSinceFlush int
+}
+
+type valueLog struct {
+ dirPath string
+ elog trace.EventLog
+
+ // guards our view of which files exist, which to be deleted, how many active iterators
+ filesLock sync.RWMutex
+ filesMap map[uint32]*logFile
+ filesToBeDeleted []uint32
+ // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted.
+ numActiveIterators int32
+
+ db *DB
+ maxFid uint32 // accessed via atomics.
+ writableLogOffset uint32 // read by read, written by write. Must access via atomics.
+ numEntriesWritten uint32
+ opt Options
+
+ garbageCh chan struct{}
+ lfDiscardStats *lfDiscardStats
+}
+
+func vlogFilePath(dirPath string, fid uint32) string {
+ return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
+}
+
+func (vlog *valueLog) fpath(fid uint32) string {
+ return vlogFilePath(vlog.dirPath, fid)
+}
+
+func (vlog *valueLog) populateFilesMap() error {
+ vlog.filesMap = make(map[uint32]*logFile)
+
+ files, err := ioutil.ReadDir(vlog.dirPath)
+ if err != nil {
+ return errFile(err, vlog.dirPath, "Unable to open log dir.")
+ }
+
+ found := make(map[uint64]struct{})
+ for _, file := range files {
+ if !strings.HasSuffix(file.Name(), ".vlog") {
+ continue
+ }
+ fsz := len(file.Name())
+ fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
+ if err != nil {
+ return errFile(err, file.Name(), "Unable to parse log id.")
+ }
+ if _, ok := found[fid]; ok {
+ return errFile(err, file.Name(), "Duplicate file found. Please delete one.")
+ }
+ found[fid] = struct{}{}
+
+ lf := &logFile{
+ fid: uint32(fid),
+ path: vlog.fpath(uint32(fid)),
+ loadingMode: vlog.opt.ValueLogLoadingMode,
+ }
+ vlog.filesMap[uint32(fid)] = lf
+ if vlog.maxFid < uint32(fid) {
+ vlog.maxFid = uint32(fid)
+ }
+ }
+ return nil
+}
+
+func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) {
+ path := vlog.fpath(fid)
+ lf := &logFile{
+ fid: fid,
+ path: path,
+ loadingMode: vlog.opt.ValueLogLoadingMode,
+ }
+ // writableLogOffset is only written by the write func and read by the Read func.
+ // To avoid a race condition, all reads and updates to this variable must be
+ // done via atomics.
+ atomic.StoreUint32(&vlog.writableLogOffset, 0)
+ vlog.numEntriesWritten = 0
+
+ var err error
+ if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil {
+ return nil, errFile(err, lf.path, "Create value log file")
+ }
+ if err = syncDir(vlog.dirPath); err != nil {
+ return nil, errFile(err, vlog.dirPath, "Sync value log dir")
+ }
+ if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil {
+ return nil, errFile(err, lf.path, "Mmap value log file")
+ }
+
+ vlog.filesLock.Lock()
+ vlog.filesMap[fid] = lf
+ vlog.filesLock.Unlock()
+
+ return lf, nil
+}
+
+func errFile(err error, path string, msg string) error {
+ return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err)
+}
+
+func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error {
+ var err error
+ mode := os.O_RDONLY
+ if vlog.opt.Truncate {
+ // We should open the file in RW mode, so it can be truncated.
+ mode = os.O_RDWR
+ }
+ lf.fd, err = os.OpenFile(lf.path, mode, 0)
+ if err != nil {
+ return errFile(err, lf.path, "Open file")
+ }
+ defer lf.fd.Close()
+
+ fi, err := lf.fd.Stat()
+ if err != nil {
+ return errFile(err, lf.path, "Unable to run file.Stat")
+ }
+
+ // Alright, let's iterate now.
+ endOffset, err := vlog.iterate(lf, offset, replayFn)
+ if err != nil {
+ return errFile(err, lf.path, "Unable to replay logfile")
+ }
+ if int64(endOffset) == fi.Size() {
+ return nil
+ }
+
+ // End offset is different from file size. So, we should truncate the file
+ // to that size.
+ y.AssertTrue(int64(endOffset) <= fi.Size())
+ if !vlog.opt.Truncate {
+ return ErrTruncateNeeded
+ }
+
+ // The entire file should be truncated (i.e. it should be deleted).
+ // If fid == maxFid then it's okay to truncate the entire file since it will be
+ // used for future additions. Also, it's okay if the last file has size zero.
+ // We mmap 2*opt.ValueLogFileSize for the last file. See the vlog.open() function.
+ if endOffset == 0 && lf.fid != vlog.maxFid {
+ return errDeleteVlogFile
+ }
+ if err := lf.fd.Truncate(int64(endOffset)); err != nil {
+ return errFile(err, lf.path, fmt.Sprintf(
+ "Truncation needed at offset %d. Can be done manually as well.", endOffset))
+ }
+ return nil
+}
+
+func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error {
+ opt := db.opt
+ vlog.opt = opt
+ vlog.dirPath = opt.ValueDir
+ vlog.db = db
+ vlog.elog = trace.NewEventLog("Badger", "Valuelog")
+ vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time.
+ vlog.lfDiscardStats = &lfDiscardStats{m: make(map[uint32]int64)}
+ if err := vlog.populateFilesMap(); err != nil {
+ return err
+ }
+ // If no files are found, then create a new file.
+ if len(vlog.filesMap) == 0 {
+ _, err := vlog.createVlogFile(0)
+ return err
+ }
+
+ fids := vlog.sortedFids()
+ for _, fid := range fids {
+ lf, ok := vlog.filesMap[fid]
+ y.AssertTrue(ok)
+
+ // This file is before the value head pointer. So, we don't need to
+ // replay it, and can just open it in readonly mode.
+ if fid < ptr.Fid {
+ if err := lf.openReadOnly(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ var offset uint32
+ if fid == ptr.Fid {
+ offset = ptr.Offset + ptr.Len
+ }
+ vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset)
+ now := time.Now()
+ // Replay and possible truncation done. Now we can open the file as per
+ // user specified options.
+ if err := vlog.replayLog(lf, offset, replayFn); err != nil {
+ // Log file is corrupted. Delete it.
+ if err == errDeleteVlogFile {
+ delete(vlog.filesMap, fid)
+ path := vlog.fpath(lf.fid)
+ if err := os.Remove(path); err != nil {
+ return y.Wrapf(err, "failed to delete empty value log file: %q", path)
+ }
+ continue
+ }
+ return err
+ }
+ vlog.db.opt.Infof("Replay took: %s\n", time.Since(now))
+
+ if fid < vlog.maxFid {
+ if err := lf.openReadOnly(); err != nil {
+ return err
+ }
+ } else {
+ var flags uint32
+ switch {
+ case vlog.opt.ReadOnly:
+ // If we have read only, we don't need SyncWrites.
+ flags |= y.ReadOnly
+ case vlog.opt.SyncWrites:
+ flags |= y.Sync
+ }
+ var err error
+ if lf.fd, err = y.OpenExistingFile(vlog.fpath(fid), flags); err != nil {
+ return errFile(err, lf.path, "Open existing file")
+ }
+ }
+ }
+
+ // Seek to the end to start writing.
+ last, ok := vlog.filesMap[vlog.maxFid]
+ y.AssertTrue(ok)
+ lastOffset, err := last.fd.Seek(0, io.SeekEnd)
+ if err != nil {
+ return errFile(err, last.path, "file.Seek to end")
+ }
+ vlog.writableLogOffset = uint32(lastOffset)
+
+ // Update the head to point to the updated tail. Otherwise, even after doing a successful
+ // replay and closing the DB, the value log head does not get updated, which causes the replay
+ // to happen repeatedly.
+ vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)}
+
+ // Map the file if needed. When we create a file, it is automatically mapped.
+ if err = last.mmap(2 * opt.ValueLogFileSize); err != nil {
+ return errFile(err, last.path, "Map log file")
+ }
+ if err := vlog.populateDiscardStats(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (vlog *valueLog) Close() error {
+ vlog.elog.Printf("Stopping garbage collection of values.")
+ defer vlog.elog.Finish()
+
+ var err error
+ for id, f := range vlog.filesMap {
+ f.lock.Lock() // We won’t release the lock.
+ if munmapErr := f.munmap(); munmapErr != nil && err == nil {
+ err = munmapErr
+ }
+
+ maxFid := atomic.LoadUint32(&vlog.maxFid)
+ if !vlog.opt.ReadOnly && id == maxFid {
+ // truncate writable log file to correct offset.
+ if truncErr := f.fd.Truncate(
+ int64(vlog.woffset())); truncErr != nil && err == nil {
+ err = truncErr
+ }
+ }
+
+ if closeErr := f.fd.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }
+ return err
+}
+
+// sortedFids returns the file ids not pending deletion, sorted. Assumes we have shared access to
+// filesMap.
+func (vlog *valueLog) sortedFids() []uint32 {
+ toBeDeleted := make(map[uint32]struct{})
+ for _, fid := range vlog.filesToBeDeleted {
+ toBeDeleted[fid] = struct{}{}
+ }
+ ret := make([]uint32, 0, len(vlog.filesMap))
+ for fid := range vlog.filesMap {
+ if _, ok := toBeDeleted[fid]; !ok {
+ ret = append(ret, fid)
+ }
+ }
+ sort.Slice(ret, func(i, j int) bool {
+ return ret[i] < ret[j]
+ })
+ return ret
+}
+
+type request struct {
+ // Input values
+ Entries []*Entry
+ // Output values and wait group stuff below
+ Ptrs []valuePointer
+ Wg sync.WaitGroup
+ Err error
+ ref int32
+}
+
+func (req *request) IncrRef() {
+ atomic.AddInt32(&req.ref, 1)
+}
+
+func (req *request) DecrRef() {
+ nRef := atomic.AddInt32(&req.ref, -1)
+ if nRef > 0 {
+ return
+ }
+ req.Entries = nil
+ requestPool.Put(req)
+}
+
+func (req *request) Wait() error {
+ req.Wg.Wait()
+ err := req.Err
+ req.DecrRef() // DecrRef after writing to DB.
+ return err
+}
+
+type requests []*request
+
+func (reqs requests) DecrRef() {
+ for _, req := range reqs {
+ req.DecrRef()
+ }
+}
+
+// sync syncs the content of the latest value log file to disk. Syncing of the value log directory
+// is not required here, as it happens every time a value log file rotation happens (see the
+// createVlogFile function). During rotation, the previous value log file also gets synced to disk.
+// It only syncs the file if fid >= vlog.maxFid. In some cases, such as replay (while opening the
+// db), it might be called with fid < vlog.maxFid. To sync irrespective of file id, call it with
+// math.MaxUint32.
+func (vlog *valueLog) sync(fid uint32) error {
+ if vlog.opt.SyncWrites {
+ return nil
+ }
+
+ vlog.filesLock.RLock()
+ maxFid := atomic.LoadUint32(&vlog.maxFid)
+ // During replay it is possible to get a sync call with fid less than maxFid.
+ // Because the older file has already been synced, we can return from here.
+ if fid < maxFid || len(vlog.filesMap) == 0 {
+ vlog.filesLock.RUnlock()
+ return nil
+ }
+ curlf := vlog.filesMap[maxFid]
+ // Sometimes it is possible that vlog.maxFid has been increased but file creation
+ // with the same id is still in progress when this function is called. In those
+ // cases the entry for the file might not be present in vlog.filesMap.
+ if curlf == nil {
+ vlog.filesLock.RUnlock()
+ return nil
+ }
+ curlf.lock.RLock()
+ vlog.filesLock.RUnlock()
+
+ err := curlf.sync()
+ curlf.lock.RUnlock()
+ return err
+}
+
+func (vlog *valueLog) woffset() uint32 {
+ return atomic.LoadUint32(&vlog.writableLogOffset)
+}
+
+// write is thread-unsafe by design and should not be called concurrently.
+func (vlog *valueLog) write(reqs []*request) error {
+ vlog.filesLock.RLock()
+ maxFid := atomic.LoadUint32(&vlog.maxFid)
+ curlf := vlog.filesMap[maxFid]
+ vlog.filesLock.RUnlock()
+
+ var buf bytes.Buffer
+ toDisk := func() error {
+ if buf.Len() == 0 {
+ return nil
+ }
+ vlog.elog.Printf("Flushing %d blocks of total size: %d", len(reqs), buf.Len())
+ n, err := curlf.fd.Write(buf.Bytes())
+ if err != nil {
+ return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
+ }
+ buf.Reset()
+ y.NumWrites.Add(1)
+ y.NumBytesWritten.Add(int64(n))
+ vlog.elog.Printf("Done")
+ atomic.AddUint32(&vlog.writableLogOffset, uint32(n))
+
+ if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) ||
+ vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
+ var err error
+ if err = curlf.doneWriting(vlog.woffset()); err != nil {
+ return err
+ }
+
+ newid := atomic.AddUint32(&vlog.maxFid, 1)
+ y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid)
+ newlf, err := vlog.createVlogFile(newid)
+ if err != nil {
+ return err
+ }
+ curlf = newlf
+ atomic.AddInt32(&vlog.db.logRotates, 1)
+ }
+ return nil
+ }
+
+ for i := range reqs {
+ b := reqs[i]
+ b.Ptrs = b.Ptrs[:0]
+ var written int
+ for j := range b.Entries {
+ e := b.Entries[j]
+ if e.skipVlog {
+ b.Ptrs = append(b.Ptrs, valuePointer{})
+ continue
+ }
+ var p valuePointer
+
+ p.Fid = curlf.fid
+ // Use the offset including buffer length so far.
+ p.Offset = vlog.woffset() + uint32(buf.Len())
+ plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer.
+ if err != nil {
+ return err
+ }
+ p.Len = uint32(plen)
+ b.Ptrs = append(b.Ptrs, p)
+ written++
+ }
+ vlog.numEntriesWritten += uint32(written)
+ // We write to disk here so that all entries that are part of the same transaction are
+ // written to the same vlog file.
+ writeNow :=
+ vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) ||
+ vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
+ if writeNow {
+ if err := toDisk(); err != nil {
+ return err
+ }
+ }
+ }
+ return toDisk()
+}
+
+// getFileRLocked gets the logFile and acquires an RLock() for the mmap. You must call RUnlock on
+// the file (if non-nil).
+func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) {
+ vlog.filesLock.RLock()
+ defer vlog.filesLock.RUnlock()
+ ret, ok := vlog.filesMap[fid]
+ if !ok {
+ // log file has gone away, will need to retry the operation.
+ return nil, ErrRetry
+ }
+ ret.lock.RLock()
+ return ret, nil
+}
+
+// Read reads the value log at a given location.
+// TODO: Make this read private.
+func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
+ // Check for a valid offset if we are reading from the writable log.
+ maxFid := atomic.LoadUint32(&vlog.maxFid)
+ if vp.Fid == maxFid && vp.Offset >= vlog.woffset() {
+ return nil, nil, errors.Errorf(
+ "Invalid value pointer offset: %d greater than current offset: %d",
+ vp.Offset, vlog.woffset())
+ }
+
+ buf, cb, err := vlog.readValueBytes(vp, s)
+ if err != nil {
+ return nil, cb, err
+ }
+ var h header
+ h.Decode(buf)
+ n := uint32(headerBufSize) + h.klen
+ return buf[n : n+h.vlen], cb, nil
+}
+
+func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
+ lf, err := vlog.getFileRLocked(vp.Fid)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ buf, err := lf.read(vp, s)
+ if vlog.opt.ValueLogLoadingMode == options.MemoryMap {
+ return buf, lf.lock.RUnlock, err
+ }
+ // If we are using File I/O we unlock the file immediately
+ // and return an empty function as callback.
+ lf.lock.RUnlock()
+ return buf, nil, err
+}
+
+// Test helper
+func valueBytesToEntry(buf []byte) (e Entry) {
+ var h header
+ h.Decode(buf)
+ n := uint32(headerBufSize)
+
+ e.Key = buf[n : n+h.klen]
+ n += h.klen
+ e.meta = h.meta
+ e.UserMeta = h.userMeta
+ e.Value = buf[n : n+h.vlen]
+ return
+}
+
+func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) {
+ vlog.filesLock.RLock()
+ defer vlog.filesLock.RUnlock()
+ fids := vlog.sortedFids()
+ if len(fids) <= 1 {
+ tr.LazyPrintf("Only one or less value log file.")
+ return nil
+ } else if head.Fid == 0 {
+ tr.LazyPrintf("Head pointer is at zero.")
+ return nil
+ }
+
+ // Pick a candidate that contains the largest amount of discardable data
+ candidate := struct {
+ fid uint32
+ discard int64
+ }{math.MaxUint32, 0}
+ vlog.lfDiscardStats.Lock()
+ for _, fid := range fids {
+ if fid >= head.Fid {
+ break
+ }
+ if vlog.lfDiscardStats.m[fid] > candidate.discard {
+ candidate.fid = fid
+ candidate.discard = vlog.lfDiscardStats.m[fid]
+ }
+ }
+ vlog.lfDiscardStats.Unlock()
+
+ if candidate.fid != math.MaxUint32 { // Found a candidate
+ tr.LazyPrintf("Found candidate via discard stats: %v", candidate)
+ files = append(files, vlog.filesMap[candidate.fid])
+ } else {
+ tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.")
+ }
+
+ // Fallback to randomly picking a log file
+ var idxHead int
+ for i, fid := range fids {
+ if fid == head.Fid {
+ idxHead = i
+ break
+ }
+ }
+ if idxHead == 0 { // Not found or first file
+ tr.LazyPrintf("Could not find any file.")
+ return nil
+ }
+ idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it.
+ if idx > 0 {
+ idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids.
+ }
+ tr.LazyPrintf("Randomly chose fid: %d", fids[idx])
+ files = append(files, vlog.filesMap[fids[idx]])
+ return files
+}
+
+func discardEntry(e Entry, vs y.ValueStruct) bool {
+ if vs.Version != y.ParseTs(e.Key) {
+ // Version not found. Discard.
+ return true
+ }
+ if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
+ return true
+ }
+ if (vs.Meta & bitValuePointer) == 0 {
+ // Key also stores the value in LSM. Discard.
+ return true
+ }
+ if (vs.Meta & bitFinTxn) > 0 {
+ // Just a txn finish entry. Discard.
+ return true
+ }
+ return false
+}
+
+func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) {
+ // Update stats before exiting
+ defer func() {
+ if err == nil {
+ vlog.lfDiscardStats.Lock()
+ delete(vlog.lfDiscardStats.m, lf.fid)
+ vlog.lfDiscardStats.Unlock()
+ }
+ }()
+
+ type reason struct {
+ total float64
+ discard float64
+ count int
+ }
+
+ fi, err := lf.fd.Stat()
+ if err != nil {
+ tr.LazyPrintf("Error while finding file size: %v", err)
+ tr.SetError()
+ return err
+ }
+
+ // Set up the sampling window sizes.
+ sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window.
+ sizeWindowM := sizeWindow / (1 << 20) // in MBs.
+ countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries.
+ tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow)
+
+ // Pick a random start point for the log.
+ skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location.
+ skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window.
+ skipFirstM /= float64(mi) // Convert to MBs.
+ tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi)
+ var skipped float64
+
+ var r reason
+ start := time.Now()
+ y.AssertTrue(vlog.db != nil)
+ s := new(y.Slice)
+ var numIterations int
+ _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error {
+ numIterations++
+ esz := float64(vp.Len) / (1 << 20) // in MBs.
+ if skipped < skipFirstM {
+ skipped += esz
+ return nil
+ }
+
+ // Sample until we reach the window sizes or exceed 10 seconds.
+ if r.count > countWindow {
+ tr.LazyPrintf("Stopping sampling after %d entries.", countWindow)
+ return errStop
+ }
+ if r.total > sizeWindowM {
+ tr.LazyPrintf("Stopping sampling after reaching window size.")
+ return errStop
+ }
+ if time.Since(start) > 10*time.Second {
+ tr.LazyPrintf("Stopping sampling after 10 seconds.")
+ return errStop
+ }
+ r.total += esz
+ r.count++
+
+ vs, err := vlog.db.get(e.Key)
+ if err != nil {
+ return err
+ }
+ if discardEntry(e, vs) {
+ r.discard += esz
+ return nil
+ }
+
+ // Value is still present in value log.
+ y.AssertTrue(len(vs.Value) > 0)
+ vp.Decode(vs.Value)
+
+ if vp.Fid > lf.fid {
+ // Value is present in a later log. Discard.
+ r.discard += esz
+ return nil
+ }
+ if vp.Offset > e.offset {
+ // Value is present in a later offset, but in the same log.
+ r.discard += esz
+ return nil
+ }
+ if vp.Fid == lf.fid && vp.Offset == e.offset {
+ // This is still the active entry. This would need to be rewritten.
+
+ } else {
+ vlog.elog.Printf("Reason=%+v\n", r)
+
+ buf, cb, err := vlog.readValueBytes(vp, s)
+ if err != nil {
+ return errStop
+ }
+ ne := valueBytesToEntry(buf)
+ ne.offset = vp.Offset
+ ne.print("Latest Entry Header in LSM")
+ e.print("Latest Entry in Log")
+ runCallback(cb)
+ return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.",
+ vp, vs.Meta)
+ }
+ return nil
+ })
+
+ if err != nil {
+ tr.LazyPrintf("Error while iterating for RunGC: %v", err)
+ tr.SetError()
+ return err
+ }
+ tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n",
+ lf.fid, skipped, numIterations, r)
+
+ // If we couldn't sample at least 1000 KV pairs or at least 75% of the window size,
+ // and what we can discard is below the threshold, we should skip the rewrite.
+ if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total {
+ tr.LazyPrintf("Skipping GC on fid: %d", lf.fid)
+ return ErrNoRewrite
+ }
+ if err = vlog.rewrite(lf, tr); err != nil {
+ return err
+ }
+ tr.LazyPrintf("Done rewriting.")
+ return nil
+}
+
+func (vlog *valueLog) waitOnGC(lc *y.Closer) {
+ defer lc.Done()
+
+ <-lc.HasBeenClosed() // Wait for lc to be closed.
+
+ // Block any GC in progress to finish, and don't allow any more writes to runGC by filling up
+ // the channel of size 1.
+ vlog.garbageCh <- struct{}{}
+}
+
+func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error {
+ select {
+ case vlog.garbageCh <- struct{}{}:
+ // Pick a log file for GC.
+ tr := trace.New("Badger.ValueLog", "GC")
+ tr.SetMaxEvents(100)
+ defer func() {
+ tr.Finish()
+ <-vlog.garbageCh
+ }()
+
+ var err error
+ files := vlog.pickLog(head, tr)
+ if len(files) == 0 {
+ tr.LazyPrintf("PickLog returned zero results.")
+ return ErrNoRewrite
+ }
+ tried := make(map[uint32]bool)
+ for _, lf := range files {
+ if _, done := tried[lf.fid]; done {
+ continue
+ }
+ tried[lf.fid] = true
+ err = vlog.doRunGC(lf, discardRatio, tr)
+ if err == nil {
+ return vlog.deleteMoveKeysFor(lf.fid, tr)
+ }
+ }
+ return err
+ default:
+ return ErrRejected
+ }
+}
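+
+// Illustrative sketch (not part of upstream badger): applications drive this
+// through the exported DB.RunValueLogGC (assumed to be defined in db.go of this
+// package, not shown here), typically from a periodic goroutine that retries
+// while a rewrite actually happened:
+//
+//   ticker := time.NewTicker(10 * time.Minute)
+//   defer ticker.Stop()
+//   for range ticker.C {
+//   again:
+//       if err := db.RunValueLogGC(0.5); err == nil {
+//           goto again // a file was rewritten; there may be more to collect
+//       }
+//   }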
+
+func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) error {
+ vlog.lfDiscardStats.Lock()
+ for fid, sz := range stats {
+ vlog.lfDiscardStats.m[fid] += sz
+ vlog.lfDiscardStats.updatesSinceFlush++
+ }
+ vlog.lfDiscardStats.Unlock()
+ if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold {
+ if err := vlog.flushDiscardStats(); err != nil {
+ return err
+ }
+ vlog.lfDiscardStats.updatesSinceFlush = 0
+ }
+ return nil
+}
+
+// flushDiscardStats inserts discard stats into badger. Returns error on failure.
+func (vlog *valueLog) flushDiscardStats() error {
+ if len(vlog.lfDiscardStats.m) == 0 {
+ return nil
+ }
+ entries := []*Entry{{
+ Key: y.KeyWithTs(lfDiscardStatsKey, 1),
+ Value: vlog.encodedDiscardStats(),
+ }}
+ req, err := vlog.db.sendToWriteCh(entries)
+ if err != nil {
+ return errors.Wrapf(err, "failed to push discard stats to write channel")
+ }
+ return req.Wait()
+}
+
+// encodedDiscardStats returns []byte representation of lfDiscardStats
+// This will be called while storing stats in BadgerDB
+func (vlog *valueLog) encodedDiscardStats() []byte {
+ vlog.lfDiscardStats.Lock()
+ defer vlog.lfDiscardStats.Unlock()
+
+ encodedStats, _ := json.Marshal(vlog.lfDiscardStats.m)
+ return encodedStats
+}
+
+// populateDiscardStats populates vlog.lfDiscardStats
+// This function will be called while initializing valueLog
+func (vlog *valueLog) populateDiscardStats() error {
+ discardStatsKey := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64)
+ vs, err := vlog.db.get(discardStatsKey)
+ if err != nil {
+ return err
+ }
+
+ // Check if the value is empty.
+ if vs.Value == nil || len(vs.Value) == 0 {
+ return nil
+ }
+
+ var statsMap map[uint32]int64
+ // discard map is stored in the vlog file.
+ if vs.Meta&bitValuePointer > 0 {
+ var vp valuePointer
+ vp.Decode(vs.Value)
+ result, cb, err := vlog.Read(vp, new(y.Slice))
+ if err != nil {
+ return errors.Wrapf(err, "failed to read value pointer from vlog file: %+v", vp)
+ }
+ defer runCallback(cb)
+ if err := json.Unmarshal(result, &statsMap); err != nil {
+ return errors.Wrapf(err, "failed to unmarshal discard stats")
+ }
+ } else {
+ if err := json.Unmarshal(vs.Value, &statsMap); err != nil {
+ return errors.Wrapf(err, "failed to unmarshal discard stats")
+ }
+ }
+ vlog.opt.Debugf("Value Log Discard stats: %v", statsMap)
+ vlog.lfDiscardStats = &lfDiscardStats{m: statsMap}
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/y/error.go b/vendor/github.com/dgraph-io/badger/y/error.go
new file mode 100644
index 000000000..59bb28358
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/error.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+// This file contains some functions for error handling. Note that we are moving
+// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these
+// functions are useful for simple checks logged on one machine.
+// Some common use cases are:
+// (1) You receive an error from external lib, and would like to check/log fatal.
+// For this, use x.Check, x.Checkf. These will check for err != nil, which is
+// more common in Go. If you want to check for boolean being true, use
+// x.Assert, x.Assertf.
+// (2) You receive an error from external lib, and would like to pass on with some
+// stack trace information. In this case, use x.Wrap or x.Wrapf.
+// (3) You want to generate a new error with stack trace info. Use x.Errorf.
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/pkg/errors"
+)
+
+var debugMode = true
+
+// Check logs fatal if err != nil.
+func Check(err error) {
+ if err != nil {
+ log.Fatalf("%+v", Wrap(err))
+ }
+}
+
+// Check2 acts as convenience wrapper around Check, using the 2nd argument as error.
+func Check2(_ interface{}, err error) {
+ Check(err)
+}
+
+// AssertTrue asserts that b is true. Otherwise, it would log fatal.
+func AssertTrue(b bool) {
+ if !b {
+ log.Fatalf("%+v", errors.Errorf("Assert failed"))
+ }
+}
+
+// AssertTruef is AssertTrue with extra info.
+func AssertTruef(b bool, format string, args ...interface{}) {
+ if !b {
+ log.Fatalf("%+v", errors.Errorf(format, args...))
+ }
+}
+
+// Wrap wraps errors from external lib.
+func Wrap(err error) error {
+ if !debugMode {
+ return err
+ }
+ return errors.Wrap(err, "")
+}
+
+// Wrapf is Wrap with extra info.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if !debugMode {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf(format+" error: %+v", append(args, err)...)
+ }
+ return errors.Wrapf(err, format, args...)
+}
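These helpers are thin wrappers over github.com/pkg/errors. A hedged sketch of the usage pattern they enable, written against pkg/errors directly so it stands alone (the check helper below mirrors y.Check and is only illustrative):

package main

import (
	"log"
	"os"

	"github.com/pkg/errors"
)

// check mirrors y.Check: log fatally, with a stack trace, when err != nil.
func check(err error) {
	if err != nil {
		log.Fatalf("%+v", errors.Wrap(err, ""))
	}
}

func main() {
	f, err := os.Open("does-not-exist.txt")
	// Wrapf returns nil when err is nil, so this is a no-op on success
	// and a fatal log with context and stack trace on failure.
	check(errors.Wrapf(err, "opening config"))
	defer f.Close()
}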
diff --git a/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/y/file_dsync.go
new file mode 100644
index 000000000..3f3445e2e
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/file_dsync.go
@@ -0,0 +1,25 @@
+// +build !dragonfly,!freebsd,!windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import "golang.org/x/sys/unix"
+
+func init() {
+ datasyncFileFlag = unix.O_DSYNC
+}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
new file mode 100644
index 000000000..b68be7ab9
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
@@ -0,0 +1,25 @@
+// +build dragonfly freebsd windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import "syscall"
+
+func init() {
+ datasyncFileFlag = syscall.O_SYNC
+}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync.go b/vendor/github.com/dgraph-io/badger/y/file_sync.go
new file mode 100644
index 000000000..19016ef69
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/file_sync.go
@@ -0,0 +1,28 @@
+// +build !darwin go1.12
+
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import "os"
+
+// FileSync calls os.File.Sync with the right parameters.
+// This function can be removed once we stop supporting Go 1.11
+// on MacOS.
+//
+// More info: https://golang.org/issue/26650.
+func FileSync(f *os.File) error { return f.Sync() }
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go b/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
new file mode 100644
index 000000000..01c79f230
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
@@ -0,0 +1,37 @@
+// +build darwin,!go1.12
+
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "os"
+ "syscall"
+)
+
+// FileSync calls os.File.Sync with the right parameters.
+// This function can be removed once we stop supporting Go 1.11
+// on MacOS.
+//
+// More info: https://golang.org/issue/26650.
+func FileSync(f *os.File) error {
+ _, _, err := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), syscall.F_FULLFSYNC, 0)
+ if err == 0 {
+ return nil
+ }
+ return err
+}
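FileSync hides the darwin/Go 1.11 F_FULLFSYNC workaround behind a single call; callers just write and then sync before treating data as durable. A small sketch of that call pattern, using plain os.File.Sync so it compiles on any platform (badger would call y.FileSync at the marked line):

package main

import (
	"log"
	"os"
)

// writeDurably appends data and forces it to stable storage before returning,
// which is the guarantee badger wants from y.FileSync on every platform.
func writeDurably(path string, data []byte) error {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		return err
	}
	return f.Sync() // badger would call y.FileSync(f) here
}

func main() {
	if err := writeDurably("/tmp/example.log", []byte("hello\n")); err != nil {
		log.Fatal(err)
	}
}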
diff --git a/vendor/github.com/dgraph-io/badger/y/iterator.go b/vendor/github.com/dgraph-io/badger/y/iterator.go
new file mode 100644
index 000000000..719e8ec8e
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/iterator.go
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "bytes"
+ "container/heap"
+ "encoding/binary"
+
+ "github.com/pkg/errors"
+)
+
+// ValueStruct represents the value info that can be associated with a key, but also the internal
+// Meta field.
+type ValueStruct struct {
+ Meta byte
+ UserMeta byte
+ ExpiresAt uint64
+ Value []byte
+
+ Version uint64 // This field is not serialized. Only for internal usage.
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodedSize is the size of the ValueStruct when encoded
+func (v *ValueStruct) EncodedSize() uint16 {
+ sz := len(v.Value) + 2 // meta, usermeta.
+ if v.ExpiresAt == 0 {
+ return uint16(sz + 1)
+ }
+
+ enc := sizeVarint(v.ExpiresAt)
+ return uint16(sz + enc)
+}
+
+// Decode uses the length of the slice to infer the length of the Value field.
+func (v *ValueStruct) Decode(b []byte) {
+ v.Meta = b[0]
+ v.UserMeta = b[1]
+ var sz int
+ v.ExpiresAt, sz = binary.Uvarint(b[2:])
+ v.Value = b[2+sz:]
+}
+
+// Encode expects a slice of length at least v.EncodedSize().
+func (v *ValueStruct) Encode(b []byte) {
+ b[0] = v.Meta
+ b[1] = v.UserMeta
+ sz := binary.PutUvarint(b[2:], v.ExpiresAt)
+ copy(b[2+sz:], v.Value)
+}
+
+// EncodeTo should be kept in sync with the Encode function above. The reason
+// this function exists is to avoid creating byte arrays per key-value pair in
+// table/builder.go.
+func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) {
+ buf.WriteByte(v.Meta)
+ buf.WriteByte(v.UserMeta)
+ var enc [binary.MaxVarintLen64]byte
+ sz := binary.PutUvarint(enc[:], v.ExpiresAt)
+ buf.Write(enc[:sz])
+ buf.Write(v.Value)
+}
+
+// Iterator is an interface for a basic iterator.
+type Iterator interface {
+ Next()
+ Rewind()
+ Seek(key []byte)
+ Key() []byte
+ Value() ValueStruct
+ Valid() bool
+
+ // All iterators should be closed so that file garbage collection works.
+ Close() error
+}
+
+type elem struct {
+ itr Iterator
+ nice int
+ reversed bool
+}
+
+type elemHeap []*elem
+
+func (eh elemHeap) Len() int { return len(eh) }
+func (eh elemHeap) Swap(i, j int) { eh[i], eh[j] = eh[j], eh[i] }
+func (eh *elemHeap) Push(x interface{}) { *eh = append(*eh, x.(*elem)) }
+func (eh *elemHeap) Pop() interface{} {
+ // Remove the last element, because Go has already swapped 0th elem <-> last.
+ old := *eh
+ n := len(old)
+ x := old[n-1]
+ *eh = old[0 : n-1]
+ return x
+}
+func (eh elemHeap) Less(i, j int) bool {
+ cmp := CompareKeys(eh[i].itr.Key(), eh[j].itr.Key())
+ if cmp < 0 {
+ return !eh[i].reversed
+ }
+ if cmp > 0 {
+ return eh[i].reversed
+ }
+	// The keys are equal. In this case, the lower nice value takes precedence. This is important.
+ return eh[i].nice < eh[j].nice
+}
+
+// MergeIterator merges multiple iterators.
+// NOTE: MergeIterator owns the array of iterators and is responsible for closing them.
+type MergeIterator struct {
+ h elemHeap
+ curKey []byte
+ reversed bool
+
+ all []Iterator
+}
+
+// NewMergeIterator returns a new MergeIterator from a list of Iterators.
+func NewMergeIterator(iters []Iterator, reversed bool) *MergeIterator {
+ m := &MergeIterator{all: iters, reversed: reversed}
+ m.h = make(elemHeap, 0, len(iters))
+ m.initHeap()
+ return m
+}
+
+func (s *MergeIterator) storeKey(smallest Iterator) {
+ if cap(s.curKey) < len(smallest.Key()) {
+ s.curKey = make([]byte, 2*len(smallest.Key()))
+ }
+ s.curKey = s.curKey[:len(smallest.Key())]
+ copy(s.curKey, smallest.Key())
+}
+
+// initHeap checks all iterators and initializes our heap and array of keys.
+// Whenever we reverse direction, we need to run this.
+func (s *MergeIterator) initHeap() {
+ s.h = s.h[:0]
+ for idx, itr := range s.all {
+ if !itr.Valid() {
+ continue
+ }
+ e := &elem{itr: itr, nice: idx, reversed: s.reversed}
+ s.h = append(s.h, e)
+ }
+ heap.Init(&s.h)
+ for len(s.h) > 0 {
+ it := s.h[0].itr
+ if it == nil || !it.Valid() {
+ heap.Pop(&s.h)
+ continue
+ }
+ s.storeKey(s.h[0].itr)
+ break
+ }
+}
+
+// Valid returns whether the MergeIterator is at a valid element.
+func (s *MergeIterator) Valid() bool {
+ if s == nil {
+ return false
+ }
+ if len(s.h) == 0 {
+ return false
+ }
+ return s.h[0].itr.Valid()
+}
+
+// Key returns the key associated with the current iterator
+func (s *MergeIterator) Key() []byte {
+ if len(s.h) == 0 {
+ return nil
+ }
+ return s.h[0].itr.Key()
+}
+
+// Value returns the value associated with the iterator.
+func (s *MergeIterator) Value() ValueStruct {
+ if len(s.h) == 0 {
+ return ValueStruct{}
+ }
+ return s.h[0].itr.Value()
+}
+
+// Next advances to the next element, skipping entries whose key equals the current key.
+func (s *MergeIterator) Next() {
+ if len(s.h) == 0 {
+ return
+ }
+
+ smallest := s.h[0].itr
+ smallest.Next()
+
+ for len(s.h) > 0 {
+ smallest = s.h[0].itr
+ if !smallest.Valid() {
+ heap.Pop(&s.h)
+ continue
+ }
+
+ heap.Fix(&s.h, 0)
+ smallest = s.h[0].itr
+ if smallest.Valid() {
+ if !bytes.Equal(smallest.Key(), s.curKey) {
+ break
+ }
+ smallest.Next()
+ }
+ }
+ if !smallest.Valid() {
+ return
+ }
+ s.storeKey(smallest)
+}
+
+// Rewind seeks to first element (or last element for reverse iterator).
+func (s *MergeIterator) Rewind() {
+ for _, itr := range s.all {
+ itr.Rewind()
+ }
+ s.initHeap()
+}
+
+// Seek brings us to element with key >= given key.
+func (s *MergeIterator) Seek(key []byte) {
+ for _, itr := range s.all {
+ itr.Seek(key)
+ }
+ s.initHeap()
+}
+
+// Close implements y.Iterator
+func (s *MergeIterator) Close() error {
+ for _, itr := range s.all {
+ if err := itr.Close(); err != nil {
+ return errors.Wrap(err, "MergeIterator")
+ }
+ }
+ return nil
+}
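ValueStruct's on-disk encoding is just two fixed bytes (meta, user meta), a uvarint expiry, and the raw value, with the value's length inferred from the slice length on decode. A standalone sketch of the same layout, not importing the vendored package, to make the format concrete (function names are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// encode mirrors ValueStruct.Encode: meta, userMeta, uvarint(expiresAt), value.
func encode(meta, userMeta byte, expiresAt uint64, value []byte) []byte {
	buf := make([]byte, 2+binary.MaxVarintLen64+len(value))
	buf[0] = meta
	buf[1] = userMeta
	n := binary.PutUvarint(buf[2:], expiresAt)
	n += 2 + copy(buf[2+n:], value)
	return buf[:n]
}

// decode mirrors ValueStruct.Decode: the value is whatever follows the header.
func decode(b []byte) (meta, userMeta byte, expiresAt uint64, value []byte) {
	meta, userMeta = b[0], b[1]
	expiresAt, sz := binary.Uvarint(b[2:])
	return meta, userMeta, expiresAt, b[2+sz:]
}

func main() {
	b := encode(0x01, 0x00, 1700000000, []byte("v1"))
	m, um, exp, v := decode(b)
	fmt.Println(m, um, exp, string(v))
}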
diff --git a/vendor/github.com/dgraph-io/badger/y/metrics.go b/vendor/github.com/dgraph-io/badger/y/metrics.go
new file mode 100644
index 000000000..2de17d100
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/metrics.go
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import "expvar"
+
+var (
+ // LSMSize has size of the LSM in bytes
+ LSMSize *expvar.Map
+ // VlogSize has size of the value log in bytes
+ VlogSize *expvar.Map
+ // PendingWrites tracks the number of pending writes.
+ PendingWrites *expvar.Map
+
+ // These are cumulative
+
+ // NumReads has cumulative number of reads
+ NumReads *expvar.Int
+ // NumWrites has cumulative number of writes
+ NumWrites *expvar.Int
+ // NumBytesRead has cumulative number of bytes read
+ NumBytesRead *expvar.Int
+ // NumBytesWritten has cumulative number of bytes written
+ NumBytesWritten *expvar.Int
+	// NumLSMGets is the number of LSM gets
+	NumLSMGets *expvar.Map
+	// NumLSMBloomHits is the number of LSM bloom hits
+	NumLSMBloomHits *expvar.Map
+ // NumGets is number of gets
+ NumGets *expvar.Int
+ // NumPuts is number of puts
+ NumPuts *expvar.Int
+ // NumBlockedPuts is number of blocked puts
+ NumBlockedPuts *expvar.Int
+ // NumMemtableGets is number of memtable gets
+ NumMemtableGets *expvar.Int
+)
+
+// These variables are global and have cumulative values for all kv stores.
+func init() {
+ NumReads = expvar.NewInt("badger_disk_reads_total")
+ NumWrites = expvar.NewInt("badger_disk_writes_total")
+ NumBytesRead = expvar.NewInt("badger_read_bytes")
+ NumBytesWritten = expvar.NewInt("badger_written_bytes")
+ NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total")
+ NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total")
+ NumGets = expvar.NewInt("badger_gets_total")
+ NumPuts = expvar.NewInt("badger_puts_total")
+ NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total")
+ NumMemtableGets = expvar.NewInt("badger_memtable_gets_total")
+ LSMSize = expvar.NewMap("badger_lsm_size_bytes")
+ VlogSize = expvar.NewMap("badger_vlog_size_bytes")
+ PendingWrites = expvar.NewMap("badger_pending_writes_total")
+}
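All of these counters are plain expvar variables, so once the package is loaded they are exposed on the standard /debug/vars endpoint and can also be looked up programmatically by name. A small sketch of that lookup, using an illustrative counter of its own so it runs stand-alone (registering a name twice panics, so we do not re-register badger's names here):

package main

import (
	"expvar"
	"fmt"
)

func main() {
	// Simulate the init() above with our own counter.
	reads := expvar.NewInt("example_disk_reads_total")
	reads.Add(3)

	// Any registered variable can be fetched by name; with badger's y package
	// loaded, expvar.Get("badger_disk_reads_total") would work the same way.
	if v := expvar.Get("example_disk_reads_total"); v != nil {
		fmt.Println("reads:", v.String())
	}
}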
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
new file mode 100644
index 000000000..f9203a013
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
@@ -0,0 +1,63 @@
+// +build !windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+ mtype := unix.PROT_READ
+ if writable {
+ mtype |= unix.PROT_WRITE
+ }
+ return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
+}
+
+// Munmap unmaps a previously mapped slice.
+func Munmap(b []byte) error {
+ return unix.Munmap(b)
+}
+
+// Madvise uses the madvise system call to give advice about the use of memory
+// when using a slice that is memory-mapped to a file. Set the readahead flag to
+// false if page references are expected in random order.
+func Madvise(b []byte, readahead bool) error {
+ flags := unix.MADV_NORMAL
+ if !readahead {
+ flags = unix.MADV_RANDOM
+ }
+ return madvise(b, flags)
+}
+
+// This is required because the unix package does not support the madvise system call on OS X.
+func madvise(b []byte, advice int) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
+ uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
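A caller of y.Mmap/y.Munmap on a Unix system ends up doing roughly the following; this sketch is written against golang.org/x/sys/unix directly so it is self-contained, and maps an existing, non-empty file read-only (the path is only an example):

// +build !windows

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of y.Mmap(f, false, fi.Size()): read-only, shared mapping.
	data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Munmap(data) // equivalent of y.Munmap(data)

	fmt.Printf("mapped %d bytes\n", len(data))
}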
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
new file mode 100644
index 000000000..0efb2d0f8
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
@@ -0,0 +1,90 @@
+// +build windows
+
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func Mmap(fd *os.File, write bool, size int64) ([]byte, error) {
+ protect := syscall.PAGE_READONLY
+ access := syscall.FILE_MAP_READ
+
+ if write {
+ protect = syscall.PAGE_READWRITE
+ access = syscall.FILE_MAP_WRITE
+ }
+ fi, err := fd.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ // Truncate the database to the size of the mmap.
+ if fi.Size() < size {
+ if err := fd.Truncate(size); err != nil {
+ return nil, fmt.Errorf("truncate: %s", err)
+ }
+ }
+
+ // Open a file mapping handle.
+ sizelo := uint32(size >> 32)
+ sizehi := uint32(size) & 0xffffffff
+
+ handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil,
+ uint32(protect), sizelo, sizehi, nil)
+ if err != nil {
+ return nil, os.NewSyscallError("CreateFileMapping", err)
+ }
+
+ // Create the memory map.
+ addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size))
+ if addr == 0 {
+ return nil, os.NewSyscallError("MapViewOfFile", err)
+ }
+
+ // Close mapping handle.
+ if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil {
+ return nil, os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Slice memory layout
+ // Copied this snippet from golang/sys package
+ var sl = struct {
+ addr uintptr
+ len int
+ cap int
+ }{addr, int(size), int(size)}
+
+ // Use unsafe to turn sl into a []byte.
+ data := *(*[]byte)(unsafe.Pointer(&sl))
+
+ return data, nil
+}
+
+func Munmap(b []byte) error {
+ return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0])))
+}
+
+func Madvise(b []byte, readahead bool) error {
+ // Do Nothing. We don’t care about this setting on Windows
+ return nil
+}
diff --git a/vendor/github.com/dgraph-io/badger/y/watermark.go b/vendor/github.com/dgraph-io/badger/y/watermark.go
new file mode 100644
index 000000000..10ca00e7e
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/watermark.go
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "container/heap"
+ "context"
+ "sync/atomic"
+
+ "golang.org/x/net/trace"
+)
+
+type uint64Heap []uint64
+
+func (u uint64Heap) Len() int { return len(u) }
+func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] }
+func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
+func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) }
+func (u *uint64Heap) Pop() interface{} {
+ old := *u
+ n := len(old)
+ x := old[n-1]
+ *u = old[0 : n-1]
+ return x
+}
+
+// mark contains one or more indices, along with a done boolean to indicate the
+// status of the index: begin or done. It also contains waiters, which may be
+// waiting for the watermark to reach >= a certain index.
+type mark struct {
+ // Either this is an (index, waiter) pair or (index, done) or (indices, done).
+ index uint64
+ waiter chan struct{}
+ indices []uint64
+ done bool // Set to true if the index is done.
+}
+
+// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes
+// finished or "done" according to a WaterMark once Done(k) has been called
+// 1. as many times as Begin(k) has, AND
+// 2. a positive number of times.
+//
+// An index may also become "done" by calling SetDoneUntil at a time such that it is not
+// inter-mingled with Begin/Done calls.
+//
+// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they
+// are 64-bit aligned by putting them at the beginning of the structure.
+type WaterMark struct {
+ doneUntil uint64
+ lastIndex uint64
+ Name string
+ markCh chan mark
+ elog trace.EventLog
+}
+
+// Init initializes a WaterMark struct. MUST be called before using it.
+func (w *WaterMark) Init(closer *Closer) {
+ w.markCh = make(chan mark, 100)
+ w.elog = trace.NewEventLog("Watermark", w.Name)
+ go w.process(closer)
+}
+
+// Begin sets the last index to the given value.
+func (w *WaterMark) Begin(index uint64) {
+ atomic.StoreUint64(&w.lastIndex, index)
+ w.markCh <- mark{index: index, done: false}
+}
+
+// BeginMany works like Begin but accepts multiple indices.
+func (w *WaterMark) BeginMany(indices []uint64) {
+ atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1])
+ w.markCh <- mark{index: 0, indices: indices, done: false}
+}
+
+// Done sets a single index as done.
+func (w *WaterMark) Done(index uint64) {
+ w.markCh <- mark{index: index, done: true}
+}
+
+// DoneMany works like Done but accepts multiple indices.
+func (w *WaterMark) DoneMany(indices []uint64) {
+ w.markCh <- mark{index: 0, indices: indices, done: true}
+}
+
+// DoneUntil returns the maximum index that has the property that all indices
+// less than or equal to it are done.
+func (w *WaterMark) DoneUntil() uint64 {
+ return atomic.LoadUint64(&w.doneUntil)
+}
+
+// SetDoneUntil sets the maximum index that has the property that all indices
+// less than or equal to it are done.
+func (w *WaterMark) SetDoneUntil(val uint64) {
+ atomic.StoreUint64(&w.doneUntil, val)
+}
+
+// LastIndex returns the last index for which Begin has been called.
+func (w *WaterMark) LastIndex() uint64 {
+ return atomic.LoadUint64(&w.lastIndex)
+}
+
+// WaitForMark waits until the given index is marked as done.
+func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
+ if w.DoneUntil() >= index {
+ return nil
+ }
+ waitCh := make(chan struct{})
+ w.markCh <- mark{index: index, waiter: waitCh}
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-waitCh:
+ return nil
+ }
+}
+
+// process consumes the mark channel. It is not thread-safe, so only one
+// goroutine should run process; one is sufficient, because all of its work
+// is purely in-memory and CPU-bound.
+// Each index has to emit at least one begin watermark in serial order, otherwise waiters
+// can get blocked indefinitely. Example: if we had a watermark at 100 and a waiter at 101,
+// and no watermark were ever emitted at index 101, the waiter would be stuck indefinitely,
+// unable to tell whether the task at 101 decided not to emit a watermark or simply has not
+// been scheduled yet.
+func (w *WaterMark) process(closer *Closer) {
+ defer closer.Done()
+
+ var indices uint64Heap
+ // pending maps raft proposal index to the number of pending mutations for this proposal.
+ pending := make(map[uint64]int)
+ waiters := make(map[uint64][]chan struct{})
+
+ heap.Init(&indices)
+ var loop uint64
+
+ processOne := func(index uint64, done bool) {
+ // If not already done, then set. Otherwise, don't undo a done entry.
+ prev, present := pending[index]
+ if !present {
+ heap.Push(&indices, index)
+ }
+
+ delta := 1
+ if done {
+ delta = -1
+ }
+ pending[index] = prev + delta
+
+ loop++
+ if len(indices) > 0 && loop%10000 == 0 {
+ min := indices[0]
+ w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: "+
+ "%-4d. Value: %d\n", w.Name, index, len(indices), w.DoneUntil(), min, pending[min])
+ }
+
+ // Update mark by going through all indices in order; and checking if they have
+ // been done. Stop at the first index, which isn't done.
+ doneUntil := w.DoneUntil()
+ if doneUntil > index {
+ AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
+ }
+
+ until := doneUntil
+ loops := 0
+
+ for len(indices) > 0 {
+ min := indices[0]
+ if done := pending[min]; done > 0 {
+ break // len(indices) will be > 0.
+ }
+ // Even if done is called multiple times causing it to become
+ // negative, we should still pop the index.
+ heap.Pop(&indices)
+ delete(pending, min)
+ until = min
+ loops++
+ }
+ for i := doneUntil + 1; i <= until; i++ {
+ toNotify := waiters[i]
+ for _, ch := range toNotify {
+ close(ch)
+ }
+ delete(waiters, i) // Release the memory back.
+ }
+ if until != doneUntil {
+ AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
+ w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops)
+ }
+ }
+
+ for {
+ select {
+ case <-closer.HasBeenClosed():
+ return
+ case mark := <-w.markCh:
+ if mark.waiter != nil {
+ doneUntil := atomic.LoadUint64(&w.doneUntil)
+ if doneUntil >= mark.index {
+ close(mark.waiter)
+ } else {
+ ws, ok := waiters[mark.index]
+ if !ok {
+ waiters[mark.index] = []chan struct{}{mark.waiter}
+ } else {
+ waiters[mark.index] = append(ws, mark.waiter)
+ }
+ }
+ } else {
+ if mark.index > 0 {
+ processOne(mark.index, mark.done)
+ }
+ for _, index := range mark.indices {
+ processOne(index, mark.done)
+ }
+ }
+ }
+ }
+}
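The Begin/Done/WaitForMark flow above is easiest to see with a tiny end-to-end usage; a sketch against the vendored package path (the import and values are illustrative, and the Closer's single count belongs to the process goroutine started by Init):

package main

import (
	"context"
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	closer := y.NewCloser(1) // one count for the WaterMark's process goroutine
	w := &y.WaterMark{Name: "example"}
	w.Init(closer)

	w.Begin(1)
	w.Begin(2)
	w.Done(1)
	w.Done(2)

	// Blocks until every index <= 2 is done (they already are here).
	if err := w.WaitForMark(context.Background(), 2); err != nil {
		fmt.Println("wait:", err)
	}
	fmt.Println("done until:", w.DoneUntil())

	closer.SignalAndWait() // stop the process goroutine
}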
diff --git a/vendor/github.com/dgraph-io/badger/y/y.go b/vendor/github.com/dgraph-io/badger/y/y.go
new file mode 100644
index 000000000..4948315a9
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/y/y.go
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2017 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package y
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "math"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// ErrEOF indicates an end of file when trying to read from a memory mapped file
+// and encountering the end of slice.
+var ErrEOF = errors.New("End of mapped region")
+
+const (
+ // Sync indicates that O_DSYNC should be set on the underlying file,
+ // ensuring that data writes do not return until the data is flushed
+ // to disk.
+ Sync = 1 << iota
+ // ReadOnly opens the underlying file on a read-only basis.
+ ReadOnly
+)
+
+var (
+ // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go
+ datasyncFileFlag = 0x0
+
+ // CastagnoliCrcTable is a CRC32 polynomial table
+ CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
+
+ // Dummy channel for nil closers.
+ dummyCloserChan = make(chan struct{})
+)
+
+// OpenExistingFile opens an existing file, errors if it doesn't exist.
+func OpenExistingFile(filename string, flags uint32) (*os.File, error) {
+ openFlags := os.O_RDWR
+ if flags&ReadOnly != 0 {
+ openFlags = os.O_RDONLY
+ }
+
+ if flags&Sync != 0 {
+ openFlags |= datasyncFileFlag
+ }
+ return os.OpenFile(filename, openFlags, 0)
+}
+
+// CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed.
+func CreateSyncedFile(filename string, sync bool) (*os.File, error) {
+ flags := os.O_RDWR | os.O_CREATE | os.O_EXCL
+ if sync {
+ flags |= datasyncFileFlag
+ }
+ return os.OpenFile(filename, flags, 0666)
+}
+
+// OpenSyncedFile creates the file if one doesn't exist.
+func OpenSyncedFile(filename string, sync bool) (*os.File, error) {
+ flags := os.O_RDWR | os.O_CREATE
+ if sync {
+ flags |= datasyncFileFlag
+ }
+ return os.OpenFile(filename, flags, 0666)
+}
+
+// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC
+func OpenTruncFile(filename string, sync bool) (*os.File, error) {
+ flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
+ if sync {
+ flags |= datasyncFileFlag
+ }
+ return os.OpenFile(filename, flags, 0666)
+}
+
+// SafeCopy does append(a[:0], src...).
+func SafeCopy(a, src []byte) []byte {
+ return append(a[:0], src...)
+}
+
+// Copy copies a byte slice and returns the copied slice.
+func Copy(a []byte) []byte {
+ b := make([]byte, len(a))
+ copy(b, a)
+ return b
+}
+
+// KeyWithTs generates a new key by appending ts to key.
+func KeyWithTs(key []byte, ts uint64) []byte {
+ out := make([]byte, len(key)+8)
+ copy(out, key)
+ binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
+ return out
+}
+
+// ParseTs parses the timestamp from the key bytes.
+func ParseTs(key []byte) uint64 {
+ if len(key) <= 8 {
+ return 0
+ }
+ return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
+}
+
+// CompareKeys compares the keys ignoring the timestamp suffix, and compares the
+// timestamps only if the keys without timestamps are equal.
+// a would be sorted higher than aa if we use bytes.Compare.
+// All keys should have a timestamp.
+func CompareKeys(key1, key2 []byte) int {
+ AssertTrue(len(key1) > 8 && len(key2) > 8)
+ if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 {
+ return cmp
+ }
+ return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:])
+}
+
+// ParseKey parses the actual key from the key bytes.
+func ParseKey(key []byte) []byte {
+ if key == nil {
+ return nil
+ }
+
+ AssertTrue(len(key) > 8)
+ return key[:len(key)-8]
+}
+
+// SameKey checks for key equality ignoring the version timestamp suffix.
+func SameKey(src, dst []byte) bool {
+ if len(src) != len(dst) {
+ return false
+ }
+ return bytes.Equal(ParseKey(src), ParseKey(dst))
+}
+
+// Slice holds a reusable buf and will reallocate if you request a larger size than ever before.
+// One problem: with n distinct sizes requested in random order, it'll reallocate log(n) times.
+type Slice struct {
+ buf []byte
+}
+
+// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of
+// length sz.
+func (s *Slice) Resize(sz int) []byte {
+ if cap(s.buf) < sz {
+ s.buf = make([]byte, sz)
+ }
+ return s.buf[0:sz]
+}
+
+// FixedDuration returns a string representation of the given duration with the
+// hours, minutes, and seconds.
+func FixedDuration(d time.Duration) string {
+ str := fmt.Sprintf("%02ds", int(d.Seconds())%60)
+ if d >= time.Minute {
+ str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str
+ }
+ if d >= time.Hour {
+ str = fmt.Sprintf("%02dh", int(d.Hours())) + str
+ }
+ return str
+}
+
+// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan
+// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting
+// down.
+type Closer struct {
+ closed chan struct{}
+ waiting sync.WaitGroup
+}
+
+// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
+func NewCloser(initial int) *Closer {
+ ret := &Closer{closed: make(chan struct{})}
+ ret.waiting.Add(initial)
+ return ret
+}
+
+// AddRunning Add()'s delta to the WaitGroup.
+func (lc *Closer) AddRunning(delta int) {
+ lc.waiting.Add(delta)
+}
+
+// Signal signals the HasBeenClosed signal.
+func (lc *Closer) Signal() {
+ close(lc.closed)
+}
+
+// HasBeenClosed gets signaled when Signal() is called.
+func (lc *Closer) HasBeenClosed() <-chan struct{} {
+ if lc == nil {
+ return dummyCloserChan
+ }
+ return lc.closed
+}
+
+// Done calls Done() on the WaitGroup.
+func (lc *Closer) Done() {
+ if lc == nil {
+ return
+ }
+ lc.waiting.Done()
+}
+
+// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
+// calls to balance out.)
+func (lc *Closer) Wait() {
+ lc.waiting.Wait()
+}
+
+// SignalAndWait calls Signal(), then Wait().
+func (lc *Closer) SignalAndWait() {
+ lc.Signal()
+ lc.Wait()
+}
+
+// Throttle allows a limited number of workers to run at a time. It also
+// provides a mechanism to check for errors encountered by workers and wait for
+// them to finish.
+type Throttle struct {
+ once sync.Once
+ wg sync.WaitGroup
+ ch chan struct{}
+ errCh chan error
+ finishErr error
+}
+
+// NewThrottle creates a new throttle with a max number of workers.
+func NewThrottle(max int) *Throttle {
+ return &Throttle{
+ ch: make(chan struct{}, max),
+ errCh: make(chan error, max),
+ }
+}
+
+// Do should be called by workers before they start working. It blocks if the
+// maximum number of workers is already working. If it detects an error from a
+// previously Done worker, it returns that error.
+func (t *Throttle) Do() error {
+ for {
+ select {
+ case t.ch <- struct{}{}:
+ t.wg.Add(1)
+ return nil
+ case err := <-t.errCh:
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+// Done should be called by workers when they finish working. They can also
+// pass the error status of work done.
+func (t *Throttle) Done(err error) {
+ if err != nil {
+ t.errCh <- err
+ }
+ select {
+ case <-t.ch:
+ default:
+ panic("Throttle Do Done mismatch")
+ }
+ t.wg.Done()
+}
+
+// Finish waits until all workers have finished working. It returns any error passed by Done.
+// If Finish is called multiple times, it waits for the workers only on the first call;
+// subsequent calls return the same error as the first call.
+func (t *Throttle) Finish() error {
+ t.once.Do(func() {
+ t.wg.Wait()
+ close(t.ch)
+ close(t.errCh)
+ for err := range t.errCh {
+ if err != nil {
+ t.finishErr = err
+ return
+ }
+ }
+ })
+
+ return t.finishErr
+}
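Throttle's Do/Done/Finish contract reads naturally as a bounded worker pool; a short usage sketch against the vendored package path (worker count and sleep are illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/badger/y"
)

func main() {
	t := y.NewThrottle(3) // at most 3 workers at a time

	for i := 0; i < 10; i++ {
		if err := t.Do(); err != nil {
			fmt.Println("stopping early:", err)
			break
		}
		go func(i int) {
			time.Sleep(10 * time.Millisecond) // pretend to work
			t.Done(nil)                       // report success (or pass an error)
		}(i)
	}

	if err := t.Finish(); err != nil {
		fmt.Println("worker failed:", err)
	}
}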
diff --git a/vendor/github.com/dgryski/go-farm/.gitignore b/vendor/github.com/dgryski/go-farm/.gitignore
new file mode 100644
index 000000000..36029ab5e
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+*.exe
+*.test
+*.prof
+
+target
diff --git a/vendor/github.com/dgryski/go-farm/.travis.yml b/vendor/github.com/dgryski/go-farm/.travis.yml
new file mode 100644
index 000000000..bc89a55d1
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/.travis.yml
@@ -0,0 +1,39 @@
+language: go
+
+sudo: false
+
+branches:
+ except:
+ - release
+
+branches:
+ only:
+ - master
+ - develop
+ - travis
+
+go:
+ - 1.11.x
+ - 1.12.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+before_install:
+ - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
+ - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
+ - go get github.com/mattn/goveralls
+
+before_script:
+ - make deps
+
+script:
+ - make qa
+
+after_failure:
+ - cat ./target/test/report.xml
+
+after_success:
+ - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
diff --git a/vendor/github.com/dgryski/go-farm/LICENSE b/vendor/github.com/dgryski/go-farm/LICENSE
new file mode 100644
index 000000000..3d07f6662
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/LICENSE
@@ -0,0 +1,23 @@
+As this is a highly derivative work, I have placed it under the same license as the original implementation:
+
+Copyright (c) 2014-2017 Damian Gryski
+Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/dgryski/go-farm/Makefile b/vendor/github.com/dgryski/go-farm/Makefile
new file mode 100644
index 000000000..c189c95dd
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/Makefile
@@ -0,0 +1,203 @@
+# MAKEFILE
+#
+# @author Nicola Asuni
+# @link https://github.com/dgryski/go-farm
+#
+# This file is intended to be executed in a Linux-compatible system.
+# It also assumes that the project has been cloned in the right path under GOPATH:
+# $GOPATH/src/github.com/dgryski/go-farm
+#
+# ------------------------------------------------------------------------------
+
+# List special make targets that are not associated with files
+.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke
+
+# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS).
+SHELL=/bin/bash
+
+# CVS path (path to the parent dir containing the project)
+CVSPATH=github.com/dgryski
+
+# Project owner
+OWNER=dgryski
+
+# Project vendor
+VENDOR=dgryski
+
+# Project name
+PROJECT=go-farm
+
+# Project version
+VERSION=$(shell cat VERSION)
+
+# Name of RPM or DEB package
+PKGNAME=${VENDOR}-${PROJECT}
+
+# Current directory
+CURRENTDIR=$(shell pwd)
+
+# GO lang path
+ifneq ($(GOPATH),)
+ ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),)
+ # the defined GOPATH is not valid
+ GOPATH=
+ endif
+endif
+ifeq ($(GOPATH),)
+ # extract the GOPATH
+ GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR)))
+endif
+
+# --- MAKE TARGETS ---
+
+# Display general help about this command
+help:
+ @echo ""
+ @echo "$(PROJECT) Makefile."
+ @echo "GOPATH=$(GOPATH)"
+ @echo "The following commands are available:"
+ @echo ""
+ @echo " make qa : Run all the tests"
+ @echo " make test : Run the unit tests"
+ @echo ""
+ @echo " make format : Format the source code"
+ @echo " make fmtcheck : Check if the source code has been formatted"
+ @echo " make vet : Check for suspicious constructs"
+ @echo " make lint : Check for style errors"
+ @echo " make coverage : Generate the coverage report"
+ @echo " make cyclo : Generate the cyclomatic complexity report"
+ @echo " make ineffassign : Detect ineffectual assignments"
+ @echo " make misspell : Detect commonly misspelled words in source files"
+ @echo " make structcheck : Find unused struct fields"
+ @echo " make varcheck : Find unused global variables and constants"
+ @echo " make errcheck : Check that error return values are used"
+ @echo " make gosimple : Suggest code simplifications"
+ @echo " make astscan : GO AST scanner"
+ @echo ""
+ @echo " make docs : Generate source code documentation"
+ @echo ""
+ @echo " make deps : Get the dependencies"
+ @echo " make clean : Remove any build artifact"
+ @echo " make nuke : Deletes any intermediate file"
+ @echo ""
+
+
+# Alias for help target
+all: help
+
+# Run the unit tests
+test:
+ @mkdir -p target/test
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) \
+ go test \
+ -covermode=atomic \
+ -bench=. \
+ -race \
+ -cpuprofile=target/report/cpu.out \
+ -memprofile=target/report/mem.out \
+ -mutexprofile=target/report/mutex.out \
+ -coverprofile=target/report/coverage.out \
+ -v ./... | \
+ tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \
+ test $${PIPESTATUS[0]} -eq 0
+
+# Format the source code
+format:
+ @find . -type f -name "*.go" -exec gofmt -s -w {} \;
+
+# Check if the source code has been formatted
+fmtcheck:
+ @mkdir -p target
+ @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff
+ @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
+
+# Check for syntax errors
+vet:
+ GOPATH=$(GOPATH) go vet .
+
+# Check for style errors
+lint:
+ GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint .
+
+# Generate the coverage report
+coverage:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) \
+ go tool cover -html=target/report/coverage.out -o target/report/coverage.html
+
+# Report cyclomatic complexity
+cyclo:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0
+
+# Detect ineffectual assignments
+ineffassign:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0
+
+# Detect commonly misspelled words in source files
+misspell:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0
+
+# Find unused struct fields
+structcheck:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt
+
+# Find unused global variables and constants
+varcheck:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt
+
+# Check that error return values are used
+errcheck:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt
+
+# Suggest code simplifications
+gosimple:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt
+
+# AST scanner
+astscan:
+ @mkdir -p target/report
+ GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt
+
+# Generate source docs
+docs:
+ @mkdir -p target/docs
+ nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 &
+ wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060`
+ @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html
+
+# Alias to run all quality-assurance checks
+qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan
+
+# --- INSTALL ---
+
+# Get the dependencies
+deps:
+ GOPATH=$(GOPATH) go get ./...
+ GOPATH=$(GOPATH) go get golang.org/x/lint/golint
+ GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report
+ GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov
+ GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo
+ GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign
+ GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell
+ GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck
+ GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck
+ GOPATH=$(GOPATH) go get github.com/kisielk/errcheck
+ GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple
+ GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas
+
+# Remove any build artifact
+clean:
+ GOPATH=$(GOPATH) go clean ./...
+
+# Deletes any intermediate file
+nuke:
+ rm -rf ./target
+ GOPATH=$(GOPATH) go clean -i ./...
diff --git a/vendor/github.com/dgryski/go-farm/README.md b/vendor/github.com/dgryski/go-farm/README.md
new file mode 100644
index 000000000..dd07d6f99
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/README.md
@@ -0,0 +1,41 @@
+# go-farm
+
+*Google's FarmHash hash functions implemented in Go*
+
+[](https://github.com/dgryski/go-farm/tree/master)
+[](https://travis-ci.org/dgryski/go-farm?branch=master)
+[](https://coveralls.io/github/dgryski/go-farm?branch=master)
+[](https://goreportcard.com/report/github.com/dgryski/go-farm)
+[](http://godoc.org/github.com/dgryski/go-farm)
+
+## Description
+
+FarmHash, a family of hash functions.
+
+This is a (mechanical) translation of the non-SSE4/non-AESNI hash functions from Google's FarmHash (https://github.com/google/farmhash).
+
+
+FarmHash provides hash functions for strings and other data.
+The functions mix the input bits thoroughly but are not suitable for cryptography.
+
+All members of the FarmHash family were designed with heavy reliance on previous work by Jyrki Alakuijala, Austin Appleby, Bob Jenkins, and others.
+
+For more information please consult https://github.com/google/farmhash
+
+
+## Getting started
+
+This application is written in the Go language; please refer to the guides at https://golang.org for getting started.
+
+This project includes a Makefile that allows you to test and build the project with simple commands.
+To see all available options:
+```bash
+make help
+```
+
+## Running all tests
+
+Before committing the code, please check if it passes all tests using
+```bash
+make qa
+```
diff --git a/vendor/github.com/dgryski/go-farm/VERSION b/vendor/github.com/dgryski/go-farm/VERSION
new file mode 100644
index 000000000..38f77a65b
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/VERSION
@@ -0,0 +1 @@
+2.0.1
diff --git a/vendor/github.com/dgryski/go-farm/asm.go b/vendor/github.com/dgryski/go-farm/asm.go
new file mode 100644
index 000000000..1d69eebb0
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/asm.go
@@ -0,0 +1,898 @@
+// +build ignore
+
+package main
+
+import (
+ "flag"
+
+ . "github.com/mmcloughlin/avo/build"
+ . "github.com/mmcloughlin/avo/operand"
+ . "github.com/mmcloughlin/avo/reg"
+)
+
+const k0 uint64 = 0xc3a5c85c97cb3127
+const k1 uint64 = 0xb492b66fbe98f273
+const k2 uint64 = 0x9ae16a3b2f90404f
+
+const c1 uint32 = 0xcc9e2d51
+const c2 uint32 = 0x1b873593
+
+func shiftMix(val GPVirtual) GPVirtual {
+ r := GP64()
+ MOVQ(val, r)
+ SHRQ(Imm(47), r)
+ XORQ(val, r)
+ return r
+}
+
+func shiftMix64(val uint64) uint64 {
+ return val ^ (val >> 47)
+}
+
+func hashLen16MulLine(a, b, c, d, k, mul GPVirtual) GPVirtual {
+ tmpa := GP64()
+ MOVQ(a, tmpa)
+
+ ADDQ(b, tmpa)
+ RORQ(Imm(43), tmpa)
+ ADDQ(d, tmpa)
+ tmpc := GP64()
+ MOVQ(c, tmpc)
+ RORQ(Imm(30), tmpc)
+ ADDQ(tmpc, tmpa)
+
+ ADDQ(c, a)
+ ADDQ(k, b)
+ RORQ(Imm(18), b)
+ ADDQ(b, a)
+
+ r := hashLen16Mul(tmpa, a, mul)
+ return r
+}
+
+func hashLen16Mul(u, v, mul GPVirtual) GPVirtual {
+ XORQ(v, u)
+ IMULQ(mul, u)
+ a := shiftMix(u)
+
+ XORQ(a, v)
+ IMULQ(mul, v)
+ b := shiftMix(v)
+
+ IMULQ(mul, b)
+
+ return b
+}
+
+func hashLen0to16(sbase, slen GPVirtual) {
+ CMPQ(slen, Imm(8))
+ JL(LabelRef("check4"))
+ {
+ a := GP64()
+ MOVQ(Mem{Base: sbase}, a)
+
+ b := GP64()
+ t := GP64()
+ MOVQ(slen, t)
+ SUBQ(Imm(8), t)
+ ADDQ(sbase, t)
+ MOVQ(Mem{Base: t}, b)
+
+ rk2 := GP64()
+ MOVQ(Imm(k2), rk2)
+
+ ADDQ(rk2, a)
+
+ mul := slen
+ SHLQ(Imm(1), mul)
+ ADDQ(rk2, mul)
+
+ c := GP64()
+ MOVQ(b, c)
+ RORQ(Imm(37), c)
+ IMULQ(mul, c)
+ ADDQ(a, c)
+
+ d := GP64()
+ MOVQ(a, d)
+ RORQ(Imm(25), d)
+ ADDQ(b, d)
+ IMULQ(mul, d)
+
+ r := hashLen16Mul(c, d, mul)
+ Store(r, ReturnIndex(0))
+ RET()
+ }
+
+ Label("check4")
+
+ CMPQ(slen, Imm(4))
+ JL(LabelRef("check0"))
+ {
+ rk2 := GP64()
+ MOVQ(Imm(k2), rk2)
+
+ mul := GP64()
+ MOVQ(slen, mul)
+ SHLQ(Imm(1), mul)
+ ADDQ(rk2, mul)
+
+ a := GP64()
+ MOVL(Mem{Base: sbase}, a.As32())
+
+ SHLQ(Imm(3), a)
+ ADDQ(slen, a)
+
+ b := GP64()
+ SUBQ(Imm(4), slen)
+ ADDQ(slen, sbase)
+ MOVL(Mem{Base: sbase}, b.As32())
+ r := hashLen16Mul(a, b, mul)
+
+ Store(r, ReturnIndex(0))
+ RET()
+ }
+
+ Label("check0")
+ TESTQ(slen, slen)
+ JZ(LabelRef("empty"))
+ {
+
+ a := GP64()
+ MOVBQZX(Mem{Base: sbase}, a)
+
+ base := GP64()
+ MOVQ(slen, base)
+ SHRQ(Imm(1), base)
+
+ b := GP64()
+ ADDQ(sbase, base)
+ MOVBQZX(Mem{Base: base}, b)
+
+ MOVQ(slen, base)
+ SUBQ(Imm(1), base)
+ c := GP64()
+ ADDQ(sbase, base)
+ MOVBQZX(Mem{Base: base}, c)
+
+ SHLQ(Imm(8), b)
+ ADDQ(b, a)
+ y := a
+
+ SHLQ(Imm(2), c)
+ ADDQ(c, slen)
+ z := slen
+
+ rk0 := GP64()
+ MOVQ(Imm(k0), rk0)
+ IMULQ(rk0, z)
+
+ rk2 := GP64()
+ MOVQ(Imm(k2), rk2)
+
+ IMULQ(rk2, y)
+ XORQ(y, z)
+
+ r := shiftMix(z)
+ IMULQ(rk2, r)
+
+ Store(r, ReturnIndex(0))
+ RET()
+ }
+
+ Label("empty")
+
+ ret := GP64()
+ MOVQ(Imm(k2), ret)
+ Store(ret, ReturnIndex(0))
+ RET()
+}
+
+func hashLen17to32(sbase, slen GPVirtual) {
+ mul := GP64()
+ MOVQ(slen, mul)
+ SHLQ(Imm(1), mul)
+
+ rk2 := GP64()
+ MOVQ(Imm(k2), rk2)
+ ADDQ(rk2, mul)
+
+ a := GP64()
+ MOVQ(Mem{Base: sbase}, a)
+
+ rk1 := GP64()
+ MOVQ(Imm(k1), rk1)
+ IMULQ(rk1, a)
+
+ b := GP64()
+ MOVQ(Mem{Base: sbase, Disp: 8}, b)
+
+ base := GP64()
+ MOVQ(slen, base)
+ SUBQ(Imm(16), base)
+ ADDQ(sbase, base)
+
+ c := GP64()
+ MOVQ(Mem{Base: base, Disp: 8}, c)
+ IMULQ(mul, c)
+
+ d := GP64()
+ MOVQ(Mem{Base: base}, d)
+ IMULQ(rk2, d)
+
+ r := hashLen16MulLine(a, b, c, d, rk2, mul)
+ Store(r, ReturnIndex(0))
+ RET()
+}
+
+// Return an 8-byte hash for 33 to 64 bytes.
+func hashLen33to64(sbase, slen GPVirtual) {
+ mul := GP64()
+ MOVQ(slen, mul)
+ SHLQ(Imm(1), mul)
+
+ rk2 := GP64()
+ MOVQ(Imm(k2), rk2)
+ ADDQ(rk2, mul)
+
+ a := GP64()
+ MOVQ(Mem{Base: sbase}, a)
+ IMULQ(rk2, a)
+
+ b := GP64()
+ MOVQ(Mem{Base: sbase, Disp: 8}, b)
+
+ base := GP64()
+ MOVQ(slen, base)
+ SUBQ(Imm(16), base)
+ ADDQ(sbase, base)
+
+ c := GP64()
+ MOVQ(Mem{Base: base, Disp: 8}, c)
+ IMULQ(mul, c)
+
+ d := GP64()
+ MOVQ(Mem{Base: base}, d)
+ IMULQ(rk2, d)
+
+ y := GP64()
+ MOVQ(a, y)
+
+ ADDQ(b, y)
+ RORQ(Imm(43), y)
+ ADDQ(d, y)
+ tmpc := GP64()
+ MOVQ(c, tmpc)
+ RORQ(Imm(30), tmpc)
+ ADDQ(tmpc, y)
+
+ ADDQ(a, c)
+ ADDQ(rk2, b)
+ RORQ(Imm(18), b)
+ ADDQ(b, c)
+
+ tmpy := GP64()
+ MOVQ(y, tmpy)
+ z := hashLen16Mul(tmpy, c, mul)
+
+ e := GP64()
+ MOVQ(Mem{Base: sbase, Disp: 16}, e)
+ IMULQ(mul, e)
+
+ f := GP64()
+ MOVQ(Mem{Base: sbase, Disp: 24}, f)
+
+ base = GP64()
+ MOVQ(slen, base)
+ SUBQ(Imm(32), base)
+ ADDQ(sbase, base)
+ g := GP64()
+ MOVQ(Mem{Base: base}, g)
+ ADDQ(y, g)
+ IMULQ(mul, g)
+
+ h := GP64()
+ MOVQ(Mem{Base: base, Disp: 8}, h)
+ ADDQ(z, h)
+ IMULQ(mul, h)
+
+ r := hashLen16MulLine(e, f, g, h, a, mul)
+ Store(r, ReturnIndex(0))
+ RET()
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
+func weakHashLen32WithSeeds(sbase GPVirtual, disp int, a, b GPVirtual) {
+
+ w := Mem{Base: sbase, Disp: disp + 0}
+ x := Mem{Base: sbase, Disp: disp + 8}
+ y := Mem{Base: sbase, Disp: disp + 16}
+ z := Mem{Base: sbase, Disp: disp + 24}
+
+ // a += w
+ ADDQ(w, a)
+
+ // b = bits.RotateLeft64(b+a+z, -21)
+ ADDQ(a, b)
+ ADDQ(z, b)
+ RORQ(Imm(21), b)
+
+ // c := a
+ c := GP64()
+ MOVQ(a, c)
+
+ // a += x
+ // a += y
+ ADDQ(x, a)
+ ADDQ(y, a)
+
+ // b += bits.RotateLeft64(a, -44)
+ atmp := GP64()
+ MOVQ(a, atmp)
+ RORQ(Imm(44), atmp)
+ ADDQ(atmp, b)
+
+ // a += z
+ // b += c
+ ADDQ(z, a)
+ ADDQ(c, b)
+
+ XCHGQ(a, b)
+}
+
+func hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase GPVirtual, mul1 GPVirtual, mul2 uint64) {
+ ADDQ(y, x)
+ ADDQ(vlo, x)
+ ADDQ(Mem{Base: sbase, Disp: 8}, x)
+ RORQ(Imm(37), x)
+
+ IMULQ(mul1, x)
+
+ ADDQ(vhi, y)
+ ADDQ(Mem{Base: sbase, Disp: 48}, y)
+ RORQ(Imm(42), y)
+ IMULQ(mul1, y)
+
+ if mul2 != 1 {
+ t := GP64()
+ MOVQ(U32(mul2), t)
+ IMULQ(whi, t)
+ XORQ(t, x)
+ } else {
+ XORQ(whi, x)
+ }
+
+ if mul2 != 1 {
+ t := GP64()
+ MOVQ(U32(mul2), t)
+ IMULQ(vlo, t)
+ ADDQ(t, y)
+ } else {
+ ADDQ(vlo, y)
+ }
+
+ ADDQ(Mem{Base: sbase, Disp: 40}, y)
+
+ ADDQ(wlo, z)
+ RORQ(Imm(33), z)
+ IMULQ(mul1, z)
+
+ {
+ IMULQ(mul1, vhi)
+ MOVQ(x, vlo)
+ ADDQ(wlo, vlo)
+ weakHashLen32WithSeeds(sbase, 0, vhi, vlo)
+ }
+
+ {
+ ADDQ(z, whi)
+ MOVQ(y, wlo)
+ ADDQ(Mem{Base: sbase, Disp: 16}, wlo)
+ weakHashLen32WithSeeds(sbase, 32, whi, wlo)
+ }
+
+ XCHGQ(z, x)
+}
+
+func fp64() {
+
+ TEXT("Fingerprint64", NOSPLIT, "func(s []byte) uint64")
+
+ slen := GP64()
+ sbase := GP64()
+
+ Load(Param("s").Base(), sbase)
+ Load(Param("s").Len(), slen)
+
+ CMPQ(slen, Imm(16))
+ JG(LabelRef("check32"))
+ hashLen0to16(sbase, slen)
+
+ Label("check32")
+ CMPQ(slen, Imm(32))
+ JG(LabelRef("check64"))
+ hashLen17to32(sbase, slen)
+
+ Label("check64")
+ CMPQ(slen, Imm(64))
+ JG(LabelRef("long"))
+ hashLen33to64(sbase, slen)
+
+ Label("long")
+
+ seed := uint64(81)
+
+ vlo, vhi, wlo, whi := GP64(), GP64(), GP64(), GP64()
+ XORQ(vlo, vlo)
+ XORQ(vhi, vhi)
+ XORQ(wlo, wlo)
+ XORQ(whi, whi)
+
+ x := GP64()
+
+ eightOne := uint64(81)
+
+ MOVQ(Imm(eightOne*k2), x)
+ ADDQ(Mem{Base: sbase}, x)
+
+ y := GP64()
+ y64 := uint64(seed*k1) + 113
+ MOVQ(Imm(y64), y)
+
+ z := GP64()
+ MOVQ(Imm(shiftMix64(y64*k2+113)*k2), z)
+
+ endIdx := GP64()
+ MOVQ(slen, endIdx)
+ tmp := GP64()
+ SUBQ(Imm(1), endIdx)
+ MOVQ(U64(^uint64(63)), tmp)
+ ANDQ(tmp, endIdx)
+ last64Idx := GP64()
+ MOVQ(slen, last64Idx)
+ SUBQ(Imm(1), last64Idx)
+ ANDQ(Imm(63), last64Idx)
+ SUBQ(Imm(63), last64Idx)
+ ADDQ(endIdx, last64Idx)
+
+ last64 := GP64()
+ MOVQ(last64Idx, last64)
+ ADDQ(sbase, last64)
+
+ end := GP64()
+ MOVQ(slen, end)
+
+ Label("loop")
+
+ rk1 := GP64()
+ MOVQ(Imm(k1), rk1)
+
+ hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase, rk1, 1)
+
+ ADDQ(Imm(64), sbase)
+ SUBQ(Imm(64), end)
+ CMPQ(end, Imm(64))
+ JG(LabelRef("loop"))
+
+ MOVQ(last64, sbase)
+
+ mul := GP64()
+ MOVQ(z, mul)
+ ANDQ(Imm(0xff), mul)
+ SHLQ(Imm(1), mul)
+ ADDQ(rk1, mul)
+
+ MOVQ(last64, sbase)
+
+ SUBQ(Imm(1), slen)
+ ANDQ(Imm(63), slen)
+ ADDQ(slen, wlo)
+
+ ADDQ(wlo, vlo)
+ ADDQ(vlo, wlo)
+
+ hashLoopBody(x, y, z, vlo, vhi, wlo, whi, sbase, mul, 9)
+
+ {
+ a := hashLen16Mul(vlo, wlo, mul)
+ ADDQ(z, a)
+ b := shiftMix(y)
+ rk0 := GP64()
+ MOVQ(Imm(k0), rk0)
+ IMULQ(rk0, b)
+ ADDQ(b, a)
+
+ c := hashLen16Mul(vhi, whi, mul)
+ ADDQ(x, c)
+
+ r := hashLen16Mul(a, c, mul)
+ Store(r, ReturnIndex(0))
+ }
+
+ RET()
+}
+
+func fmix(h GPVirtual) GPVirtual {
+ h2 := GP32()
+ MOVL(h, h2)
+ SHRL(Imm(16), h2)
+ XORL(h2, h)
+
+ MOVL(Imm(0x85ebca6b), h2)
+ IMULL(h2, h)
+
+ MOVL(h, h2)
+ SHRL(Imm(13), h2)
+ XORL(h2, h)
+
+ MOVL(Imm(0xc2b2ae35), h2)
+ IMULL(h2, h)
+
+ MOVL(h, h2)
+ SHRL(Imm(16), h2)
+ XORL(h2, h)
+ return h
+}
+
+func mur(a, h GPVirtual) GPVirtual {
+ imul3l(c1, a, a)
+ RORL(Imm(17), a)
+ imul3l(c2, a, a)
+ XORL(a, h)
+ RORL(Imm(19), h)
+
+ LEAL(Mem{Base: h, Index: h, Scale: 4}, a)
+ LEAL(Mem{Base: a, Disp: 0xe6546b64}, h)
+
+ return h
+}
+
+func hash32Len5to12(sbase, slen GPVirtual) {
+
+ a := GP32()
+ MOVL(slen.As32(), a)
+ b := GP32()
+ MOVL(a, b)
+ SHLL(Imm(2), b)
+ ADDL(a, b)
+
+ c := GP32()
+ MOVL(U32(9), c)
+
+ d := GP32()
+ MOVL(b, d)
+
+ ADDL(Mem{Base: sbase, Disp: 0}, a)
+
+ t := GP64()
+ MOVQ(slen, t)
+ SUBQ(Imm(4), t)
+ ADDQ(sbase, t)
+ ADDL(Mem{Base: t}, b)
+
+ MOVQ(slen, t)
+ SHRQ(Imm(1), t)
+ ANDQ(Imm(4), t)
+ ADDQ(sbase, t)
+ ADDL(Mem{Base: t}, c)
+
+ t = mur(a, d)
+ t = mur(b, t)
+ t = mur(c, t)
+ t = fmix(t)
+
+ Store(t, ReturnIndex(0))
+ RET()
+}
+
+func hash32Len13to24Seed(sbase, slen GPVirtual) {
+ slen2 := GP64()
+ MOVQ(slen, slen2)
+ SHRQ(Imm(1), slen2)
+ ADDQ(sbase, slen2)
+
+ a := GP32()
+ MOVL(Mem{Base: slen2, Disp: -4}, a)
+
+ b := GP32()
+ MOVL(Mem{Base: sbase, Disp: 4}, b)
+
+ send := GP64()
+ MOVQ(slen, send)
+ ADDQ(sbase, send)
+
+ c := GP32()
+ MOVL(Mem{Base: send, Disp: -8}, c)
+
+ d := GP32()
+ MOVL(Mem{Base: slen2}, d)
+
+ e := GP32()
+ MOVL(Mem{Base: sbase}, e)
+
+ f := GP32()
+ MOVL(Mem{Base: send, Disp: -4}, f)
+
+ h := GP32()
+ MOVL(U32(c1), h)
+ IMULL(d, h)
+ ADDL(slen.As32(), h)
+
+ RORL(Imm(12), a)
+ ADDL(f, a)
+
+ ctmp := GP32()
+ MOVL(c, ctmp)
+ h = mur(ctmp, h)
+ ADDL(a, h)
+
+ RORL(Imm(3), a)
+ ADDL(c, a)
+
+ h = mur(e, h)
+ ADDL(a, h)
+
+ ADDL(f, a)
+ RORL(Imm(12), a)
+ ADDL(d, a)
+
+ h = mur(b, h)
+ ADDL(a, h)
+
+ h = fmix(h)
+
+ Store(h, ReturnIndex(0))
+ RET()
+}
+
+func hash32Len0to4(sbase, slen GPVirtual) {
+ b := GP32()
+ c := GP32()
+
+ XORL(b, b)
+ MOVL(U32(9), c)
+
+ TESTQ(slen, slen)
+ JZ(LabelRef("done"))
+
+ l := GP64()
+ v := GP32()
+ MOVQ(slen, l)
+
+ c1reg := GP32()
+ MOVL(U32(c1), c1reg)
+
+ for i := 0; i < 4; i++ {
+ IMULL(c1reg, b)
+ MOVBLSX(Mem{Base: sbase, Disp: i}, v)
+ ADDL(v, b)
+ XORL(b, c)
+ SUBQ(Imm(1), l)
+ TESTQ(l, l)
+ JZ(LabelRef("done"))
+ }
+
+ Label("done")
+
+ s32 := GP32()
+ MOVL(slen.As32(), s32)
+ r := mur(s32, c)
+ r = mur(b, r)
+ r = fmix(r)
+
+ Store(r, ReturnIndex(0))
+ RET()
+}
+
+func fp32() {
+
+ TEXT("Fingerprint32", NOSPLIT, "func(s []byte) uint32")
+
+ sbase := GP64()
+ slen := GP64()
+
+ Load(Param("s").Base(), sbase)
+ Load(Param("s").Len(), slen)
+
+ CMPQ(slen, Imm(24))
+ JG(LabelRef("long"))
+
+ CMPQ(slen, Imm(12))
+ JG(LabelRef("hash_13_24"))
+
+ CMPQ(slen, Imm(4))
+ JG(LabelRef("hash_5_12"))
+ hash32Len0to4(sbase, slen)
+
+ Label("hash_5_12")
+ hash32Len5to12(sbase, slen)
+
+ Label("hash_13_24")
+ hash32Len13to24Seed(sbase, slen)
+
+ Label("long")
+
+ h := GP32()
+ MOVL(slen.As32(), h)
+
+ g := GP32()
+ MOVL(U32(c1), g)
+ IMULL(h, g)
+
+ f := GP32()
+ MOVL(g, f)
+
+ // len > 24
+
+ send := GP64()
+ MOVQ(slen, send)
+ ADDQ(sbase, send)
+ c1reg := GP32()
+ MOVL(U32(c1), c1reg)
+ c2reg := GP32()
+ MOVL(U32(c2), c2reg)
+
+ shuf := func(r GPVirtual, disp int) {
+ a := GP32()
+ MOVL(Mem{Base: send, Disp: disp}, a)
+ IMULL(c1reg, a)
+ RORL(Imm(17), a)
+ IMULL(c2reg, a)
+ XORL(a, r)
+ RORL(Imm(19), r)
+ MOVL(r, a)
+ SHLL(Imm(2), a)
+ ADDL(a, r)
+ ADDL(Imm(0xe6546b64), r)
+ }
+
+ shuf(h, -4)
+ shuf(g, -8)
+ shuf(h, -16)
+ shuf(g, -12)
+
+ PREFETCHT0(Mem{Base: sbase})
+ {
+ a := GP32()
+ MOVL(Mem{Base: send, Disp: -20}, a)
+ IMULL(c1reg, a)
+ RORL(Imm(17), a)
+ IMULL(c2reg, a)
+
+ ADDL(a, f)
+ RORL(Imm(19), f)
+ ADDL(Imm(113), f)
+
+ }
+
+ loop32Body := func(f, g, h, sbase, slen GPVirtual, disp int) {
+ a, b, c, d, e := GP32(), GP32(), GP32(), GP32(), GP32()
+
+ MOVL(Mem{Base: sbase, Disp: disp + 0}, a)
+ ADDL(a, h)
+
+ MOVL(Mem{Base: sbase, Disp: disp + 4}, b)
+ ADDL(b, g)
+
+ MOVL(Mem{Base: sbase, Disp: disp + 8}, c)
+ ADDL(c, f)
+
+ MOVL(Mem{Base: sbase, Disp: disp + 12}, d)
+ t := GP32()
+ MOVL(d, t)
+ h = mur(t, h)
+
+ MOVL(Mem{Base: sbase, Disp: disp + 16}, e)
+ ADDL(e, h)
+
+ MOVL(c, t)
+ g = mur(t, g)
+ ADDL(a, g)
+
+ imul3l(c1, e, t)
+ ADDL(b, t)
+ f = mur(t, f)
+ ADDL(d, f)
+
+ ADDL(g, f)
+ ADDL(f, g)
+ }
+
+ Label("loop80")
+ CMPQ(slen, Imm(80+20))
+ JL(LabelRef("loop20"))
+ {
+ PREFETCHT0(Mem{Base: sbase, Disp: 20})
+ loop32Body(f, g, h, sbase, slen, 0)
+ PREFETCHT0(Mem{Base: sbase, Disp: 40})
+ loop32Body(f, g, h, sbase, slen, 20)
+ PREFETCHT0(Mem{Base: sbase, Disp: 60})
+ loop32Body(f, g, h, sbase, slen, 40)
+ PREFETCHT0(Mem{Base: sbase, Disp: 80})
+ loop32Body(f, g, h, sbase, slen, 60)
+
+ ADDQ(Imm(80), sbase)
+ SUBQ(Imm(80), slen)
+ JMP(LabelRef("loop80"))
+ }
+
+ Label("loop20")
+ CMPQ(slen, Imm(20))
+ JLE(LabelRef("after"))
+ {
+ loop32Body(f, g, h, sbase, slen, 0)
+
+ ADDQ(Imm(20), sbase)
+ SUBQ(Imm(20), slen)
+ JMP(LabelRef("loop20"))
+ }
+
+ Label("after")
+
+ c1reg = GP32()
+ MOVL(U32(c1), c1reg)
+
+ RORL(Imm(11), g)
+ IMULL(c1reg, g)
+
+ RORL(Imm(17), g)
+ IMULL(c1reg, g)
+
+ RORL(Imm(11), f)
+ IMULL(c1reg, f)
+
+ RORL(Imm(17), f)
+ IMULL(c1reg, f)
+
+ ADDL(g, h)
+ RORL(Imm(19), h)
+
+ t := GP32()
+ MOVL(h, t)
+ SHLL(Imm(2), t)
+ ADDL(t, h)
+ ADDL(Imm(0xe6546b64), h)
+
+ RORL(Imm(17), h)
+ IMULL(c1reg, h)
+
+ ADDL(f, h)
+ RORL(Imm(19), h)
+
+ t = GP32()
+ MOVL(h, t)
+ SHLL(Imm(2), t)
+ ADDL(t, h)
+ ADDL(Imm(0xe6546b64), h)
+
+ RORL(Imm(17), h)
+ IMULL(c1reg, h)
+
+ Store(h, ReturnIndex(0))
+ RET()
+}
+
+var go111 = flag.Bool("go111", true, "use assembly instructions present in go1.11 and later")
+
+func imul3l(m uint32, x, y Register) {
+ if *go111 {
+ IMUL3L(U32(m), x, y)
+ } else {
+ t := GP32()
+ MOVL(U32(m), t)
+ IMULL(t, x)
+ MOVL(x, y)
+ }
+}
+
+func main() {
+
+ flag.Parse()
+
+ ConstraintExpr("amd64,!purego")
+
+ fp64()
+ fp32()
+
+ Generate()
+}
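For reference, a minimal sketch of how the functions emitted by this generator are consumed; the input bytes are purely illustrative. `Fingerprint64` and `Fingerprint32` are the exported entry points declared in `fp_stub.go`/`fp_generic.go` below, and the `purego`/`amd64` build tags shown there decide whether the generated assembly or the pure-Go fallback is used.

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("example input") // illustrative payload

	// On amd64 builds without the purego tag these calls dispatch to the
	// generated assembly in fp_amd64.s; otherwise the pure-Go versions run.
	fmt.Printf("Fingerprint64: %016x\n", farm.Fingerprint64(data))
	fmt.Printf("Fingerprint32: %08x\n", farm.Fingerprint32(data))
}
```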
diff --git a/vendor/github.com/dgryski/go-farm/basics.go b/vendor/github.com/dgryski/go-farm/basics.go
new file mode 100644
index 000000000..ec7076c03
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/basics.go
@@ -0,0 +1,32 @@
+package farm
+
+import "math/bits"
+
+// Some primes between 2^63 and 2^64 for various uses.
+const k0 uint64 = 0xc3a5c85c97cb3127
+const k1 uint64 = 0xb492b66fbe98f273
+const k2 uint64 = 0x9ae16a3b2f90404f
+
+// Magic numbers for 32-bit hashing. Copied from Murmur3.
+const c1 uint32 = 0xcc9e2d51
+const c2 uint32 = 0x1b873593
+
+// A 32-bit to 32-bit integer hash copied from Murmur3.
+func fmix(h uint32) uint32 {
+ h ^= h >> 16
+ h *= 0x85ebca6b
+ h ^= h >> 13
+ h *= 0xc2b2ae35
+ h ^= h >> 16
+ return h
+}
+
+func mur(a, h uint32) uint32 {
+ // Helper from Murmur3 for combining two 32-bit values.
+ a *= c1
+ a = bits.RotateLeft32(a, -17)
+ a *= c2
+ h ^= a
+ h = bits.RotateLeft32(h, -19)
+ return h*5 + 0xe6546b64
+}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashcc.go b/vendor/github.com/dgryski/go-farm/farmhashcc.go
new file mode 100644
index 000000000..3e68ae3a3
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/farmhashcc.go
@@ -0,0 +1,194 @@
+package farm
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+// This file provides a 32-bit hash equivalent to CityHash32 (v1.1.1)
+// and a 128-bit hash equivalent to CityHash128 (v1.1.1). It also provides
+// a seeded 32-bit hash function similar to CityHash32.
+
+func hash32Len13to24Seed(s []byte, seed uint32) uint32 {
+ slen := len(s)
+ a := binary.LittleEndian.Uint32(s[-4+(slen>>1) : -4+(slen>>1)+4])
+ b := binary.LittleEndian.Uint32(s[4 : 4+4])
+ c := binary.LittleEndian.Uint32(s[slen-8 : slen-8+4])
+ d := binary.LittleEndian.Uint32(s[(slen >> 1) : (slen>>1)+4])
+ e := binary.LittleEndian.Uint32(s[0 : 0+4])
+ f := binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
+ h := d*c1 + uint32(slen) + seed
+ a = bits.RotateLeft32(a, -12) + f
+ h = mur(c, h) + a
+ a = bits.RotateLeft32(a, -3) + c
+ h = mur(e, h) + a
+ a = bits.RotateLeft32(a+f, -12) + d
+ h = mur(b^seed, h) + a
+ return fmix(h)
+}
+
+func hash32Len0to4(s []byte, seed uint32) uint32 {
+ slen := len(s)
+ b := seed
+ c := uint32(9)
+ for i := 0; i < slen; i++ {
+ v := int8(s[i])
+ b = (b * c1) + uint32(v)
+ c ^= b
+ }
+ return fmix(mur(b, mur(uint32(slen), c)))
+}
+
+func hash128to64(x uint128) uint64 {
+ // Murmur-inspired hashing.
+ const mul uint64 = 0x9ddfea08eb382d69
+ a := (x.lo ^ x.hi) * mul
+ a ^= (a >> 47)
+ b := (x.hi ^ a) * mul
+ b ^= (b >> 47)
+ b *= mul
+ return b
+}
+
+type uint128 struct {
+ lo uint64
+ hi uint64
+}
+
+// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
+// of any length representable in signed long. Based on City and Murmur.
+func cityMurmur(s []byte, seed uint128) uint128 {
+ slen := len(s)
+ a := seed.lo
+ b := seed.hi
+ var c uint64
+ var d uint64
+ l := slen - 16
+ if l <= 0 { // len <= 16
+ a = shiftMix(a*k1) * k1
+ c = b*k1 + hashLen0to16(s)
+ if slen >= 8 {
+ d = shiftMix(a + binary.LittleEndian.Uint64(s[0:0+8]))
+ } else {
+ d = shiftMix(a + c)
+ }
+ } else { // len > 16
+ c = hashLen16(binary.LittleEndian.Uint64(s[slen-8:slen-8+8])+k1, a)
+ d = hashLen16(b+uint64(slen), c+binary.LittleEndian.Uint64(s[slen-16:slen-16+8]))
+ a += d
+ for {
+ a ^= shiftMix(binary.LittleEndian.Uint64(s[0:0+8])*k1) * k1
+ a *= k1
+ b ^= a
+ c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8])*k1) * k1
+ c *= k1
+ d ^= c
+ s = s[16:]
+ l -= 16
+ if l <= 0 {
+ break
+ }
+ }
+ }
+ a = hashLen16(a, c)
+ b = hashLen16(d, b)
+ return uint128{a ^ b, hashLen16(b, a)}
+}
+
+func cityHash128WithSeed(s []byte, seed uint128) uint128 {
+ slen := len(s)
+ if slen < 128 {
+ return cityMurmur(s, seed)
+ }
+
+ endIdx := ((slen - 1) / 128) * 128
+ lastBlockIdx := endIdx + ((slen - 1) & 127) - 127
+ last := s[lastBlockIdx:]
+
+ // We expect len >= 128 to be the common case. Keep 56 bytes of state:
+ // v, w, x, y, and z.
+ var v1, v2 uint64
+ var w1, w2 uint64
+ x := seed.lo
+ y := seed.hi
+ z := uint64(slen) * k1
+ v1 = bits.RotateLeft64(y^k1, -49)*k1 + binary.LittleEndian.Uint64(s[0:0+8])
+ v2 = bits.RotateLeft64(v1, -42)*k1 + binary.LittleEndian.Uint64(s[8:8+8])
+ w1 = bits.RotateLeft64(y+z, -35)*k1 + x
+ w2 = bits.RotateLeft64(x+binary.LittleEndian.Uint64(s[88:88+8]), -53) * k1
+
+ // This is the same inner loop as CityHash64(), manually unrolled.
+ for {
+ x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
+ y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
+ x ^= w2
+ y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
+ z = bits.RotateLeft64(z+w1, -33) * k1
+ v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
+ w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
+ z, x = x, z
+ s = s[64:]
+ x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
+ y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
+ x ^= w2
+ y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
+ z = bits.RotateLeft64(z+w1, -33) * k1
+ v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
+ w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
+ z, x = x, z
+ s = s[64:]
+ slen -= 128
+ if slen < 128 {
+ break
+ }
+ }
+ x += bits.RotateLeft64(v1+z, -49) * k0
+ y = y*k0 + bits.RotateLeft64(w2, -37)
+ z = z*k0 + bits.RotateLeft64(w1, -27)
+ w1 *= 9
+ v1 *= k0
+ // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
+ for tailDone := 0; tailDone < slen; {
+ tailDone += 32
+ y = bits.RotateLeft64(x+y, -42)*k0 + v2
+ w1 += binary.LittleEndian.Uint64(last[128-tailDone+16 : 128-tailDone+16+8])
+ x = x*k0 + w1
+ z += w2 + binary.LittleEndian.Uint64(last[128-tailDone:128-tailDone+8])
+ w2 += v1
+ v1, v2 = weakHashLen32WithSeeds(last[128-tailDone:], v1+z, v2)
+ v1 *= k0
+ }
+
+ // At this point our 56 bytes of state should contain more than
+ // enough information for a strong 128-bit hash. We use two
+ // different 56-byte-to-8-byte hashes to get a 16-byte final result.
+ x = hashLen16(x, v1)
+ y = hashLen16(y+z, w1)
+ return uint128{hashLen16(x+v2, w2) + y,
+ hashLen16(x+w2, y+v2)}
+}
+
+func cityHash128(s []byte) uint128 {
+ slen := len(s)
+ if slen >= 16 {
+ return cityHash128WithSeed(s[16:], uint128{binary.LittleEndian.Uint64(s[0 : 0+8]), binary.LittleEndian.Uint64(s[8:8+8]) + k0})
+ }
+ return cityHash128WithSeed(s, uint128{k0, k1})
+}
+
+// Fingerprint128 is a 128-bit fingerprint function for byte-slices
+func Fingerprint128(s []byte) (lo, hi uint64) {
+ h := cityHash128(s)
+ return h.lo, h.hi
+}
+
+// Hash128 is a 128-bit hash function for byte-slices
+func Hash128(s []byte) (lo, hi uint64) {
+ return Fingerprint128(s)
+}
+
+// Hash128WithSeed is a 128-bit hash function for byte-slices and a 128-bit seed
+func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) {
+ h := cityHash128WithSeed(s, uint128{seed0, seed1})
+ return h.lo, h.hi
+}
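A brief usage sketch for the exported 128-bit entry points above (`Fingerprint128`, `Hash128`, `Hash128WithSeed`), all of which route through `cityHash128WithSeed`; the input and seeds are illustrative.

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("example input") // illustrative payload

	// Unseeded 128-bit fingerprint, returned as two uint64 halves.
	lo, hi := farm.Fingerprint128(data)
	fmt.Printf("fingerprint128: %016x%016x\n", hi, lo)

	// Seeded variant; the two uint64 values form the 128-bit seed.
	slo, shi := farm.Hash128WithSeed(data, 1, 2)
	fmt.Printf("hash128 seeded: %016x%016x\n", shi, slo)
}
```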
diff --git a/vendor/github.com/dgryski/go-farm/farmhashmk.go b/vendor/github.com/dgryski/go-farm/farmhashmk.go
new file mode 100644
index 000000000..8e4c7428b
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/farmhashmk.go
@@ -0,0 +1,102 @@
+package farm
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+func hash32Len5to12(s []byte, seed uint32) uint32 {
+ slen := len(s)
+ a := uint32(len(s))
+ b := uint32(len(s) * 5)
+ c := uint32(9)
+ d := b + seed
+ a += binary.LittleEndian.Uint32(s[0 : 0+4])
+ b += binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
+ c += binary.LittleEndian.Uint32(s[((slen >> 1) & 4) : ((slen>>1)&4)+4])
+ return fmix(seed ^ mur(c, mur(b, mur(a, d))))
+}
+
+// Hash32 hashes a byte slice and returns a uint32 hash value
+func Hash32(s []byte) uint32 {
+
+ slen := len(s)
+
+ if slen <= 24 {
+ if slen <= 12 {
+ if slen <= 4 {
+ return hash32Len0to4(s, 0)
+ }
+ return hash32Len5to12(s, 0)
+ }
+ return hash32Len13to24Seed(s, 0)
+ }
+
+ // len > 24
+ h := uint32(slen)
+ g := c1 * uint32(slen)
+ f := g
+ a0 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-4:slen-4+4])*c1, -17) * c2
+ a1 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-8:slen-8+4])*c1, -17) * c2
+ a2 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-16:slen-16+4])*c1, -17) * c2
+ a3 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-12:slen-12+4])*c1, -17) * c2
+ a4 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-20:slen-20+4])*c1, -17) * c2
+ h ^= a0
+ h = bits.RotateLeft32(h, -19)
+ h = h*5 + 0xe6546b64
+ h ^= a2
+ h = bits.RotateLeft32(h, -19)
+ h = h*5 + 0xe6546b64
+ g ^= a1
+ g = bits.RotateLeft32(g, -19)
+ g = g*5 + 0xe6546b64
+ g ^= a3
+ g = bits.RotateLeft32(g, -19)
+ g = g*5 + 0xe6546b64
+ f += a4
+ f = bits.RotateLeft32(f, -19) + 113
+ for len(s) > 20 {
+ a := binary.LittleEndian.Uint32(s[0 : 0+4])
+ b := binary.LittleEndian.Uint32(s[4 : 4+4])
+ c := binary.LittleEndian.Uint32(s[8 : 8+4])
+ d := binary.LittleEndian.Uint32(s[12 : 12+4])
+ e := binary.LittleEndian.Uint32(s[16 : 16+4])
+ h += a
+ g += b
+ f += c
+ h = mur(d, h) + e
+ g = mur(c, g) + a
+ f = mur(b+e*c1, f) + d
+ f += g
+ g += f
+ s = s[20:]
+ }
+ g = bits.RotateLeft32(g, -11) * c1
+ g = bits.RotateLeft32(g, -17) * c1
+ f = bits.RotateLeft32(f, -11) * c1
+ f = bits.RotateLeft32(f, -17) * c1
+ h = bits.RotateLeft32(h+g, -19)
+ h = h*5 + 0xe6546b64
+ h = bits.RotateLeft32(h, -17) * c1
+ h = bits.RotateLeft32(h+f, -19)
+ h = h*5 + 0xe6546b64
+ h = bits.RotateLeft32(h, -17) * c1
+ return h
+}
+
+// Hash32WithSeed hashes a byte slice and a uint32 seed and returns a uint32 hash value
+func Hash32WithSeed(s []byte, seed uint32) uint32 {
+ slen := len(s)
+
+ if slen <= 24 {
+ if slen >= 13 {
+ return hash32Len13to24Seed(s, seed*c1)
+ }
+ if slen >= 5 {
+ return hash32Len5to12(s, seed)
+ }
+ return hash32Len0to4(s, seed)
+ }
+ h := hash32Len13to24Seed(s[:24], seed^uint32(slen))
+ return mur(Hash32(s[24:])+seed, h)
+}
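As the dispatch logic above shows, `Hash32` picks a specialized routine by input length (0-4, 5-12, and 13-24 bytes), and longer inputs go through the unrolled 20-byte-block loop. A small sketch with illustrative inputs and an arbitrary seed:

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	short := []byte("abc") // handled by the 0-4 byte path
	long := []byte("a considerably longer illustrative input string")

	fmt.Printf("%08x\n", farm.Hash32(short))
	fmt.Printf("%08x\n", farm.Hash32WithSeed(long, 0xdeadbeef)) // seed is arbitrary
}
```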
diff --git a/vendor/github.com/dgryski/go-farm/farmhashna.go b/vendor/github.com/dgryski/go-farm/farmhashna.go
new file mode 100644
index 000000000..ac62edd3b
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/farmhashna.go
@@ -0,0 +1,161 @@
+package farm
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+func shiftMix(val uint64) uint64 {
+ return val ^ (val >> 47)
+}
+
+func hashLen16(u, v uint64) uint64 {
+ return hash128to64(uint128{u, v})
+}
+
+func hashLen16Mul(u, v, mul uint64) uint64 {
+ // Murmur-inspired hashing.
+ a := (u ^ v) * mul
+ a ^= (a >> 47)
+ b := (v ^ a) * mul
+ b ^= (b >> 47)
+ b *= mul
+ return b
+}
+
+func hashLen0to16(s []byte) uint64 {
+ slen := uint64(len(s))
+ if slen >= 8 {
+ mul := k2 + slen*2
+ a := binary.LittleEndian.Uint64(s[0:0+8]) + k2
+ b := binary.LittleEndian.Uint64(s[int(slen-8) : int(slen-8)+8])
+ c := bits.RotateLeft64(b, -37)*mul + a
+ d := (bits.RotateLeft64(a, -25) + b) * mul
+ return hashLen16Mul(c, d, mul)
+ }
+
+ if slen >= 4 {
+ mul := k2 + slen*2
+ a := binary.LittleEndian.Uint32(s[0 : 0+4])
+ return hashLen16Mul(slen+(uint64(a)<<3), uint64(binary.LittleEndian.Uint32(s[int(slen-4):int(slen-4)+4])), mul)
+ }
+ if slen > 0 {
+ a := s[0]
+ b := s[slen>>1]
+ c := s[slen-1]
+ y := uint32(a) + (uint32(b) << 8)
+ z := uint32(slen) + (uint32(c) << 2)
+ return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2
+ }
+ return k2
+}
+
+// This probably works well for 16-byte strings as well, but it may be overkill
+// in that case.
+func hashLen17to32(s []byte) uint64 {
+ slen := len(s)
+ mul := k2 + uint64(slen*2)
+ a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
+ b := binary.LittleEndian.Uint64(s[8 : 8+8])
+ c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
+ d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
+ return hashLen16Mul(bits.RotateLeft64(a+b, -43)+bits.RotateLeft64(c, -30)+d, a+bits.RotateLeft64(b+k2, -18)+c, mul)
+}
+
+// Return a 16-byte hash for 48 bytes. Quick and dirty.
+// Callers do best to use "random-looking" values for a and b.
+func weakHashLen32WithSeedsWords(w, x, y, z, a, b uint64) (uint64, uint64) {
+ a += w
+ b = bits.RotateLeft64(b+a+z, -21)
+ c := a
+ a += x
+ a += y
+ b += bits.RotateLeft64(a, -44)
+ return a + z, b + c
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
+func weakHashLen32WithSeeds(s []byte, a, b uint64) (uint64, uint64) {
+ return weakHashLen32WithSeedsWords(binary.LittleEndian.Uint64(s[0:0+8]),
+ binary.LittleEndian.Uint64(s[8:8+8]),
+ binary.LittleEndian.Uint64(s[16:16+8]),
+ binary.LittleEndian.Uint64(s[24:24+8]),
+ a,
+ b)
+}
+
+// Return an 8-byte hash for 33 to 64 bytes.
+func hashLen33to64(s []byte) uint64 {
+ slen := len(s)
+ mul := k2 + uint64(slen)*2
+ a := binary.LittleEndian.Uint64(s[0:0+8]) * k2
+ b := binary.LittleEndian.Uint64(s[8 : 8+8])
+ c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
+ d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
+ y := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
+ z := hashLen16Mul(y, a+bits.RotateLeft64(b+k2, -18)+c, mul)
+ e := binary.LittleEndian.Uint64(s[16:16+8]) * mul
+ f := binary.LittleEndian.Uint64(s[24 : 24+8])
+ g := (y + binary.LittleEndian.Uint64(s[slen-32:slen-32+8])) * mul
+ h := (z + binary.LittleEndian.Uint64(s[slen-24:slen-24+8])) * mul
+ return hashLen16Mul(bits.RotateLeft64(e+f, -43)+bits.RotateLeft64(g, -30)+h, e+bits.RotateLeft64(f+a, -18)+g, mul)
+}
+
+func naHash64(s []byte) uint64 {
+ slen := len(s)
+ var seed uint64 = 81
+ if slen <= 32 {
+ if slen <= 16 {
+ return hashLen0to16(s)
+ }
+ return hashLen17to32(s)
+ }
+ if slen <= 64 {
+ return hashLen33to64(s)
+ }
+ // For strings over 64 bytes we loop.
+ // Internal state consists of 56 bytes: v, w, x, y, and z.
+ v := uint128{0, 0}
+ w := uint128{0, 0}
+ x := seed*k2 + binary.LittleEndian.Uint64(s[0:0+8])
+ y := seed*k1 + 113
+ z := shiftMix(y*k2+113) * k2
+ // Set end so that after the loop we have 1 to 64 bytes left to process.
+ endIdx := ((slen - 1) / 64) * 64
+ last64Idx := endIdx + ((slen - 1) & 63) - 63
+ last64 := s[last64Idx:]
+ for len(s) > 64 {
+ x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
+ y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
+ x ^= w.hi
+ y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
+ z = bits.RotateLeft64(z+w.lo, -33) * k1
+ v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*k1, x+w.lo)
+ w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
+ x, z = z, x
+ s = s[64:]
+ }
+ mul := k1 + ((z & 0xff) << 1)
+ // Make s point to the last 64 bytes of input.
+ s = last64
+ w.lo += (uint64(slen-1) & 63)
+ v.lo += w.lo
+ w.lo += v.lo
+ x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
+ y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
+ x ^= w.hi * 9
+ y += v.lo*9 + binary.LittleEndian.Uint64(s[40:40+8])
+ z = bits.RotateLeft64(z+w.lo, -33) * mul
+ v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
+ w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
+ x, z = z, x
+ return hashLen16Mul(hashLen16Mul(v.lo, w.lo, mul)+shiftMix(y)*k0+z, hashLen16Mul(v.hi, w.hi, mul)+x, mul)
+}
+
+func naHash64WithSeed(s []byte, seed uint64) uint64 {
+ return naHash64WithSeeds(s, k2, seed)
+}
+
+func naHash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
+ return hashLen16(naHash64(s)-seed0, seed1)
+}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashuo.go b/vendor/github.com/dgryski/go-farm/farmhashuo.go
new file mode 100644
index 000000000..474b74e05
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/farmhashuo.go
@@ -0,0 +1,122 @@
+package farm
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+func uoH(x, y, mul uint64, r uint) uint64 {
+ a := (x ^ y) * mul
+ a ^= (a >> 47)
+ b := (y ^ a) * mul
+ return bits.RotateLeft64(b, -int(r)) * mul
+}
+
+// Hash64WithSeeds hashes a byte slice and two uint64 seeds and returns a uint64 hash value
+func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
+ slen := len(s)
+ if slen <= 64 {
+ return naHash64WithSeeds(s, seed0, seed1)
+ }
+
+ // For strings over 64 bytes we loop.
+ // Internal state consists of 64 bytes: u, v, w, x, y, and z.
+ x := seed0
+ y := seed1*k2 + 113
+ z := shiftMix(y*k2) * k2
+ v := uint128{seed0, seed1}
+ var w uint128
+ u := x - z
+ x *= k2
+ mul := k2 + (u & 0x82)
+
+ // Set end so that after the loop we have 1 to 64 bytes left to process.
+ endIdx := ((slen - 1) / 64) * 64
+ last64Idx := endIdx + ((slen - 1) & 63) - 63
+ last64 := s[last64Idx:]
+
+ for len(s) > 64 {
+ a0 := binary.LittleEndian.Uint64(s[0 : 0+8])
+ a1 := binary.LittleEndian.Uint64(s[8 : 8+8])
+ a2 := binary.LittleEndian.Uint64(s[16 : 16+8])
+ a3 := binary.LittleEndian.Uint64(s[24 : 24+8])
+ a4 := binary.LittleEndian.Uint64(s[32 : 32+8])
+ a5 := binary.LittleEndian.Uint64(s[40 : 40+8])
+ a6 := binary.LittleEndian.Uint64(s[48 : 48+8])
+ a7 := binary.LittleEndian.Uint64(s[56 : 56+8])
+ x += a0 + a1
+ y += a2
+ z += a3
+ v.lo += a4
+ v.hi += a5 + a1
+ w.lo += a6
+ w.hi += a7
+
+ x = bits.RotateLeft64(x, -26)
+ x *= 9
+ y = bits.RotateLeft64(y, -29)
+ z *= mul
+ v.lo = bits.RotateLeft64(v.lo, -33)
+ v.hi = bits.RotateLeft64(v.hi, -30)
+ w.lo ^= x
+ w.lo *= 9
+ z = bits.RotateLeft64(z, -32)
+ z += w.hi
+ w.hi += z
+ z *= 9
+ u, y = y, u
+
+ z += a0 + a6
+ v.lo += a2
+ v.hi += a3
+ w.lo += a4
+ w.hi += a5 + a6
+ x += a1
+ y += a7
+
+ y += v.lo
+ v.lo += x - y
+ v.hi += w.lo
+ w.lo += v.hi
+ w.hi += x - y
+ x += w.hi
+ w.hi = bits.RotateLeft64(w.hi, -34)
+ u, z = z, u
+ s = s[64:]
+ }
+ // Make s point to the last 64 bytes of input.
+ s = last64
+ u *= 9
+ v.hi = bits.RotateLeft64(v.hi, -28)
+ v.lo = bits.RotateLeft64(v.lo, -20)
+ w.lo += (uint64(slen-1) & 63)
+ u += y
+ y += u
+ x = bits.RotateLeft64(y-x+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
+ y = bits.RotateLeft64(y^v.hi^binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
+ x ^= w.hi * 9
+ y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
+ z = bits.RotateLeft64(z+w.lo, -33) * mul
+ v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
+ w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
+ return uoH(hashLen16Mul(v.lo+x, w.lo^y, mul)+z-u,
+ uoH(v.hi+y, w.hi+z, k2, 30)^x,
+ k2,
+ 31)
+}
+
+// Hash64WithSeed hashes a byte slice and a uint64 seed and returns a uint64 hash value
+func Hash64WithSeed(s []byte, seed uint64) uint64 {
+ if len(s) <= 64 {
+ return naHash64WithSeed(s, seed)
+ }
+ return Hash64WithSeeds(s, 0, seed)
+}
+
+// Hash64 hashes a byte slice and returns a uint64 hash value
+func Hash64(s []byte) uint64 {
+ if len(s) <= 64 {
+ return naHash64(s)
+ }
+ return Hash64WithSeeds(s, 81, 0)
+}
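A short sketch of the 64-bit variants defined above; the input and seeds are illustrative. Inputs of 64 bytes or less take the farmhashna path, while longer inputs run the farmhashuo loop in this file.

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("another illustrative input for the 64-bit variants")

	fmt.Printf("%016x\n", farm.Hash64(data))
	fmt.Printf("%016x\n", farm.Hash64WithSeed(data, 42))      // single seed
	fmt.Printf("%016x\n", farm.Hash64WithSeeds(data, 42, 99)) // seed pair
}
```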
diff --git a/vendor/github.com/dgryski/go-farm/fp_amd64.s b/vendor/github.com/dgryski/go-farm/fp_amd64.s
new file mode 100644
index 000000000..2b8fa3247
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/fp_amd64.s
@@ -0,0 +1,951 @@
+// Code generated by command: go run asm.go -out=fp_amd64.s -go111=false. DO NOT EDIT.
+
+// +build amd64,!purego
+
+#include "textflag.h"
+
+// func Fingerprint64(s []byte) uint64
+TEXT ·Fingerprint64(SB), NOSPLIT, $0-32
+ MOVQ s_base+0(FP), CX
+ MOVQ s_len+8(FP), AX
+ CMPQ AX, $0x10
+ JG check32
+ CMPQ AX, $0x08
+ JL check4
+ MOVQ (CX), DX
+ MOVQ AX, BX
+ SUBQ $0x08, BX
+ ADDQ CX, BX
+ MOVQ (BX), BX
+ MOVQ $0x9ae16a3b2f90404f, BP
+ ADDQ BP, DX
+ SHLQ $0x01, AX
+ ADDQ BP, AX
+ MOVQ BX, BP
+ RORQ $0x25, BP
+ IMULQ AX, BP
+ ADDQ DX, BP
+ RORQ $0x19, DX
+ ADDQ BX, DX
+ IMULQ AX, DX
+ XORQ DX, BP
+ IMULQ AX, BP
+ MOVQ BP, BX
+ SHRQ $0x2f, BX
+ XORQ BP, BX
+ XORQ BX, DX
+ IMULQ AX, DX
+ MOVQ DX, BX
+ SHRQ $0x2f, BX
+ XORQ DX, BX
+ IMULQ AX, BX
+ MOVQ BX, ret+24(FP)
+ RET
+
+check4:
+ CMPQ AX, $0x04
+ JL check0
+ MOVQ $0x9ae16a3b2f90404f, DX
+ MOVQ AX, BX
+ SHLQ $0x01, BX
+ ADDQ DX, BX
+ MOVL (CX), SI
+ SHLQ $0x03, SI
+ ADDQ AX, SI
+ SUBQ $0x04, AX
+ ADDQ AX, CX
+ MOVL (CX), DI
+ XORQ DI, SI
+ IMULQ BX, SI
+ MOVQ SI, DX
+ SHRQ $0x2f, DX
+ XORQ SI, DX
+ XORQ DX, DI
+ IMULQ BX, DI
+ MOVQ DI, DX
+ SHRQ $0x2f, DX
+ XORQ DI, DX
+ IMULQ BX, DX
+ MOVQ DX, ret+24(FP)
+ RET
+
+check0:
+ TESTQ AX, AX
+ JZ empty
+ MOVBQZX (CX), DX
+ MOVQ AX, BX
+ SHRQ $0x01, BX
+ ADDQ CX, BX
+ MOVBQZX (BX), BP
+ MOVQ AX, BX
+ SUBQ $0x01, BX
+ ADDQ CX, BX
+ MOVBQZX (BX), BX
+ SHLQ $0x08, BP
+ ADDQ BP, DX
+ SHLQ $0x02, BX
+ ADDQ BX, AX
+ MOVQ $0xc3a5c85c97cb3127, BX
+ IMULQ BX, AX
+ MOVQ $0x9ae16a3b2f90404f, BX
+ IMULQ BX, DX
+ XORQ DX, AX
+ MOVQ AX, DX
+ SHRQ $0x2f, DX
+ XORQ AX, DX
+ IMULQ BX, DX
+ MOVQ DX, ret+24(FP)
+ RET
+
+empty:
+ MOVQ $0x9ae16a3b2f90404f, DX
+ MOVQ DX, ret+24(FP)
+ RET
+
+check32:
+ CMPQ AX, $0x20
+ JG check64
+ MOVQ AX, DX
+ SHLQ $0x01, DX
+ MOVQ $0x9ae16a3b2f90404f, BX
+ ADDQ BX, DX
+ MOVQ (CX), BP
+ MOVQ $0xb492b66fbe98f273, SI
+ IMULQ SI, BP
+ MOVQ 8(CX), SI
+ MOVQ AX, DI
+ SUBQ $0x10, DI
+ ADDQ CX, DI
+ MOVQ 8(DI), R12
+ IMULQ DX, R12
+ MOVQ (DI), DI
+ IMULQ BX, DI
+ MOVQ BP, R13
+ ADDQ SI, R13
+ RORQ $0x2b, R13
+ ADDQ DI, R13
+ MOVQ R12, DI
+ RORQ $0x1e, DI
+ ADDQ DI, R13
+ ADDQ R12, BP
+ ADDQ BX, SI
+ RORQ $0x12, SI
+ ADDQ SI, BP
+ XORQ BP, R13
+ IMULQ DX, R13
+ MOVQ R13, BX
+ SHRQ $0x2f, BX
+ XORQ R13, BX
+ XORQ BX, BP
+ IMULQ DX, BP
+ MOVQ BP, BX
+ SHRQ $0x2f, BX
+ XORQ BP, BX
+ IMULQ DX, BX
+ MOVQ BX, ret+24(FP)
+ RET
+
+check64:
+ CMPQ AX, $0x40
+ JG long
+ MOVQ AX, DX
+ SHLQ $0x01, DX
+ MOVQ $0x9ae16a3b2f90404f, BX
+ ADDQ BX, DX
+ MOVQ (CX), BP
+ IMULQ BX, BP
+ MOVQ 8(CX), SI
+ MOVQ AX, DI
+ SUBQ $0x10, DI
+ ADDQ CX, DI
+ MOVQ 8(DI), R12
+ IMULQ DX, R12
+ MOVQ (DI), DI
+ IMULQ BX, DI
+ MOVQ BP, R13
+ ADDQ SI, R13
+ RORQ $0x2b, R13
+ ADDQ DI, R13
+ MOVQ R12, DI
+ RORQ $0x1e, DI
+ ADDQ DI, R13
+ ADDQ BP, R12
+ ADDQ BX, SI
+ RORQ $0x12, SI
+ ADDQ SI, R12
+ MOVQ R13, BX
+ XORQ R12, BX
+ IMULQ DX, BX
+ MOVQ BX, SI
+ SHRQ $0x2f, SI
+ XORQ BX, SI
+ XORQ SI, R12
+ IMULQ DX, R12
+ MOVQ R12, BX
+ SHRQ $0x2f, BX
+ XORQ R12, BX
+ IMULQ DX, BX
+ MOVQ 16(CX), SI
+ IMULQ DX, SI
+ MOVQ 24(CX), DI
+ MOVQ AX, R12
+ SUBQ $0x20, R12
+ ADDQ CX, R12
+ MOVQ (R12), R14
+ ADDQ R13, R14
+ IMULQ DX, R14
+ MOVQ 8(R12), R12
+ ADDQ BX, R12
+ IMULQ DX, R12
+ MOVQ SI, BX
+ ADDQ DI, BX
+ RORQ $0x2b, BX
+ ADDQ R12, BX
+ MOVQ R14, R12
+ RORQ $0x1e, R12
+ ADDQ R12, BX
+ ADDQ R14, SI
+ ADDQ BP, DI
+ RORQ $0x12, DI
+ ADDQ DI, SI
+ XORQ SI, BX
+ IMULQ DX, BX
+ MOVQ BX, BP
+ SHRQ $0x2f, BP
+ XORQ BX, BP
+ XORQ BP, SI
+ IMULQ DX, SI
+ MOVQ SI, BX
+ SHRQ $0x2f, BX
+ XORQ SI, BX
+ IMULQ DX, BX
+ MOVQ BX, ret+24(FP)
+ RET
+
+long:
+ XORQ R8, R8
+ XORQ R9, R9
+ XORQ R10, R10
+ XORQ R11, R11
+ MOVQ $0x01529cba0ca458ff, DX
+ ADDQ (CX), DX
+ MOVQ $0x226bb95b4e64b6d4, BX
+ MOVQ $0x134a747f856d0526, BP
+ MOVQ AX, SI
+ SUBQ $0x01, SI
+ MOVQ $0xffffffffffffffc0, DI
+ ANDQ DI, SI
+ MOVQ AX, DI
+ SUBQ $0x01, DI
+ ANDQ $0x3f, DI
+ SUBQ $0x3f, DI
+ ADDQ SI, DI
+ MOVQ DI, SI
+ ADDQ CX, SI
+ MOVQ AX, DI
+
+loop:
+ MOVQ $0xb492b66fbe98f273, R12
+ ADDQ BX, DX
+ ADDQ R8, DX
+ ADDQ 8(CX), DX
+ RORQ $0x25, DX
+ IMULQ R12, DX
+ ADDQ R9, BX
+ ADDQ 48(CX), BX
+ RORQ $0x2a, BX
+ IMULQ R12, BX
+ XORQ R11, DX
+ ADDQ R8, BX
+ ADDQ 40(CX), BX
+ ADDQ R10, BP
+ RORQ $0x21, BP
+ IMULQ R12, BP
+ IMULQ R12, R9
+ MOVQ DX, R8
+ ADDQ R10, R8
+ ADDQ (CX), R9
+ ADDQ R9, R8
+ ADDQ 24(CX), R8
+ RORQ $0x15, R8
+ MOVQ R9, R10
+ ADDQ 8(CX), R9
+ ADDQ 16(CX), R9
+ MOVQ R9, R13
+ RORQ $0x2c, R13
+ ADDQ R13, R8
+ ADDQ 24(CX), R9
+ ADDQ R10, R8
+ XCHGQ R9, R8
+ ADDQ BP, R11
+ MOVQ BX, R10
+ ADDQ 16(CX), R10
+ ADDQ 32(CX), R11
+ ADDQ R11, R10
+ ADDQ 56(CX), R10
+ RORQ $0x15, R10
+ MOVQ R11, R13
+ ADDQ 40(CX), R11
+ ADDQ 48(CX), R11
+ MOVQ R11, R14
+ RORQ $0x2c, R14
+ ADDQ R14, R10
+ ADDQ 56(CX), R11
+ ADDQ R13, R10
+ XCHGQ R11, R10
+ XCHGQ BP, DX
+ ADDQ $0x40, CX
+ SUBQ $0x40, DI
+ CMPQ DI, $0x40
+ JG loop
+ MOVQ SI, CX
+ MOVQ BP, DI
+ ANDQ $0xff, DI
+ SHLQ $0x01, DI
+ ADDQ R12, DI
+ MOVQ SI, CX
+ SUBQ $0x01, AX
+ ANDQ $0x3f, AX
+ ADDQ AX, R10
+ ADDQ R10, R8
+ ADDQ R8, R10
+ ADDQ BX, DX
+ ADDQ R8, DX
+ ADDQ 8(CX), DX
+ RORQ $0x25, DX
+ IMULQ DI, DX
+ ADDQ R9, BX
+ ADDQ 48(CX), BX
+ RORQ $0x2a, BX
+ IMULQ DI, BX
+ MOVQ $0x00000009, AX
+ IMULQ R11, AX
+ XORQ AX, DX
+ MOVQ $0x00000009, AX
+ IMULQ R8, AX
+ ADDQ AX, BX
+ ADDQ 40(CX), BX
+ ADDQ R10, BP
+ RORQ $0x21, BP
+ IMULQ DI, BP
+ IMULQ DI, R9
+ MOVQ DX, R8
+ ADDQ R10, R8
+ ADDQ (CX), R9
+ ADDQ R9, R8
+ ADDQ 24(CX), R8
+ RORQ $0x15, R8
+ MOVQ R9, AX
+ ADDQ 8(CX), R9
+ ADDQ 16(CX), R9
+ MOVQ R9, SI
+ RORQ $0x2c, SI
+ ADDQ SI, R8
+ ADDQ 24(CX), R9
+ ADDQ AX, R8
+ XCHGQ R9, R8
+ ADDQ BP, R11
+ MOVQ BX, R10
+ ADDQ 16(CX), R10
+ ADDQ 32(CX), R11
+ ADDQ R11, R10
+ ADDQ 56(CX), R10
+ RORQ $0x15, R10
+ MOVQ R11, AX
+ ADDQ 40(CX), R11
+ ADDQ 48(CX), R11
+ MOVQ R11, SI
+ RORQ $0x2c, SI
+ ADDQ SI, R10
+ ADDQ 56(CX), R11
+ ADDQ AX, R10
+ XCHGQ R11, R10
+ XCHGQ BP, DX
+ XORQ R10, R8
+ IMULQ DI, R8
+ MOVQ R8, AX
+ SHRQ $0x2f, AX
+ XORQ R8, AX
+ XORQ AX, R10
+ IMULQ DI, R10
+ MOVQ R10, AX
+ SHRQ $0x2f, AX
+ XORQ R10, AX
+ IMULQ DI, AX
+ ADDQ BP, AX
+ MOVQ BX, CX
+ SHRQ $0x2f, CX
+ XORQ BX, CX
+ MOVQ $0xc3a5c85c97cb3127, BX
+ IMULQ BX, CX
+ ADDQ CX, AX
+ XORQ R11, R9
+ IMULQ DI, R9
+ MOVQ R9, CX
+ SHRQ $0x2f, CX
+ XORQ R9, CX
+ XORQ CX, R11
+ IMULQ DI, R11
+ MOVQ R11, CX
+ SHRQ $0x2f, CX
+ XORQ R11, CX
+ IMULQ DI, CX
+ ADDQ DX, CX
+ XORQ CX, AX
+ IMULQ DI, AX
+ MOVQ AX, DX
+ SHRQ $0x2f, DX
+ XORQ AX, DX
+ XORQ DX, CX
+ IMULQ DI, CX
+ MOVQ CX, AX
+ SHRQ $0x2f, AX
+ XORQ CX, AX
+ IMULQ DI, AX
+ MOVQ AX, ret+24(FP)
+ RET
+
+// func Fingerprint32(s []byte) uint32
+TEXT ·Fingerprint32(SB), NOSPLIT, $0-28
+ MOVQ s_base+0(FP), AX
+ MOVQ s_len+8(FP), CX
+ CMPQ CX, $0x18
+ JG long
+ CMPQ CX, $0x0c
+ JG hash_13_24
+ CMPQ CX, $0x04
+ JG hash_5_12
+ XORL DX, DX
+ MOVL $0x00000009, BX
+ TESTQ CX, CX
+ JZ done
+ MOVQ CX, BP
+ MOVL $0xcc9e2d51, DI
+ IMULL DI, DX
+ MOVBLSX (AX), SI
+ ADDL SI, DX
+ XORL DX, BX
+ SUBQ $0x01, BP
+ TESTQ BP, BP
+ JZ done
+ IMULL DI, DX
+ MOVBLSX 1(AX), SI
+ ADDL SI, DX
+ XORL DX, BX
+ SUBQ $0x01, BP
+ TESTQ BP, BP
+ JZ done
+ IMULL DI, DX
+ MOVBLSX 2(AX), SI
+ ADDL SI, DX
+ XORL DX, BX
+ SUBQ $0x01, BP
+ TESTQ BP, BP
+ JZ done
+ IMULL DI, DX
+ MOVBLSX 3(AX), SI
+ ADDL SI, DX
+ XORL DX, BX
+ SUBQ $0x01, BP
+ TESTQ BP, BP
+ JZ done
+
+done:
+ MOVL CX, BP
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, BP
+ RORL $0x11, BP
+ MOVL $0x1b873593, SI
+ IMULL SI, BP
+ XORL BP, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), BP
+ LEAL 3864292196(BP), BX
+ MOVL $0xcc9e2d51, BP
+ IMULL BP, DX
+ RORL $0x11, DX
+ MOVL $0x1b873593, BP
+ IMULL BP, DX
+ XORL DX, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), DX
+ LEAL 3864292196(DX), BX
+ MOVL BX, DX
+ SHRL $0x10, DX
+ XORL DX, BX
+ MOVL $0x85ebca6b, DX
+ IMULL DX, BX
+ MOVL BX, DX
+ SHRL $0x0d, DX
+ XORL DX, BX
+ MOVL $0xc2b2ae35, DX
+ IMULL DX, BX
+ MOVL BX, DX
+ SHRL $0x10, DX
+ XORL DX, BX
+ MOVL BX, ret+24(FP)
+ RET
+
+hash_5_12:
+ MOVL CX, DX
+ MOVL DX, BX
+ SHLL $0x02, BX
+ ADDL DX, BX
+ MOVL $0x00000009, BP
+ MOVL BX, SI
+ ADDL (AX), DX
+ MOVQ CX, DI
+ SUBQ $0x04, DI
+ ADDQ AX, DI
+ ADDL (DI), BX
+ MOVQ CX, DI
+ SHRQ $0x01, DI
+ ANDQ $0x04, DI
+ ADDQ AX, DI
+ ADDL (DI), BP
+ MOVL $0xcc9e2d51, DI
+ IMULL DI, DX
+ RORL $0x11, DX
+ MOVL $0x1b873593, DI
+ IMULL DI, DX
+ XORL DX, SI
+ RORL $0x13, SI
+ LEAL (SI)(SI*4), DX
+ LEAL 3864292196(DX), SI
+ MOVL $0xcc9e2d51, DX
+ IMULL DX, BX
+ RORL $0x11, BX
+ MOVL $0x1b873593, DX
+ IMULL DX, BX
+ XORL BX, SI
+ RORL $0x13, SI
+ LEAL (SI)(SI*4), BX
+ LEAL 3864292196(BX), SI
+ MOVL $0xcc9e2d51, DX
+ IMULL DX, BP
+ RORL $0x11, BP
+ MOVL $0x1b873593, DX
+ IMULL DX, BP
+ XORL BP, SI
+ RORL $0x13, SI
+ LEAL (SI)(SI*4), BP
+ LEAL 3864292196(BP), SI
+ MOVL SI, DX
+ SHRL $0x10, DX
+ XORL DX, SI
+ MOVL $0x85ebca6b, DX
+ IMULL DX, SI
+ MOVL SI, DX
+ SHRL $0x0d, DX
+ XORL DX, SI
+ MOVL $0xc2b2ae35, DX
+ IMULL DX, SI
+ MOVL SI, DX
+ SHRL $0x10, DX
+ XORL DX, SI
+ MOVL SI, ret+24(FP)
+ RET
+
+hash_13_24:
+ MOVQ CX, DX
+ SHRQ $0x01, DX
+ ADDQ AX, DX
+ MOVL -4(DX), BX
+ MOVL 4(AX), BP
+ MOVQ CX, SI
+ ADDQ AX, SI
+ MOVL -8(SI), DI
+ MOVL (DX), DX
+ MOVL (AX), R8
+ MOVL -4(SI), SI
+ MOVL $0xcc9e2d51, R9
+ IMULL DX, R9
+ ADDL CX, R9
+ RORL $0x0c, BX
+ ADDL SI, BX
+ MOVL DI, R10
+ MOVL $0xcc9e2d51, R11
+ IMULL R11, R10
+ RORL $0x11, R10
+ MOVL $0x1b873593, R11
+ IMULL R11, R10
+ XORL R10, R9
+ RORL $0x13, R9
+ LEAL (R9)(R9*4), R10
+ LEAL 3864292196(R10), R9
+ ADDL BX, R9
+ RORL $0x03, BX
+ ADDL DI, BX
+ MOVL $0xcc9e2d51, DI
+ IMULL DI, R8
+ RORL $0x11, R8
+ MOVL $0x1b873593, DI
+ IMULL DI, R8
+ XORL R8, R9
+ RORL $0x13, R9
+ LEAL (R9)(R9*4), R8
+ LEAL 3864292196(R8), R9
+ ADDL BX, R9
+ ADDL SI, BX
+ RORL $0x0c, BX
+ ADDL DX, BX
+ MOVL $0xcc9e2d51, DX
+ IMULL DX, BP
+ RORL $0x11, BP
+ MOVL $0x1b873593, DX
+ IMULL DX, BP
+ XORL BP, R9
+ RORL $0x13, R9
+ LEAL (R9)(R9*4), BP
+ LEAL 3864292196(BP), R9
+ ADDL BX, R9
+ MOVL R9, DX
+ SHRL $0x10, DX
+ XORL DX, R9
+ MOVL $0x85ebca6b, DX
+ IMULL DX, R9
+ MOVL R9, DX
+ SHRL $0x0d, DX
+ XORL DX, R9
+ MOVL $0xc2b2ae35, DX
+ IMULL DX, R9
+ MOVL R9, DX
+ SHRL $0x10, DX
+ XORL DX, R9
+ MOVL R9, ret+24(FP)
+ RET
+
+long:
+ MOVL CX, DX
+ MOVL $0xcc9e2d51, BX
+ IMULL DX, BX
+ MOVL BX, BP
+ MOVQ CX, SI
+ ADDQ AX, SI
+ MOVL $0xcc9e2d51, DI
+ MOVL $0x1b873593, R8
+ MOVL -4(SI), R9
+ IMULL DI, R9
+ RORL $0x11, R9
+ IMULL R8, R9
+ XORL R9, DX
+ RORL $0x13, DX
+ MOVL DX, R9
+ SHLL $0x02, R9
+ ADDL R9, DX
+ ADDL $0xe6546b64, DX
+ MOVL -8(SI), R9
+ IMULL DI, R9
+ RORL $0x11, R9
+ IMULL R8, R9
+ XORL R9, BX
+ RORL $0x13, BX
+ MOVL BX, R9
+ SHLL $0x02, R9
+ ADDL R9, BX
+ ADDL $0xe6546b64, BX
+ MOVL -16(SI), R9
+ IMULL DI, R9
+ RORL $0x11, R9
+ IMULL R8, R9
+ XORL R9, DX
+ RORL $0x13, DX
+ MOVL DX, R9
+ SHLL $0x02, R9
+ ADDL R9, DX
+ ADDL $0xe6546b64, DX
+ MOVL -12(SI), R9
+ IMULL DI, R9
+ RORL $0x11, R9
+ IMULL R8, R9
+ XORL R9, BX
+ RORL $0x13, BX
+ MOVL BX, R9
+ SHLL $0x02, R9
+ ADDL R9, BX
+ ADDL $0xe6546b64, BX
+ PREFETCHT0 (AX)
+ MOVL -20(SI), SI
+ IMULL DI, SI
+ RORL $0x11, SI
+ IMULL R8, SI
+ ADDL SI, BP
+ RORL $0x13, BP
+ ADDL $0x71, BP
+
+loop80:
+ CMPQ CX, $0x64
+ JL loop20
+ PREFETCHT0 20(AX)
+ MOVL (AX), SI
+ ADDL SI, DX
+ MOVL 4(AX), DI
+ ADDL DI, BX
+ MOVL 8(AX), R8
+ ADDL R8, BP
+ MOVL 12(AX), R9
+ MOVL R9, R11
+ MOVL $0xcc9e2d51, R10
+ IMULL R10, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R10
+ IMULL R10, R11
+ XORL R11, DX
+ RORL $0x13, DX
+ LEAL (DX)(DX*4), R11
+ LEAL 3864292196(R11), DX
+ MOVL 16(AX), R10
+ ADDL R10, DX
+ MOVL R8, R11
+ MOVL $0xcc9e2d51, R8
+ IMULL R8, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R8
+ IMULL R8, R11
+ XORL R11, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), R11
+ LEAL 3864292196(R11), BX
+ ADDL SI, BX
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R10
+ MOVL R10, R11
+ ADDL DI, R11
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, SI
+ IMULL SI, R11
+ XORL R11, BP
+ RORL $0x13, BP
+ LEAL (BP)(BP*4), R11
+ LEAL 3864292196(R11), BP
+ ADDL R9, BP
+ ADDL BX, BP
+ ADDL BP, BX
+ PREFETCHT0 40(AX)
+ MOVL 20(AX), SI
+ ADDL SI, DX
+ MOVL 24(AX), DI
+ ADDL DI, BX
+ MOVL 28(AX), R8
+ ADDL R8, BP
+ MOVL 32(AX), R9
+ MOVL R9, R11
+ MOVL $0xcc9e2d51, R10
+ IMULL R10, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R10
+ IMULL R10, R11
+ XORL R11, DX
+ RORL $0x13, DX
+ LEAL (DX)(DX*4), R11
+ LEAL 3864292196(R11), DX
+ MOVL 36(AX), R10
+ ADDL R10, DX
+ MOVL R8, R11
+ MOVL $0xcc9e2d51, R8
+ IMULL R8, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R8
+ IMULL R8, R11
+ XORL R11, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), R11
+ LEAL 3864292196(R11), BX
+ ADDL SI, BX
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R10
+ MOVL R10, R11
+ ADDL DI, R11
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, SI
+ IMULL SI, R11
+ XORL R11, BP
+ RORL $0x13, BP
+ LEAL (BP)(BP*4), R11
+ LEAL 3864292196(R11), BP
+ ADDL R9, BP
+ ADDL BX, BP
+ ADDL BP, BX
+ PREFETCHT0 60(AX)
+ MOVL 40(AX), SI
+ ADDL SI, DX
+ MOVL 44(AX), DI
+ ADDL DI, BX
+ MOVL 48(AX), R8
+ ADDL R8, BP
+ MOVL 52(AX), R9
+ MOVL R9, R11
+ MOVL $0xcc9e2d51, R10
+ IMULL R10, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R10
+ IMULL R10, R11
+ XORL R11, DX
+ RORL $0x13, DX
+ LEAL (DX)(DX*4), R11
+ LEAL 3864292196(R11), DX
+ MOVL 56(AX), R10
+ ADDL R10, DX
+ MOVL R8, R11
+ MOVL $0xcc9e2d51, R8
+ IMULL R8, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R8
+ IMULL R8, R11
+ XORL R11, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), R11
+ LEAL 3864292196(R11), BX
+ ADDL SI, BX
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R10
+ MOVL R10, R11
+ ADDL DI, R11
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, SI
+ IMULL SI, R11
+ XORL R11, BP
+ RORL $0x13, BP
+ LEAL (BP)(BP*4), R11
+ LEAL 3864292196(R11), BP
+ ADDL R9, BP
+ ADDL BX, BP
+ ADDL BP, BX
+ PREFETCHT0 80(AX)
+ MOVL 60(AX), SI
+ ADDL SI, DX
+ MOVL 64(AX), DI
+ ADDL DI, BX
+ MOVL 68(AX), R8
+ ADDL R8, BP
+ MOVL 72(AX), R9
+ MOVL R9, R11
+ MOVL $0xcc9e2d51, R10
+ IMULL R10, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R10
+ IMULL R10, R11
+ XORL R11, DX
+ RORL $0x13, DX
+ LEAL (DX)(DX*4), R11
+ LEAL 3864292196(R11), DX
+ MOVL 76(AX), R10
+ ADDL R10, DX
+ MOVL R8, R11
+ MOVL $0xcc9e2d51, R8
+ IMULL R8, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R8
+ IMULL R8, R11
+ XORL R11, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), R11
+ LEAL 3864292196(R11), BX
+ ADDL SI, BX
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R10
+ MOVL R10, R11
+ ADDL DI, R11
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, SI
+ IMULL SI, R11
+ XORL R11, BP
+ RORL $0x13, BP
+ LEAL (BP)(BP*4), R11
+ LEAL 3864292196(R11), BP
+ ADDL R9, BP
+ ADDL BX, BP
+ ADDL BP, BX
+ ADDQ $0x50, AX
+ SUBQ $0x50, CX
+ JMP loop80
+
+loop20:
+ CMPQ CX, $0x14
+ JLE after
+ MOVL (AX), SI
+ ADDL SI, DX
+ MOVL 4(AX), DI
+ ADDL DI, BX
+ MOVL 8(AX), R8
+ ADDL R8, BP
+ MOVL 12(AX), R9
+ MOVL R9, R11
+ MOVL $0xcc9e2d51, R10
+ IMULL R10, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R10
+ IMULL R10, R11
+ XORL R11, DX
+ RORL $0x13, DX
+ LEAL (DX)(DX*4), R11
+ LEAL 3864292196(R11), DX
+ MOVL 16(AX), R10
+ ADDL R10, DX
+ MOVL R8, R11
+ MOVL $0xcc9e2d51, R8
+ IMULL R8, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, R8
+ IMULL R8, R11
+ XORL R11, BX
+ RORL $0x13, BX
+ LEAL (BX)(BX*4), R11
+ LEAL 3864292196(R11), BX
+ ADDL SI, BX
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R10
+ MOVL R10, R11
+ ADDL DI, R11
+ MOVL $0xcc9e2d51, SI
+ IMULL SI, R11
+ RORL $0x11, R11
+ MOVL $0x1b873593, SI
+ IMULL SI, R11
+ XORL R11, BP
+ RORL $0x13, BP
+ LEAL (BP)(BP*4), R11
+ LEAL 3864292196(R11), BP
+ ADDL R9, BP
+ ADDL BX, BP
+ ADDL BP, BX
+ ADDQ $0x14, AX
+ SUBQ $0x14, CX
+ JMP loop20
+
+after:
+ MOVL $0xcc9e2d51, AX
+ RORL $0x0b, BX
+ IMULL AX, BX
+ RORL $0x11, BX
+ IMULL AX, BX
+ RORL $0x0b, BP
+ IMULL AX, BP
+ RORL $0x11, BP
+ IMULL AX, BP
+ ADDL BX, DX
+ RORL $0x13, DX
+ MOVL DX, CX
+ SHLL $0x02, CX
+ ADDL CX, DX
+ ADDL $0xe6546b64, DX
+ RORL $0x11, DX
+ IMULL AX, DX
+ ADDL BP, DX
+ RORL $0x13, DX
+ MOVL DX, CX
+ SHLL $0x02, CX
+ ADDL CX, DX
+ ADDL $0xe6546b64, DX
+ RORL $0x11, DX
+ IMULL AX, DX
+ MOVL DX, ret+24(FP)
+ RET
diff --git a/vendor/github.com/dgryski/go-farm/fp_generic.go b/vendor/github.com/dgryski/go-farm/fp_generic.go
new file mode 100644
index 000000000..2cfa1b9dc
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/fp_generic.go
@@ -0,0 +1,13 @@
+// +build !amd64 purego
+
+package farm
+
+// Fingerprint64 is a 64-bit fingerprint function for byte-slices
+func Fingerprint64(s []byte) uint64 {
+ return naHash64(s)
+}
+
+// Fingerprint32 is a 32-bit fingerprint function for byte-slices
+func Fingerprint32(s []byte) uint32 {
+ return Hash32(s)
+}
diff --git a/vendor/github.com/dgryski/go-farm/fp_stub.go b/vendor/github.com/dgryski/go-farm/fp_stub.go
new file mode 100644
index 000000000..94fff8de5
--- /dev/null
+++ b/vendor/github.com/dgryski/go-farm/fp_stub.go
@@ -0,0 +1,9 @@
+// Code generated by command: go run asm.go -out=fp_amd64.s -stubs=fp_stub.go. DO NOT EDIT.
+
+// +build amd64,!purego
+
+package farm
+
+func Fingerprint64(s []byte) uint64
+
+func Fingerprint32(s []byte) uint32
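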
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 000000000..ba95cdd15
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go:
+ - 1.3.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 000000000..8d9a94a90
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 000000000..91b4ae564
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
+# Humane Units [Build Status](https://travis-ci.org/dustin/go-humanize) [GoDoc](https://godoc.org/github.com/dustin/go-humanize)
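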
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+ 0 -> 0th
+ 1 -> 1st
+ 2 -> 2nd
+ 3 -> 3rd
+ 4 -> 4th
+ [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+ 0 -> 0
+ 100 -> 100
+ 1000 -> 1,000
+ 1000000000 -> 1,000,000,000
+ -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24) // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0) // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 000000000..f49dc337d
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+ "math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ if mag == maxmag && maxmag >= 0 {
+ break
+ }
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 000000000..1a2bf6172
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,173 @@
+package humanize
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "unicode"
+)
+
+var (
+ bigIECExp = big.NewInt(1024)
+
+ // BigByte is one byte in big.Ints
+ BigByte = big.NewInt(1)
+ // BigKiByte is 1,024 bytes in big.Ints
+ BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+ // BigMiByte is 1,024 k bytes in big.Ints
+ BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+ // BigGiByte is 1,024 m bytes in big.Ints
+ BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+ // BigTiByte is 1,024 g bytes in big.Ints
+ BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+ // BigPiByte is 1,024 t bytes in big.Ints
+ BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+ // BigEiByte is 1,024 p bytes in big.Ints
+ BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+ // BigZiByte is 1,024 e bytes in big.Ints
+ BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+ // BigYiByte is 1,024 z bytes in big.Ints
+ BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+)
+
+var (
+ bigSIExp = big.NewInt(1000)
+
+ // BigSIByte is one SI byte in big.Ints
+ BigSIByte = big.NewInt(1)
+ // BigKByte is 1,000 SI bytes in big.Ints
+ BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+ // BigMByte is 1,000 SI k bytes in big.Ints
+ BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+ // BigGByte is 1,000 SI m bytes in big.Ints
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+ // BigTByte is 1,000 SI g bytes in big.Ints
+ BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+ // BigPByte is 1,000 SI t bytes in big.Ints
+ BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+ // BigEByte is 1,000 SI p bytes in big.Ints
+ BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+ // BigZByte is 1,000 SI e bytes in big.Ints
+ BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+ // BigYByte is 1,000 SI z bytes in big.Ints
+ BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+ "b": BigByte,
+ "kib": BigKiByte,
+ "kb": BigKByte,
+ "mib": BigMiByte,
+ "mb": BigMByte,
+ "gib": BigGiByte,
+ "gb": BigGByte,
+ "tib": BigTiByte,
+ "tb": BigTByte,
+ "pib": BigPiByte,
+ "pb": BigPByte,
+ "eib": BigEiByte,
+ "eb": BigEByte,
+ "zib": BigZiByte,
+ "zb": BigZByte,
+ "yib": BigYiByte,
+ "yb": BigYByte,
+ // Without suffix
+ "": BigByte,
+ "ki": BigKiByte,
+ "k": BigKByte,
+ "mi": BigMiByte,
+ "m": BigMByte,
+ "gi": BigGiByte,
+ "g": BigGByte,
+ "ti": BigTiByte,
+ "t": BigTByte,
+ "pi": BigPiByte,
+ "p": BigPByte,
+ "ei": BigEiByte,
+ "e": BigEByte,
+ "z": BigZByte,
+ "zi": BigZiByte,
+ "y": BigYByte,
+ "yi": BigYiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+ if s.Cmp(ten) < 0 {
+ return fmt.Sprintf("%d B", s)
+ }
+ c := (&big.Int{}).Set(s)
+ val, mag := oomm(c, base, len(sizes)-1)
+ suffix := sizes[mag]
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+ return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ val := &big.Rat{}
+ _, err := fmt.Sscanf(num, "%f", val)
+ if err != nil {
+ return nil, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bigBytesSizeTable[extra]; ok {
+ mv := (&big.Rat{}).SetInt(m)
+ val.Mul(val, mv)
+ rv := &big.Int{}
+ rv.Div(val.Num(), val.Denom())
+ return rv, nil
+ }
+
+ return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
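A short sketch tying `oomm` (from big.go) and `humanateBigBytes` together; the 82854982 value echoes the doc-comment example. With base 1000, `oomm` divides twice (82854982 → 82854 → 82 with remainder 854) and returns roughly 82.85 at magnitude 2, which formats as "83 MB".

```go
package main

import (
	"fmt"
	"math/big"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	n := big.NewInt(82854982)
	fmt.Println(humanize.BigBytes(n))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.BigIBytes(n)) // 79 MiB (IEC, base 1024)

	// ParseBigBytes is the inverse and accepts both unit families.
	v, err := humanize.ParseBigBytes("79 MiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 82837504
}
```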
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 000000000..0b498f488
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// IEC Sizes.
+// kibis of bytes
+const (
+ Byte = 1 << (iota * 10)
+ KiByte
+ MiByte
+ GiByte
+ TiByte
+ PiByte
+ EiByte
+)
+
+// SI Sizes.
+const (
+ IByte = 1
+ KByte = IByte * 1000
+ MByte = KByte * 1000
+ GByte = MByte * 1000
+ TByte = GByte * 1000
+ PByte = TByte * 1000
+ EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+ "b": Byte,
+ "kib": KiByte,
+ "kb": KByte,
+ "mib": MiByte,
+ "mb": MByte,
+ "gib": GiByte,
+ "gb": GByte,
+ "tib": TiByte,
+ "tb": TByte,
+ "pib": PiByte,
+ "pb": PByte,
+ "eib": EiByte,
+ "eb": EByte,
+ // Without suffix
+ "": Byte,
+ "ki": KiByte,
+ "k": KByte,
+ "mi": MiByte,
+ "m": MByte,
+ "gi": GiByte,
+ "g": GByte,
+ "ti": TiByte,
+ "t": TByte,
+ "pi": PiByte,
+ "p": PByte,
+ "ei": EiByte,
+ "e": EByte,
+}
+
+func logn(n, b float64) float64 {
+ return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+ if s < 10 {
+ return fmt.Sprintf("%d B", s)
+ }
+ e := math.Floor(logn(float64(s), base))
+ suffix := sizes[int(e)]
+ val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+ return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+ return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ f, err := strconv.ParseFloat(num, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bytesSizeTable[extra]; ok {
+ f *= float64(m)
+ if f >= math.MaxUint64 {
+ return 0, fmt.Errorf("too large: %v", s)
+ }
+ return uint64(f), nil
+ }
+
+ return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
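A brief sketch of the uint64 parser above; the inputs are illustrative and exercise the comma stripping and case-insensitive unit lookup in `ParseBytes`.

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// The unit suffix is lower-cased and whitespace-trimmed, and commas in
	// the numeric part are stripped before parsing.
	for _, in := range []string{"42 MB", "42 mib", "1,024 KiB"} {
		n, err := humanize.ParseBytes(in)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %d\n", in, n)
	}
	// 42 MB -> 42000000
	// 42 mib -> 44040192
	// 1,024 KiB -> 1048576
}
```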
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 000000000..520ae3e57
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
+package humanize
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+ sign := ""
+
+ // Min int64 can't be negated to a usable value, so it has to be special cased.
+ if v == math.MinInt64 {
+ return "-9,223,372,036,854,775,808"
+ }
+
+ if v < 0 {
+ sign = "-"
+ v = 0 - v
+ }
+
+ parts := []string{"", "", "", "", "", "", ""}
+ j := len(parts) - 1
+
+ for v > 999 {
+ parts[j] = strconv.FormatInt(v%1000, 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ v = v / 1000
+ j--
+ }
+ parts[j] = strconv.Itoa(int(v))
+ return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+ buf := &bytes.Buffer{}
+ if v < 0 {
+ buf.Write([]byte{'-'})
+ v = 0 - v
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
+
+// CommafWithDigits works like the Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+func CommafWithDigits(f float64, decimals int) string {
+ return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+ sign := ""
+ if b.Sign() < 0 {
+ sign = "-"
+ b.Abs(b)
+ }
+
+ athousand := big.NewInt(1000)
+ c := (&big.Int{}).Set(b)
+ _, m := oom(c, athousand)
+ parts := make([]string, m+1)
+ j := len(parts) - 1
+
+ mod := &big.Int{}
+ for b.Cmp(athousand) >= 0 {
+ b.DivMod(b, athousand, mod)
+ parts[j] = strconv.FormatInt(mod.Int64(), 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ j--
+ }
+ parts[j] = strconv.Itoa(int(b.Int64()))
+ return sign + strings.Join(parts[j:], ",")
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 000000000..620690dec
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+ "bytes"
+ "math/big"
+ "strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+ buf := &bytes.Buffer{}
+ if v.Sign() < 0 {
+ buf.Write([]byte{'-'})
+ v.Abs(v)
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(v.Text('f', -1), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 000000000..1c62b640d
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,46 @@
+package humanize
+
+import (
+ "strconv"
+ "strings"
+)
+
+func stripTrailingZeros(s string) string {
+ offset := len(s) - 1
+ for offset > 0 {
+ if s[offset] == '.' {
+ offset--
+ break
+ }
+ if s[offset] != '0' {
+ break
+ }
+ offset--
+ }
+ return s[:offset+1]
+}
+
+func stripTrailingDigits(s string, digits int) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ if digits <= 0 {
+ return s[:i]
+ }
+ i++
+ if i+digits >= len(s) {
+ return s
+ }
+ return s[:i+digits]
+ }
+ return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+ return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting string
+// to the given number of decimal places, and no trailing zeros.
+func FtoaWithDigits(num float64, digits int) string {
+ return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}
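+
+// exampleFtoa is an illustrative usage sketch; it is not part of the
+// upstream go-humanize package. It shows how Ftoa and FtoaWithDigits strip
+// trailing zeros from the fixed six-decimal formatting.
+func exampleFtoa() []string {
+ return []string{
+ Ftoa(2.5),                  // "2.5" (not "2.500000")
+ Ftoa(2.0),                  // "2"
+ FtoaWithDigits(2.54789, 2), // "2.54"
+ }
+}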
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 000000000..a2c2da31e
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", numbers
+representing sizes like 82854982 into useful strings like, "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 000000000..dec618659
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+ "math"
+ "strconv"
+)
+
+var (
+ renderFloatPrecisionMultipliers = [...]float64{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ }
+
+ renderFloatPrecisionRounders = [...]float64{
+ 0.5,
+ 0.05,
+ 0.005,
+ 0.0005,
+ 0.00005,
+ 0.000005,
+ 0.0000005,
+ 0.00000005,
+ 0.000000005,
+ 0.0000000005,
+ }
+)
+
+// FormatFloat produces a formatted number as string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,###### => 12.345,678900
+// "" (aka default format) => 12,345.67
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within templates.
+func FormatFloat(format string, n float64) string {
+ // Special cases:
+ // NaN = "NaN"
+ // +Inf = "+Infinity"
+ // -Inf = "-Infinity"
+ if math.IsNaN(n) {
+ return "NaN"
+ }
+ if n > math.MaxFloat64 {
+ return "Infinity"
+ }
+ if n < -math.MaxFloat64 {
+ return "-Infinity"
+ }
+
+ // default format
+ precision := 2
+ decimalStr := "."
+ thousandStr := ","
+ positiveStr := ""
+ negativeStr := "-"
+
+ if len(format) > 0 {
+ format := []rune(format)
+
+ // If there is an explicit format directive,
+ // then default values are these:
+ precision = 9
+ thousandStr = ""
+
+ // collect indices of meaningful formatting directives
+ formatIndx := []int{}
+ for i, char := range format {
+ if char != '#' && char != '0' {
+ formatIndx = append(formatIndx, i)
+ }
+ }
+
+ if len(formatIndx) > 0 {
+ // Directive at index 0:
+ // Must be a '+'
+ // Raise an error if not the case
+ // index: 0123456789
+ // +0.000,000
+ // +000,000.0
+ // +0000.00
+ // +0000
+ if formatIndx[0] == 0 {
+ if format[formatIndx[0]] != '+' {
+ panic("RenderFloat(): invalid positive sign directive")
+ }
+ positiveStr = "+"
+ formatIndx = formatIndx[1:]
+ }
+
+ // Two directives:
+ // First is thousands separator
+ // Raise an error if not followed by 3-digit
+ // 0123456789
+ // 0.000,000
+ // 000,000.00
+ if len(formatIndx) == 2 {
+ if (formatIndx[1] - formatIndx[0]) != 4 {
+ panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+ }
+ thousandStr = string(format[formatIndx[0]])
+ formatIndx = formatIndx[1:]
+ }
+
+ // One directive:
+ // Directive is decimal separator
+ // The number of digit-specifier following the separator indicates wanted precision
+ // 0123456789
+ // 0.00
+ // 000,0000
+ if len(formatIndx) == 1 {
+ decimalStr = string(format[formatIndx[0]])
+ precision = len(format) - formatIndx[0] - 1
+ }
+ }
+ }
+
+ // generate sign part
+ var signStr string
+ if n >= 0.000000001 {
+ signStr = positiveStr
+ } else if n <= -0.000000001 {
+ signStr = negativeStr
+ n = -n
+ } else {
+ signStr = ""
+ n = 0.0
+ }
+
+ // split number into integer and fractional parts
+ intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+ // generate integer part string
+ intStr := strconv.FormatInt(int64(intf), 10)
+
+ // add thousand separator if required
+ if len(thousandStr) > 0 {
+ for i := len(intStr); i > 3; {
+ i -= 3
+ intStr = intStr[:i] + thousandStr + intStr[i:]
+ }
+ }
+
+ // no fractional part, we can leave now
+ if precision == 0 {
+ return signStr + intStr
+ }
+
+ // generate fractional part
+ fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+ // may need padding
+ if len(fracStr) < precision {
+ fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+ }
+
+ return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+ return FormatFloat(format, float64(n))
+}
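+
+// exampleFormatFloat is an illustrative usage sketch; it is not part of the
+// upstream go-humanize package. It exercises the format directives
+// documented above on values that avoid rounding surprises.
+func exampleFormatFloat() []string {
+ return []string{
+ FormatFloat("#,###.##", 12345.0),  // "12,345.00"
+ FormatFloat("#,###.", 12345.0),    // "12,345"
+ FormatFloat("+#,###.##", 12345.0), // "+12,345.00"
+ FormatInteger("#,###.", 12345),    // "12,345"
+ }
+}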
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 000000000..43d88a861
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
+func Ordinal(x int) string {
+ suffix := "th"
+ switch x % 10 {
+ case 1:
+ if x%100 != 11 {
+ suffix = "st"
+ }
+ case 2:
+ if x%100 != 12 {
+ suffix = "nd"
+ }
+ case 3:
+ if x%100 != 13 {
+ suffix = "rd"
+ }
+ }
+ return strconv.Itoa(x) + suffix
+}
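+
+// exampleOrdinal is an illustrative usage sketch; it is not part of the
+// upstream go-humanize package. It shows how the %100 checks keep the
+// 11-13 range on the "th" suffix.
+func exampleOrdinal() []string {
+ return []string{
+ Ordinal(1),   // "1st"
+ Ordinal(11),  // "11th"
+ Ordinal(21),  // "21st"
+ Ordinal(112), // "112th"
+ Ordinal(113), // "113th"
+ }
+}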
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 000000000..ae659e0e4
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,123 @@
+package humanize
+
+import (
+ "errors"
+ "math"
+ "regexp"
+ "strconv"
+)
+
+var siPrefixTable = map[float64]string{
+ -24: "y", // yocto
+ -21: "z", // zepto
+ -18: "a", // atto
+ -15: "f", // femto
+ -12: "p", // pico
+ -9: "n", // nano
+ -6: "µ", // micro
+ -3: "m", // milli
+ 0: "",
+ 3: "k", // kilo
+ 6: "M", // mega
+ 9: "G", // giga
+ 12: "T", // tera
+ 15: "P", // peta
+ 18: "E", // exa
+ 21: "Z", // zetta
+ 24: "Y", // yotta
+}
+
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+ rv := map[string]float64{}
+ for k, v := range in {
+ rv[v] = math.Pow(10, k)
+ }
+ return rv
+}
+
+var riParseRegex *regexp.Regexp
+
+func init() {
+ ri := `^([\-0-9.]+)\s?([`
+ for _, v := range siPrefixTable {
+ ri += v
+ }
+ ri += `]?)(.*)`
+
+ riParseRegex = regexp.MustCompile(ri)
+}
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+ if input == 0 {
+ return 0, ""
+ }
+ mag := math.Abs(input)
+ exponent := math.Floor(logn(mag, 10))
+ exponent = math.Floor(exponent/3) * 3
+
+ value := mag / math.Pow(10, exponent)
+
+ // Handle special case where value is exactly 1000.0
+ // Should return 1 M instead of 1000 k
+ if value == 1000.0 {
+ exponent += 3
+ value = mag / math.Pow(10, exponent)
+ }
+
+ value = math.Copysign(value, input)
+
+ prefix := siPrefixTable[exponent]
+ return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+ value, prefix := ComputeSI(input)
+ return Ftoa(value) + " " + prefix + unit
+}
+
+// SIWithDigits works like SI but limits the resulting string to the
+// given number of decimal places.
+//
+// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
+// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
+func SIWithDigits(input float64, decimals int, unit string) string {
+ value, prefix := ComputeSI(input)
+ return FtoaWithDigits(value, decimals) + " " + prefix + unit
+}
+
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+ found := riParseRegex.FindStringSubmatch(input)
+ if len(found) != 4 {
+ return 0, "", errInvalid
+ }
+ mag := revSIPrefixTable[found[2]]
+ unit := found[3]
+
+ base, err := strconv.ParseFloat(found[1], 64)
+ return base * mag, unit, err
+}
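+
+// exampleSI is an illustrative usage sketch; it is not part of the upstream
+// go-humanize package. It shows SI formatting and the ParseSI round trip.
+func exampleSI() (string, string, float64) {
+ formatted := SI(2.2345e-12, "F")            // "2.2345 pF"
+ rounded := SIWithDigits(2.2345e-12, 2, "F") // "2.23 pF"
+ parsed, _, _ := ParseSI("2.2345 pF")        // 2.2345e-12 (unit "F")
+ return formatted, rounded, parsed
+}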
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 000000000..dd3fbf5ef
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "time"
+)
+
+// Seconds-based time units
+const (
+ Day = 24 * time.Hour
+ Week = 7 * Day
+ Month = 30 * Day
+ Year = 12 * Month
+ LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+ return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+ D time.Duration
+ Format string
+ DivBy time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+ {time.Second, "now", time.Second},
+ {2 * time.Second, "1 second %s", 1},
+ {time.Minute, "%d seconds %s", time.Second},
+ {2 * time.Minute, "1 minute %s", 1},
+ {time.Hour, "%d minutes %s", time.Minute},
+ {2 * time.Hour, "1 hour %s", 1},
+ {Day, "%d hours %s", time.Hour},
+ {2 * Day, "1 day %s", 1},
+ {Week, "%d days %s", Day},
+ {2 * Week, "1 week %s", 1},
+ {Month, "%d weeks %s", Week},
+ {2 * Month, "1 month %s", 1},
+ {Year, "%d months %s", Month},
+ {18 * Month, "1 year %s", 1},
+ {2 * Year, "2 years %s", 1},
+ {LongTime, "%d years %s", Year},
+ {math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels. In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are applied so that
+// the label corresponding to the smaller time is used.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+ return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels, and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are applied so that the label corresponding to the smaller
+// time is used.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+ lbl := albl
+ diff := b.Sub(a)
+
+ if a.After(b) {
+ lbl = blbl
+ diff = a.Sub(b)
+ }
+
+ n := sort.Search(len(magnitudes), func(i int) bool {
+ return magnitudes[i].D > diff
+ })
+
+ if n >= len(magnitudes) {
+ n = len(magnitudes) - 1
+ }
+ mag := magnitudes[n]
+ args := []interface{}{}
+ escaped := false
+ for _, ch := range mag.Format {
+ if escaped {
+ switch ch {
+ case 's':
+ args = append(args, lbl)
+ case 'd':
+ args = append(args, diff/mag.DivBy)
+ }
+ escaped = false
+ } else {
+ escaped = ch == '%'
+ }
+ }
+ return fmt.Sprintf(mag.Format, args...)
+}
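+
+// exampleRelTime is an illustrative usage sketch; it is not part of the
+// upstream go-humanize package. It shows RelTime against a fixed reference
+// time, and CustomRelTime with a single-entry magnitude table.
+func exampleRelTime() (string, string) {
+ ref := time.Date(2019, 10, 1, 0, 0, 0, 0, time.UTC)
+ then := ref.Add(-3 * Week)
+ std := RelTime(then, ref, "ago", "from now") // "3 weeks ago"
+ days := CustomRelTime(then, ref, "ago", "from now", []RelTimeMagnitude{
+ {math.MaxInt64, "%d days %s", Day}, // report everything in days
+ }) // "21 days ago"
+ return std, days
+}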
diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE
new file mode 100644
index 000000000..9d83342ac
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md
new file mode 100644
index 000000000..a201a3d92
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/README.md
@@ -0,0 +1,151 @@
+# package log
+
+`package log` provides a minimal interface for structured logging in services.
+It may be wrapped to encode conventions, enforce type-safety, provide leveled
+logging, and so on. It can be used for both typical application log events,
+and log-structured data streams.
+
+## Structured logging
+
+Structured logging is, basically, conceding to the reality that logs are
+_data_, and warrant some level of schematic rigor. Using a stricter,
+key/value-oriented message format for our logs, containing contextual and
+semantic information, makes it much easier to get insight into the
+operational activity of the systems we build. Consequently, `package log` is
+of the strong belief that "[the benefits of structured logging outweigh the
+minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
+
+Migrating from unstructured to structured logging is probably a lot easier
+than you'd expect.
+
+```go
+// Unstructured
+log.Printf("HTTP server listening on %s", addr)
+
+// Structured
+logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
+```
+
+## Usage
+
+### Typical application logging
+
+```go
+w := log.NewSyncWriter(os.Stderr)
+logger := log.NewLogfmtLogger(w)
+logger.Log("question", "what is the meaning of life?", "answer", 42)
+
+// Output:
+// question="what is the meaning of life?" answer=42
+```
+
+### Contextual Loggers
+
+```go
+func main() {
+ var logger log.Logger
+ logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+ logger = log.With(logger, "instance_id", 123)
+
+ logger.Log("msg", "starting")
+ NewWorker(log.With(logger, "component", "worker")).Run()
+ NewSlacker(log.With(logger, "component", "slacker")).Run()
+}
+
+// Output:
+// instance_id=123 msg=starting
+// instance_id=123 component=worker msg=running
+// instance_id=123 component=slacker msg=running
+```
+
+### Interact with stdlib logger
+
+Redirect stdlib logger to Go kit logger.
+
+```go
+import (
+ "os"
+ stdlog "log"
+ kitlog "github.com/go-kit/kit/log"
+)
+
+func main() {
+ logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
+ stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
+ stdlog.Print("I sure like pie")
+}
+
+// Output:
+// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
+```
+
+Or, if, for legacy reasons, you need to pipe all of your logging through the
+stdlib log package, you can redirect Go kit logger to the stdlib logger.
+
+```go
+logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
+logger.Log("legacy", true, "msg", "at least it's something")
+
+// Output:
+// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
+```
+
+### Timestamps and callers
+
+```go
+var logger log.Logger
+logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+
+logger.Log("msg", "hello")
+
+// Output:
+// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
+```
+
+## Levels
+
+Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).
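+
+A minimal leveled-logging sketch (assuming the `level` package API linked
+above):
+
+```go
+var logger log.Logger
+logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+logger = level.NewFilter(logger, level.AllowInfo()) // squelch debug events
+
+level.Debug(logger).Log("msg", "this event is filtered out")
+level.Info(logger).Log("msg", "this event is logged")
+
+// Output:
+// level=info msg="this event is logged"
+```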
+
+## Supported output formats
+
+- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
+- JSON
+
+## Enhancements
+
+`package log` is centered on the one-method Logger interface.
+
+```go
+type Logger interface {
+ Log(keyvals ...interface{}) error
+}
+```
+
+This interface, and its supporting code, is the product of much iteration
+and evaluation. For more details on the evolution of the Logger interface,
+see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
+a talk by [Chris Hines](https://github.com/ChrisHines).
+Also, please see
+[#63](https://github.com/go-kit/kit/issues/63),
+[#76](https://github.com/go-kit/kit/pull/76),
+[#131](https://github.com/go-kit/kit/issues/131),
+[#157](https://github.com/go-kit/kit/pull/157),
+[#164](https://github.com/go-kit/kit/issues/164), and
+[#252](https://github.com/go-kit/kit/pull/252)
+to review historical conversations about package log and the Logger interface.
+
+Value-add packages and suggestions,
+like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
+are of course welcome. Good proposals should
+
+- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
+- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
+- Be friendly to packages that accept only an unadorned log.Logger.
+
+## Benchmarks & comparisons
+
+There are a few Go logging benchmarks and comparisons that include Go kit's package log.
+
+- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
+- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go
new file mode 100644
index 000000000..918c0af46
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/doc.go
@@ -0,0 +1,116 @@
+// Package log provides a structured logger.
+//
+// Structured logging produces logs easily consumed later by humans or
+// machines. Humans might be interested in debugging errors, or tracing
+// specific requests. Machines might be interested in counting interesting
+// events, or aggregating information for off-line processing. In both cases,
+// it is important that the log messages are structured and actionable.
+// Package log is designed to encourage both of these best practices.
+//
+// Basic Usage
+//
+// The fundamental interface is Logger. Loggers create log events from
+// key/value data. The Logger interface has a single method, Log, which
+// accepts a sequence of alternating key/value pairs, which this package names
+// keyvals.
+//
+// type Logger interface {
+// Log(keyvals ...interface{}) error
+// }
+//
+// Here is an example of a function using a Logger to create log events.
+//
+// func RunTask(task Task, logger log.Logger) string {
+// logger.Log("taskID", task.ID, "event", "starting task")
+// ...
+// logger.Log("taskID", task.ID, "event", "task complete")
+// }
+//
+// The keys in the above example are "taskID" and "event". The values are
+// task.ID, "starting task", and "task complete". Every key is followed
+// immediately by its value.
+//
+// Keys are usually plain strings. Values may be any type that has a sensible
+// encoding in the chosen log format. With structured logging it is a good
+// idea to log simple values without formatting them. This practice allows
+// the chosen logger to encode values in the most appropriate way.
+//
+// Contextual Loggers
+//
+// A contextual logger stores keyvals that it includes in all log events.
+// Building appropriate contextual loggers reduces repetition and aids
+// consistency in the resulting log output. With and WithPrefix add context to
+// a logger. We can use With to improve the RunTask example.
+//
+// func RunTask(task Task, logger log.Logger) string {
+// logger = log.With(logger, "taskID", task.ID)
+// logger.Log("event", "starting task")
+// ...
+// taskHelper(task.Cmd, logger)
+// ...
+// logger.Log("event", "task complete")
+// }
+//
+// The improved version emits the same log events as the original for the
+// first and last calls to Log. Passing the contextual logger to taskHelper
+// enables each log event created by taskHelper to include the task.ID even
+// though taskHelper does not have access to that value. Using contextual
+// loggers this way simplifies producing log output that enables tracing the
+// life cycle of individual tasks. (See the Contextual example for the full
+// code of the above snippet.)
+//
+// Dynamic Contextual Values
+//
+// A Valuer function stored in a contextual logger generates a new value each
+// time an event is logged. The Valuer example demonstrates how this feature
+// works.
+//
+// Valuers provide the basis for consistently logging timestamps and source
+// code location. The log package defines several valuers for that purpose.
+// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
+// DefaultCaller. A common logger initialization sequence that ensures all log
+// entries contain a timestamp and source location looks like this:
+//
+// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+//
+// Concurrent Safety
+//
+// Applications with multiple goroutines want each log event written to the
+// same logger to remain separate from other log events. Package log provides
+// two simple solutions for concurrent safe logging.
+//
+// NewSyncWriter wraps an io.Writer and serializes each call to its Write
+// method. Using a SyncWriter has the benefit that the smallest practical
+// portion of the logging logic is performed within a mutex, but it requires
+// the formatting Logger to make only one call to Write per log event.
+//
+// NewSyncLogger wraps any Logger and serializes each call to its Log method.
+// Using a SyncLogger has the benefit that it guarantees each log event is
+// handled atomically within the wrapped logger, but it typically serializes
+// both the formatting and output logic. Use a SyncLogger if the formatting
+// logger may perform multiple writes per log event.
+//
+// Error Handling
+//
+// This package relies on the practice of wrapping or decorating loggers with
+// other loggers to provide composable pieces of functionality. It also means
+// that Logger.Log must return an error because some
+// implementations—especially those that output log data to an io.Writer—may
+// encounter errors that cannot be handled locally. This in turn means that
+// Loggers that wrap other loggers should return errors from the wrapped
+// logger up the stack.
+//
+// Fortunately, the decorator pattern also provides a way to avoid the
+// necessity to check for errors every time an application calls Logger.Log.
+// An application required to panic whenever its Logger encounters
+// an error could initialize its logger as follows.
+//
+// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
+// if err := fmtlogger.Log(keyvals...); err != nil {
+// panic(err)
+// }
+// return nil
+// })
+package log
diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go
new file mode 100644
index 000000000..66094b4dd
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/json_logger.go
@@ -0,0 +1,89 @@
+package log
+
+import (
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type jsonLogger struct {
+ io.Writer
+}
+
+// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
+// single JSON object. Each log event produces no more than one call to
+// w.Write. The passed Writer must be safe for concurrent use by multiple
+// goroutines if the returned Logger will be used concurrently.
+func NewJSONLogger(w io.Writer) Logger {
+ return &jsonLogger{w}
+}
+
+func (l *jsonLogger) Log(keyvals ...interface{}) error {
+ n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
+ m := make(map[string]interface{}, n)
+ for i := 0; i < len(keyvals); i += 2 {
+ k := keyvals[i]
+ var v interface{} = ErrMissingValue
+ if i+1 < len(keyvals) {
+ v = keyvals[i+1]
+ }
+ merge(m, k, v)
+ }
+ return json.NewEncoder(l.Writer).Encode(m)
+}
+
+func merge(dst map[string]interface{}, k, v interface{}) {
+ var key string
+ switch x := k.(type) {
+ case string:
+ key = x
+ case fmt.Stringer:
+ key = safeString(x)
+ default:
+ key = fmt.Sprint(x)
+ }
+
+ // We want json.Marshaler and encoding.TextMarshaler to take priority over
+ // err.Error() and v.String(). But json.Marshal (called later) does that by
+ // default, so we force a no-op if it's one of those two cases.
+ switch x := v.(type) {
+ case json.Marshaler:
+ case encoding.TextMarshaler:
+ case error:
+ v = safeError(x)
+ case fmt.Stringer:
+ v = safeString(x)
+ }
+
+ dst[key] = v
+}
+
+func safeString(str fmt.Stringer) (s string) {
+ defer func() {
+ if panicVal := recover(); panicVal != nil {
+ if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
+ s = "NULL"
+ } else {
+ panic(panicVal)
+ }
+ }
+ }()
+ s = str.String()
+ return
+}
+
+func safeError(err error) (s interface{}) {
+ defer func() {
+ if panicVal := recover(); panicVal != nil {
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ s = nil
+ } else {
+ panic(panicVal)
+ }
+ }
+ }()
+ s = err.Error()
+ return
+}
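+
+// exampleJSON is an illustrative sketch; it is not part of the upstream
+// go-kit package. It shows the single JSON object emitted per Log call.
+func exampleJSON(w io.Writer) error {
+ logger := NewJSONLogger(w)
+ // Emits: {"answer":42,"msg":"I sure like pie"} followed by a newline
+ // (encoding/json writes map keys in sorted order).
+ return logger.Log("msg", "I sure like pie", "answer", 42)
+}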
diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go
new file mode 100644
index 000000000..505d307b1
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/level/doc.go
@@ -0,0 +1,22 @@
+// Package level implements leveled logging on top of Go kit's log package. To
+// use the level package, create a logger as per normal in your func main, and
+// wrap it with level.NewFilter.
+//
+// var logger log.Logger
+// logger = log.NewLogfmtLogger(os.Stderr)
+// logger = level.NewFilter(logger, level.AllowInfo()) // <--
+// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
+//
+// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
+// helper methods to emit leveled log events.
+//
+// logger.Log("foo", "bar") // as normal, no level
+// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
+// if value > 100 {
+// level.Error(logger).Log("value", value)
+// }
+//
+// NewFilter allows precise control over what happens when a log event is
+// emitted without a level key, or if a squelched level is used. Check the
+// Option functions for details.
+package level
diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go
new file mode 100644
index 000000000..fceafc454
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/level/level.go
@@ -0,0 +1,205 @@
+package level
+
+import "github.com/go-kit/kit/log"
+
+// Error returns a logger that includes a Key/ErrorValue pair.
+func Error(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), ErrorValue())
+}
+
+// Warn returns a logger that includes a Key/WarnValue pair.
+func Warn(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), WarnValue())
+}
+
+// Info returns a logger that includes a Key/InfoValue pair.
+func Info(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), InfoValue())
+}
+
+// Debug returns a logger that includes a Key/DebugValue pair.
+func Debug(logger log.Logger) log.Logger {
+ return log.WithPrefix(logger, Key(), DebugValue())
+}
+
+// NewFilter wraps next and implements level filtering. See the commentary on
+// the Option functions for a detailed description of how to configure levels.
+// If no options are provided, all leveled log events created with Debug,
+// Info, Warn or Error helper methods are squelched and non-leveled log
+// events are passed to next unmodified.
+func NewFilter(next log.Logger, options ...Option) log.Logger {
+ l := &logger{
+ next: next,
+ }
+ for _, option := range options {
+ option(l)
+ }
+ return l
+}
+
+type logger struct {
+ next log.Logger
+ allowed level
+ squelchNoLevel bool
+ errNotAllowed error
+ errNoLevel error
+}
+
+func (l *logger) Log(keyvals ...interface{}) error {
+ var hasLevel, levelAllowed bool
+ for i := 1; i < len(keyvals); i += 2 {
+ if v, ok := keyvals[i].(*levelValue); ok {
+ hasLevel = true
+ levelAllowed = l.allowed&v.level != 0
+ break
+ }
+ }
+ if !hasLevel && l.squelchNoLevel {
+ return l.errNoLevel
+ }
+ if hasLevel && !levelAllowed {
+ return l.errNotAllowed
+ }
+ return l.next.Log(keyvals...)
+}
+
+// Option sets a parameter for the leveled logger.
+type Option func(*logger)
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+ return AllowDebug()
+}
+
+// AllowDebug allows error, warn, info and debug level log events to pass.
+func AllowDebug() Option {
+ return allowed(levelError | levelWarn | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error, warn and info level log events to pass.
+func AllowInfo() Option {
+ return allowed(levelError | levelWarn | levelInfo)
+}
+
+// AllowWarn allows error and warn level log events to pass.
+func AllowWarn() Option {
+ return allowed(levelError | levelWarn)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+ return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option {
+ return allowed(0)
+}
+
+func allowed(allowed level) Option {
+ return func(l *logger) { l.allowed = allowed }
+}
+
+// ErrNotAllowed sets the error to return from Log when it squelches a log
+// event disallowed by the configured Allow[Level] option. By default,
+// ErrNotAllowed is nil; in this case the log event is squelched with no
+// error.
+func ErrNotAllowed(err error) Option {
+ return func(l *logger) { l.errNotAllowed = err }
+}
+
+// SquelchNoLevel instructs Log to squelch log events with no level, so that
+// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
+// to true and a log event is squelched in this way, the error value
+// configured with ErrNoLevel is returned to the caller.
+func SquelchNoLevel(squelch bool) Option {
+ return func(l *logger) { l.squelchNoLevel = squelch }
+}
+
+// ErrNoLevel sets the error to return from Log when it squelches a log event
+// with no level. By default, ErrNoLevel is nil; in this case the log event is
+// squelched with no error.
+func ErrNoLevel(err error) Option {
+ return func(l *logger) { l.errNoLevel = err }
+}
+
+// NewInjector wraps next and returns a logger that adds a Key/level pair to
+// the beginning of log events that don't already contain a level. In effect,
+// this gives a default level to logs without a level.
+func NewInjector(next log.Logger, level Value) log.Logger {
+ return &injector{
+ next: next,
+ level: level,
+ }
+}
+
+type injector struct {
+ next log.Logger
+ level interface{}
+}
+
+func (l *injector) Log(keyvals ...interface{}) error {
+ for i := 1; i < len(keyvals); i += 2 {
+ if _, ok := keyvals[i].(*levelValue); ok {
+ return l.next.Log(keyvals...)
+ }
+ }
+ kvs := make([]interface{}, len(keyvals)+2)
+ kvs[0], kvs[1] = key, l.level
+ copy(kvs[2:], keyvals)
+ return l.next.Log(kvs...)
+}
+
+// Value is the interface that each of the canonical level values implement.
+// It contains unexported methods that prevent types from other packages from
+// implementing it, guaranteeing that NewFilter can distinguish the levels
+// defined in this package from all other values.
+type Value interface {
+ String() string
+ levelVal()
+}
+
+// Key returns the unique key added to log events by the loggers in this
+// package.
+func Key() interface{} { return key }
+
+// ErrorValue returns the unique value added to log events by Error.
+func ErrorValue() Value { return errorValue }
+
+// WarnValue returns the unique value added to log events by Warn.
+func WarnValue() Value { return warnValue }
+
+// InfoValue returns the unique value added to log events by Info.
+func InfoValue() Value { return infoValue }
+
+// DebugValue returns the unique value added to log events by Debug.
+func DebugValue() Value { return debugValue }
+
+var (
+ // key is of type interface{} so that it allocates once during package
+ // initialization and avoids allocating every time the value is added to a
+ // []interface{} later.
+ key interface{} = "level"
+
+ errorValue = &levelValue{level: levelError, name: "error"}
+ warnValue = &levelValue{level: levelWarn, name: "warn"}
+ infoValue = &levelValue{level: levelInfo, name: "info"}
+ debugValue = &levelValue{level: levelDebug, name: "debug"}
+)
+
+type level byte
+
+const (
+ levelDebug level = 1 << iota
+ levelInfo
+ levelWarn
+ levelError
+)
+
+type levelValue struct {
+ name string
+ level
+}
+
+func (v *levelValue) String() string { return v.name }
+func (v *levelValue) levelVal() {}
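+
+// exampleFilter is an illustrative sketch; it is not part of the upstream
+// go-kit package. It shows how Option values combine when constructing a
+// filtering logger.
+func exampleFilter(next log.Logger) log.Logger {
+ // Pass warn and error events through, and also drop any log event that
+ // carries no level key at all.
+ return NewFilter(next, AllowWarn(), SquelchNoLevel(true))
+}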
diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go
new file mode 100644
index 000000000..66a9e2fde
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/log.go
@@ -0,0 +1,135 @@
+package log
+
+import "errors"
+
+// Logger is the fundamental interface for all log operations. Log creates a
+// log event from keyvals, a variadic sequence of alternating keys and values.
+// Implementations must be safe for concurrent use by multiple goroutines. In
+// particular, any implementation of Logger that appends to keyvals or
+// modifies or retains any of its elements must make a copy first.
+type Logger interface {
+ Log(keyvals ...interface{}) error
+}
+
+// ErrMissingValue is appended to keyvals slices with odd length to substitute
+// the missing value.
+var ErrMissingValue = errors.New("(MISSING)")
+
+// With returns a new contextual logger with keyvals prepended to those passed
+// to calls to Log. If logger is also a contextual logger created by With or
+// WithPrefix, keyvals is appended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func With(logger Logger, keyvals ...interface{}) Logger {
+ if len(keyvals) == 0 {
+ return logger
+ }
+ l := newContext(logger)
+ kvs := append(l.keyvals, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ return &context{
+ logger: l.logger,
+ // Limiting the capacity of the stored keyvals ensures that a new
+ // backing array is created if the slice must grow in Log or With.
+ // Using the extra capacity without copying risks a data race that
+ // would violate the Logger interface contract.
+ keyvals: kvs[:len(kvs):len(kvs)],
+ hasValuer: l.hasValuer || containsValuer(keyvals),
+ }
+}
+
+// WithPrefix returns a new contextual logger with keyvals prepended to those
+// passed to calls to Log. If logger is also a contextual logger created by
+// With or WithPrefix, keyvals is prepended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
+ if len(keyvals) == 0 {
+ return logger
+ }
+ l := newContext(logger)
+ // Limiting the capacity of the stored keyvals ensures that a new
+ // backing array is created if the slice must grow in Log or With.
+ // Using the extra capacity without copying risks a data race that
+ // would violate the Logger interface contract.
+ n := len(l.keyvals) + len(keyvals)
+ if len(keyvals)%2 != 0 {
+ n++
+ }
+ kvs := make([]interface{}, 0, n)
+ kvs = append(kvs, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ kvs = append(kvs, l.keyvals...)
+ return &context{
+ logger: l.logger,
+ keyvals: kvs,
+ hasValuer: l.hasValuer || containsValuer(keyvals),
+ }
+}
+
+// context is the Logger implementation returned by With and WithPrefix. It
+// wraps a Logger and holds keyvals that it includes in all log events. Its
+// Log method calls bindValues to generate values for each Valuer in the
+// context keyvals.
+//
+// A context must always have the same number of stack frames between calls to
+// its Log method and the eventual binding of Valuers to their value. This
+// requirement comes from the functional requirement to allow a context to
+// resolve application call site information for a Caller stored in the
+// context. To do this we must be able to predict the number of logging
+// functions on the stack when bindValues is called.
+//
+// Two implementation details provide the needed stack depth consistency.
+//
+// 1. newContext avoids introducing an additional layer when asked to
+// wrap another context.
+// 2. With and WithPrefix avoid introducing an additional layer by
+// returning a newly constructed context with a merged keyvals rather
+// than simply wrapping the existing context.
+type context struct {
+ logger Logger
+ keyvals []interface{}
+ hasValuer bool
+}
+
+func newContext(logger Logger) *context {
+ if c, ok := logger.(*context); ok {
+ return c
+ }
+ return &context{logger: logger}
+}
+
+// Log replaces all value elements (odd indexes) containing a Valuer in the
+// stored context with their generated value, appends keyvals, and passes the
+// result to the wrapped Logger.
+func (l *context) Log(keyvals ...interface{}) error {
+ kvs := append(l.keyvals, keyvals...)
+ if len(kvs)%2 != 0 {
+ kvs = append(kvs, ErrMissingValue)
+ }
+ if l.hasValuer {
+ // If no keyvals were appended above then we must copy l.keyvals so
+ // that future log events will reevaluate the stored Valuers.
+ if len(keyvals) == 0 {
+ kvs = append([]interface{}{}, l.keyvals...)
+ }
+ bindValues(kvs[:len(l.keyvals)])
+ }
+ return l.logger.Log(kvs...)
+}
+
+// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
+// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
+// object that calls f.
+type LoggerFunc func(...interface{}) error
+
+// Log implements Logger by calling f(keyvals...).
+func (f LoggerFunc) Log(keyvals ...interface{}) error {
+ return f(keyvals...)
+}
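+
+// exampleContext is an illustrative sketch; it is not part of the upstream
+// go-kit package. It shows that With appends keyvals to the existing
+// context while WithPrefix prepends them.
+func exampleContext(base Logger) error {
+ logger := With(base, "instance_id", 123)
+ logger = With(logger, "component", "worker")
+ logger = WithPrefix(logger, "ts", DefaultTimestampUTC)
+ // Emits: ts=<timestamp> instance_id=123 component=worker msg=running
+ return logger.Log("msg", "running")
+}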
diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go
new file mode 100644
index 000000000..a00305298
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go
@@ -0,0 +1,62 @@
+package log
+
+import (
+ "bytes"
+ "io"
+ "sync"
+
+ "github.com/go-logfmt/logfmt"
+)
+
+type logfmtEncoder struct {
+ *logfmt.Encoder
+ buf bytes.Buffer
+}
+
+func (l *logfmtEncoder) Reset() {
+ l.Encoder.Reset()
+ l.buf.Reset()
+}
+
+var logfmtEncoderPool = sync.Pool{
+ New: func() interface{} {
+ var enc logfmtEncoder
+ enc.Encoder = logfmt.NewEncoder(&enc.buf)
+ return &enc
+ },
+}
+
+type logfmtLogger struct {
+ w io.Writer
+}
+
+// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
+// logfmt format. Each log event produces no more than one call to w.Write.
+// The passed Writer must be safe for concurrent use by multiple goroutines if
+// the returned Logger will be used concurrently.
+func NewLogfmtLogger(w io.Writer) Logger {
+ return &logfmtLogger{w}
+}
+
+func (l logfmtLogger) Log(keyvals ...interface{}) error {
+ enc := logfmtEncoderPool.Get().(*logfmtEncoder)
+ enc.Reset()
+ defer logfmtEncoderPool.Put(enc)
+
+ if err := enc.EncodeKeyvals(keyvals...); err != nil {
+ return err
+ }
+
+ // Add newline to the end of the buffer
+ if err := enc.EndRecord(); err != nil {
+ return err
+ }
+
+ // The Logger interface requires implementations to be safe for concurrent
+ // use by multiple goroutines. For this implementation that means making
+ // only one call to l.w.Write() for each call to Log.
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
+ return err
+ }
+ return nil
+}
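+
+// exampleLogfmt is an illustrative sketch; it is not part of the upstream
+// go-kit package. It shows the single logfmt record emitted per Log call.
+func exampleLogfmt(w io.Writer) error {
+ logger := NewLogfmtLogger(w)
+ // Emits: question="what is the meaning of life?" answer=42
+ return logger.Log("question", "what is the meaning of life?", "answer", 42)
+}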
diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go
new file mode 100644
index 000000000..1047d626c
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/nop_logger.go
@@ -0,0 +1,8 @@
+package log
+
+type nopLogger struct{}
+
+// NewNopLogger returns a logger that doesn't do anything.
+func NewNopLogger() Logger { return nopLogger{} }
+
+func (nopLogger) Log(...interface{}) error { return nil }
diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go
new file mode 100644
index 000000000..ff96b5dee
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/stdlib.go
@@ -0,0 +1,116 @@
+package log
+
+import (
+ "io"
+ "log"
+ "regexp"
+ "strings"
+)
+
+// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
+// designed to be passed to a Go kit logger as the writer, for cases where
+// it's necessary to redirect all Go kit log output to the stdlib logger.
+//
+// If you have any choice in the matter, you shouldn't use this. Prefer to
+// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
+type StdlibWriter struct{}
+
+// Write implements io.Writer.
+func (w StdlibWriter) Write(p []byte) (int, error) {
+ log.Print(strings.TrimSpace(string(p)))
+ return len(p), nil
+}
+
+// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
+// logger's SetOutput. It will extract date/timestamps, filenames, and
+// messages, and place them under relevant keys.
+type StdlibAdapter struct {
+ Logger
+ timestampKey string
+ fileKey string
+ messageKey string
+}
+
+// StdlibAdapterOption sets a parameter for the StdlibAdapter.
+type StdlibAdapterOption func(*StdlibAdapter)
+
+// TimestampKey sets the key for the timestamp field. By default, it's "ts".
+func TimestampKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.timestampKey = key }
+}
+
+// FileKey sets the key for the file and line field. By default, it's "caller".
+func FileKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.fileKey = key }
+}
+
+// MessageKey sets the key for the actual log message. By default, it's "msg".
+func MessageKey(key string) StdlibAdapterOption {
+ return func(a *StdlibAdapter) { a.messageKey = key }
+}
+
+// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
+// logger. It's designed to be passed to log.SetOutput.
+func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
+ a := StdlibAdapter{
+ Logger: logger,
+ timestampKey: "ts",
+ fileKey: "caller",
+ messageKey: "msg",
+ }
+ for _, option := range options {
+ option(&a)
+ }
+ return a
+}
+
+func (a StdlibAdapter) Write(p []byte) (int, error) {
+ result := subexps(p)
+ keyvals := []interface{}{}
+ var timestamp string
+ if date, ok := result["date"]; ok && date != "" {
+ timestamp = date
+ }
+ if time, ok := result["time"]; ok && time != "" {
+ if timestamp != "" {
+ timestamp += " "
+ }
+ timestamp += time
+ }
+ if timestamp != "" {
+ keyvals = append(keyvals, a.timestampKey, timestamp)
+ }
+ if file, ok := result["file"]; ok && file != "" {
+ keyvals = append(keyvals, a.fileKey, file)
+ }
+ if msg, ok := result["msg"]; ok {
+ keyvals = append(keyvals, a.messageKey, msg)
+ }
+ if err := a.Logger.Log(keyvals...); err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+const (
+ logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
+ logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}(:[0-9]{2})?)?[ ]?`