This repository has been archived by the owner on Jun 16, 2022. It is now read-only.

Drop retry logic from autoscaler #11

Merged: 1 commit, Dec 23, 2019
59 changes: 0 additions & 59 deletions pkg/gatewayserver/rewinder.go

This file was deleted.
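The deleted rewinder.go is not reproduced in this view. As the comments in roundtripper.go's RoundTrip method below explain, its job was to buffer the request body so it could be re-read across retry attempts. A minimal sketch of what such a rewinder might look like, purely an illustrative assumption and not the deleted implementation:

package gatewayserver

import (
	"bytes"
	"io"
	"io/ioutil"
)

// rewinder is an assumed stand-in for the deleted type: it buffers the wrapped
// body and rewinds on Close so a retrying round tripper can replay the request.
type rewinder struct {
	reader *bytes.Reader
}

func newRewinder(rc io.ReadCloser) io.ReadCloser {
	// Assumption: read errors are swallowed here for brevity; a real
	// implementation would need to surface them to the caller.
	data, _ := ioutil.ReadAll(rc)
	rc.Close()
	return &rewinder{reader: bytes.NewReader(data)}
}

func (r *rewinder) Read(p []byte) (int, error) {
	return r.reader.Read(p)
}

// Close rewinds the buffered body instead of discarding it, so the next retry
// attempt reads it from the start.
func (r *rewinder) Close() error {
	_, err := r.reader.Seek(0, io.SeekStart)
	return err
}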

75 changes: 0 additions & 75 deletions pkg/gatewayserver/roundtripper.go
@@ -17,15 +17,8 @@ import (
 	"crypto/tls"
 	"net"
 	"net/http"
-	"strconv"
 
-	"github.com/sirupsen/logrus"
 	"golang.org/x/net/http2"
-	"k8s.io/apimachinery/pkg/util/wait"
 )
 
-const (
-	requestCountHTTPHeader = "Request-Retry-Count"
-)
-
 type roundTripperFunc func(*http.Request) (*http.Response, error)
@@ -55,71 +48,3 @@ var http2Transport http.RoundTripper = &http2.Transport{
 
 // AutoTransport uses h2c for HTTP2 requests and falls back to `http.DefaultTransport` for all others
 var autoTransport = newHTTPTransport(http.DefaultTransport, http2Transport)
-
-type retryCond func(*http.Response) bool
-
-// RetryStatus will filter responses matching `status`
-func retryStatus(status int) retryCond {
-	return func(resp *http.Response) bool {
-		return resp.StatusCode == status
-	}
-}
-
-type retryRoundTripper struct {
-	transport       http.RoundTripper
-	backoffSettings wait.Backoff
-	retryConditions []retryCond
-}
-
-// RetryRoundTripper retries a request on error or retry condition, using the given `retry` strategy
-func newRetryRoundTripper(rt http.RoundTripper, b wait.Backoff, conditions ...retryCond) http.RoundTripper {
-	return &retryRoundTripper{
-		transport:       rt,
-		backoffSettings: b,
-		retryConditions: conditions,
-	}
-}
-
-func (rrt *retryRoundTripper) RoundTrip(r *http.Request) (resp *http.Response, err error) {
-	// The request body cannot be read multiple times for retries.
-	// The workaround is to clone the request body into a byte reader
-	// so the body can be read multiple times.
-	if r.Body != nil {
-		logrus.Debugf("Wrapping body in a rewinder.")
-		r.Body = newRewinder(r.Body)
-	}
-
-	attempts := 0
-	wait.ExponentialBackoff(rrt.backoffSettings, func() (bool, error) {
-		attempts++
-		r.Header.Add(requestCountHTTPHeader, strconv.Itoa(attempts))
-		resp, err = rrt.transport.RoundTrip(r)
-
-		if err != nil {
-			logrus.Errorf("Error making a request: %s", err)
-			return false, nil
-		}
-
-		for _, retryCond := range rrt.retryConditions {
-			if retryCond(resp) {
-				resp.Body.Close()
-				return false, nil
-			}
-		}
-		return true, nil
-	})
-
-	if err == nil {
-		logrus.Infof("Finished after %d attempt(s). Response code: %d", attempts, resp.StatusCode)
-
-		if resp.Header == nil {
-			resp.Header = make(http.Header)
-		}
-
-		resp.Header.Add(requestCountHTTPHeader, strconv.Itoa(attempts))
-	} else {
-		logrus.Errorf("Failed after %d attempts. Last error: %v", attempts, err)
-	}
-
-	return
-}
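Note that newHTTPTransport and the h2c transport helpers sit in the collapsed region of this file and are untouched by this PR. Purely as an assumed sketch of how the AutoTransport switching described in the comment above could work (routing HTTP/2 requests to the h2c transport and falling back to the plain transport otherwise; the real helper may differ):

package gatewayserver

import "net/http"

// roundTripperFunc mirrors the declaration kept in the diff above.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (rt roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
	return rt(r)
}

// newHTTPTransport is a guess at the shape of the helper used by autoTransport:
// pick the HTTP/2 transport for HTTP/2 requests, otherwise fall back to v1.
func newHTTPTransport(v1, v2 http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(r *http.Request) (*http.Response, error) {
		if r.ProtoMajor == 2 {
			return v2.RoundTrip(r)
		}
		return v1.RoundTrip(r)
	})
}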
21 changes: 4 additions & 17 deletions pkg/gatewayserver/serve.go
@@ -18,15 +18,11 @@ import (
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/proxy"
-	"k8s.io/apimachinery/pkg/util/wait"
 )
 
 const (
-	maxRetries             = 18 // the sum of all retries would add up to 1 minute
-	minRetryInterval       = 100 * time.Millisecond
-	exponentialBackoffBase = 1.3
-	RioNameHeader          = "X-Rio-ServiceName"
-	RioNamespaceHeader     = "X-Rio-Namespace"
+	RioNameHeader      = "X-Rio-ServiceName"
+	RioNamespaceHeader = "X-Rio-Namespace"
 )
 
 func NewHandler(rContext *types.Context, lock *sync.RWMutex, autoscalers map[string]*servicescale.SimpleScale) Handler {
@@ -81,8 +77,7 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	app, version := services.AppAndVersion(svc)
 	serveFQDN(name2.SafeConcatName(app, version), namespace, checkPort, w, r)
 
-	logrus.Infof("activating service %s/%s takes %v seconds", svc.Name, svc.Namespace, time.Now().Sub(start).Seconds())
-	return
+	logrus.Infof("activating service %s/%s takes %v seconds", svc.Name, svc.Namespace, time.Since(start).Seconds())
 }
 
 func serveFQDN(name, namespace, port string, w http.ResponseWriter, r *http.Request) {
@@ -95,15 +90,7 @@ func serveFQDN(name, namespace, port string, w http.ResponseWriter, r *http.Request) {
 	r.URL.Host = targetURL.Host
 	r.Host = targetURL.Host
 
-	shouldRetry := []retryCond{retryStatus(http.StatusServiceUnavailable), retryStatus(http.StatusBadGateway)}
-	backoffSettings := wait.Backoff{
-		Duration: minRetryInterval,
-		Factor:   exponentialBackoffBase,
-		Steps:    maxRetries,
-	}
-
-	rt := newRetryRoundTripper(autoTransport, backoffSettings, shouldRetry...)
-	httpProxy := proxy.NewUpgradeAwareHandler(targetURL, rt, true, false, er)
+	httpProxy := proxy.NewUpgradeAwareHandler(targetURL, autoTransport, true, false, er)
 	httpProxy.ServeHTTP(w, r)
 }
