This repository was archived by the owner on Apr 17, 2019. It is now read-only.

Commit fb7c517

Merge pull request #1002 from aledbf/nginx-check-upstreams
[nginx-ingress-controller] Allow custom health checks in upstreams

2 parents: c1b1d84 + 97914b4

14 files changed (+340, -41 lines)


ingress/controllers/nginx/Dockerfile (+1, -1)

```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM gcr.io/google_containers/nginx-slim:0.6
+FROM gcr.io/google_containers/nginx-slim:0.7

 RUN apt-get update && apt-get install -y \
   diffutils \
```

ingress/controllers/nginx/README.md (+17, -1)

```diff
@@ -196,6 +196,23 @@ Use the [custom-template](examples/custom-template/README.md) example as a guide
 **Please note the template is tied to the go code. Be sure not to change names in the variable `$cfg`**


+### Custom NGINX upstream checks
+
+NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable the configuration of each server in the upstream. The Ingress controller allows custom `max_fails` and `fail_timeout` parameters to be set globally, using `upstream-max-fails` and `upstream-fail-timeout` in the NGINX ConfigMap, or per Ingress rule. By default both values are 0. This means NGINX will respect the `livenessProbe`, if it is defined. If there is no probe, NGINX will not mark a server inside an upstream as down.
+
+To use custom values in an Ingress rule, define these annotations:
+
+`ingress-nginx.kubernetes.io/upstream-max-fails`: the number of unsuccessful attempts to communicate with the server, within the duration set by the `fail_timeout` parameter, after which the server is considered unavailable.
+
+`ingress-nginx.kubernetes.io/upstream-fail-timeout`: the time in seconds during which the specified number of unsuccessful attempts to communicate with the server must occur for the server to be considered unavailable. It is also the period of time the server will be considered unavailable.
+
+**Important:**
+The upstreams are shared, i.e. Ingress rules that use the same service share the same upstream.
+This means only one of the rules should define annotations to configure the upstream servers.
+
+
+Please check the [custom-upstream-check](examples/custom-upstream-check/README.md) example.
+

 ### NGINX status page

@@ -209,7 +226,6 @@ Please check the example `example/rc-default.yaml`
 To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`


-
 ### Custom errors

 In case of an error in a request the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
```
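For the global settings mentioned above, here is a minimal sketch of how the two keys might be set in the NGINX ConfigMap. The ConfigMap name and namespace are hypothetical; the keys must live in whichever ConfigMap the controller is configured to read (`nxgConfigMap` in controller.go), and, as in any ConfigMap, the values are strings:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # hypothetical name/namespace; use the ConfigMap the controller already reads
  name: nginx-load-balancer-conf
  namespace: default
data:
  # mark an endpoint unavailable after 3 failed attempts within a 10 second window
  upstream-max-fails: "3"
  upstream-fail-timeout: "10"
```

Per-Ingress annotations, when present, take precedence over these global defaults for the upstreams built from that rule.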

ingress/controllers/nginx/controller.go (+21, -12)

```diff
@@ -40,6 +40,7 @@ import (
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/watch"

+    "k8s.io/contrib/ingress/controllers/nginx/healthcheck"
     "k8s.io/contrib/ingress/controllers/nginx/nginx"
 )

@@ -327,9 +328,6 @@ func (lbc *loadBalancerController) sync(key string) {
         return
     }

-    ings := lbc.ingLister.Store.List()
-    upstreams, servers := lbc.getUpstreamServers(ings)
-
     var cfg *api.ConfigMap

     ns, name, _ := parseNsName(lbc.nxgConfigMap)
@@ -339,6 +337,10 @@ func (lbc *loadBalancerController) sync(key string) {
     }

     ngxConfig := lbc.nginx.ReadConfig(cfg)
+
+    ings := lbc.ingLister.Store.List()
+    upstreams, servers := lbc.getUpstreamServers(ngxConfig, ings)
+
     lbc.nginx.CheckAndReload(ngxConfig, nginx.IngressConfig{
         Upstreams: upstreams,
         Servers:   servers,
@@ -489,15 +491,15 @@ func (lbc *loadBalancerController) getStreamServices(data map[string]string, pro
     if err != nil {
         for _, sp := range svc.Spec.Ports {
             if sp.Name == svcPort {
-                endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
+                endps = lbc.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
                 break
             }
         }
     } else {
         // we need to use the TargetPort (where the endpoints are running)
         for _, sp := range svc.Spec.Ports {
             if sp.Port == int32(targetPort) {
-                endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
+                endps = lbc.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
                 break
             }
         }
@@ -542,7 +544,7 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {

     svc := svcObj.(*api.Service)

-    endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP)
+    endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP, &healthcheck.Upstream{})
     if len(endps) == 0 {
         glog.Warningf("service %v does no have any active endpoints", svcKey)
         upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
@@ -553,8 +555,8 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {
     return upstream
 }

-func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
-    upstreams := lbc.createUpstreams(data)
+func (lbc *loadBalancerController) getUpstreamServers(ngxCfg nginx.NginxConfiguration, data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
+    upstreams := lbc.createUpstreams(ngxCfg, data)
     upstreams[defUpstreamName] = lbc.getDefaultUpstream()

     servers := lbc.createServers(data)
@@ -655,12 +657,14 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng

 // createUpstreams creates the NGINX upstreams for each service referenced in
 // Ingress rules. The servers inside the upstream are endpoints.
-func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream {
+func (lbc *loadBalancerController) createUpstreams(ngxCfg nginx.NginxConfiguration, data []interface{}) map[string]*nginx.Upstream {
     upstreams := make(map[string]*nginx.Upstream)

     for _, ingIf := range data {
         ing := ingIf.(*extensions.Ingress)

+        hz := healthcheck.ParseAnnotations(ngxCfg, ing)
+
         for _, rule := range ing.Spec.Rules {
             if rule.IngressRuleValue.HTTP == nil {
                 continue
@@ -693,7 +697,7 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin
             for _, servicePort := range svc.Spec.Ports {
                 // targetPort could be a string, use the name or the port (int)
                 if strconv.Itoa(int(servicePort.Port)) == bp || servicePort.TargetPort.String() == bp || servicePort.Name == bp {
-                    endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP)
+                    endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP, hz)
                     if len(endps) == 0 {
                         glog.Warningf("service %v does no have any active endpoints", svcKey)
                     }
@@ -801,7 +805,7 @@ func (lbc *loadBalancerController) getPemsFromIngress(data []interface{}) map[st
 }

 // getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
-func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol) []nginx.UpstreamServer {
+func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol, hz *healthcheck.Upstream) []nginx.UpstreamServer {
     glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String())
     ep, err := lbc.endpLister.GetServiceEndpoints(s)
     if err != nil {
@@ -859,7 +863,12 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints
     }

     for _, epAddress := range ss.Addresses {
-        ups := nginx.UpstreamServer{Address: epAddress.IP, Port: fmt.Sprintf("%v", targetPort)}
+        ups := nginx.UpstreamServer{
+            Address:     epAddress.IP,
+            Port:        fmt.Sprintf("%v", targetPort),
+            MaxFails:    hz.MaxFails,
+            FailTimeout: hz.FailTimeout,
+        }
         upsServers = append(upsServers, ups)
     }
 }
```
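To summarize the wiring this change introduces — the health-check annotations are parsed once per Ingress and the resulting values are stamped onto every upstream server built from it — here is a standalone sketch. `Upstream` and `UpstreamServer` below are simplified stand-ins for the repository's `healthcheck.Upstream` and `nginx.UpstreamServer` types, and `parseAnnotations` only mirrors the fallback behaviour of `healthcheck.ParseAnnotations`:

```go
package main

import (
    "fmt"
    "strconv"
)

// Simplified stand-ins for healthcheck.Upstream and nginx.UpstreamServer.
type Upstream struct {
    MaxFails    int
    FailTimeout int
}

type UpstreamServer struct {
    Address     string
    Port        string
    MaxFails    int
    FailTimeout int
}

// parseAnnotations mirrors healthcheck.ParseAnnotations: a valid annotation
// wins, otherwise the global default (from the NGINX ConfigMap) is used.
func parseAnnotations(ann map[string]string, defMaxFails, defFailTimeout int) Upstream {
    up := Upstream{MaxFails: defMaxFails, FailTimeout: defFailTimeout}
    if v, err := strconv.Atoi(ann["ingress-nginx.kubernetes.io/upstream-max-fails"]); err == nil {
        up.MaxFails = v
    }
    if v, err := strconv.Atoi(ann["ingress-nginx.kubernetes.io/upstream-fail-timeout"]); err == nil {
        up.FailTimeout = v
    }
    return up
}

func main() {
    // Annotations of a hypothetical Ingress that only overrides fail-timeout.
    ann := map[string]string{
        "ingress-nginx.kubernetes.io/upstream-fail-timeout": "30",
    }
    hz := parseAnnotations(ann, 5, 0) // pretend the ConfigMap sets upstream-max-fails=5

    // Every endpoint of the backing service inherits the same settings, just as
    // getEndpoints now does through its extra *healthcheck.Upstream argument.
    for _, ip := range []string{"10.2.92.2"} {
        ups := UpstreamServer{Address: ip, Port: "8080", MaxFails: hz.MaxFails, FailTimeout: hz.FailTimeout}
        fmt.Printf("server %s:%s max_fails=%d fail_timeout=%d;\n", ups.Address, ups.Port, ups.MaxFails, ups.FailTimeout)
    }
}
```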
ingress/controllers/nginx/examples/custom-upstream-check/README.md (new file, +45)

This example shows how it is possible to create a custom configuration for a particular upstream associated with an Ingress rule.

```
echo "
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echoheaders
  annotations:
    ingress-nginx.kubernetes.io/upstream-fail-timeout: \"30\"
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: echoheaders
          servicePort: 80
" | kubectl create -f -
```

Check that the annotation is present in the Ingress rule:
```
kubectl get ingress echoheaders -o yaml
```

Check that the NGINX configuration has been updated, using kubectl or the status page:

```
$ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf
```

```
....
upstream default-echoheaders-x-80 {
  least_conn;
  server 10.2.92.2:8080 max_fails=5 fail_timeout=30;

}
....
```

![nginx-module-vts](contrib/ingress/controllers/nginx/examples/custom-upstream-check/custom-upstream.png "screenshot with custom configuration")
ingress/controllers/nginx/healthcheck/healthcheck.go (new file, +101)

```go
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
    "errors"
    "strconv"

    "k8s.io/kubernetes/pkg/apis/extensions"

    "k8s.io/contrib/ingress/controllers/nginx/nginx"
)

const (
    upsMaxFails    = "ingress-nginx.kubernetes.io/upstream-max-fails"
    upsFailTimeout = "ingress-nginx.kubernetes.io/upstream-fail-timeout"
)

var (
    // ErrMissingMaxFails is returned when the Ingress does not contain the
    // max-fails annotation
    ErrMissingMaxFails = errors.New("max-fails annotation is missing")

    // ErrMissingFailTimeout is returned when the Ingress does not contain
    // the fail-timeout annotation
    ErrMissingFailTimeout = errors.New("fail-timeout annotation is missing")

    // ErrInvalidNumber is returned when the annotation value is not a number
    ErrInvalidNumber = errors.New("the annotation does not contain a number")
)

// Upstream holds the max_fails and fail_timeout values used to check
// the status of the upstream server/s
type Upstream struct {
    MaxFails    int
    FailTimeout int
}

type ingAnnotations map[string]string

func (a ingAnnotations) maxFails() (int, error) {
    val, ok := a[upsMaxFails]
    if !ok {
        return 0, ErrMissingMaxFails
    }

    mf, err := strconv.Atoi(val)
    if err != nil {
        return 0, ErrInvalidNumber
    }

    return mf, nil
}

func (a ingAnnotations) failTimeout() (int, error) {
    val, ok := a[upsFailTimeout]
    if !ok {
        return 0, ErrMissingFailTimeout
    }

    ft, err := strconv.Atoi(val)
    if err != nil {
        return 0, ErrInvalidNumber
    }

    return ft, nil
}

// ParseAnnotations parses the annotations contained in the Ingress
// rule used to configure the upstream check parameters
func ParseAnnotations(cfg nginx.NginxConfiguration, ing *extensions.Ingress) *Upstream {
    if ing.GetAnnotations() == nil {
        return &Upstream{cfg.UpstreamMaxFails, cfg.UpstreamFailTimeout}
    }

    mf, err := ingAnnotations(ing.GetAnnotations()).maxFails()
    if err != nil {
        mf = cfg.UpstreamMaxFails
    }

    ft, err := ingAnnotations(ing.GetAnnotations()).failTimeout()
    if err != nil {
        ft = cfg.UpstreamFailTimeout
    }

    return &Upstream{mf, ft}
}
```
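As a usage sketch (not part of the commit), here is a minimal test of the fallback behaviour. It assumes the `UpstreamMaxFails`/`UpstreamFailTimeout` fields on `nginx.NginxConfiguration` referenced above and the Kubernetes API packages already imported by controller.go; anything outside this diff is taken on that assumption:

```go
package healthcheck_test

import (
    "testing"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/apis/extensions"

    "k8s.io/contrib/ingress/controllers/nginx/healthcheck"
    "k8s.io/contrib/ingress/controllers/nginx/nginx"
)

func TestParseAnnotations(t *testing.T) {
    // Global defaults, as they would come from the NGINX ConfigMap.
    cfg := nginx.NginxConfiguration{UpstreamMaxFails: 2, UpstreamFailTimeout: 3}

    // An Ingress that overrides only the fail-timeout; max-fails falls back to cfg.
    ing := &extensions.Ingress{
        ObjectMeta: api.ObjectMeta{
            Annotations: map[string]string{
                "ingress-nginx.kubernetes.io/upstream-fail-timeout": "30",
            },
        },
    }

    up := healthcheck.ParseAnnotations(cfg, ing)
    if up.MaxFails != 2 || up.FailTimeout != 30 {
        t.Errorf("expected MaxFails=2, FailTimeout=30, got %+v", up)
    }

    // Without annotations both values come from the configuration.
    up = healthcheck.ParseAnnotations(cfg, &extensions.Ingress{})
    if up.MaxFails != 2 || up.FailTimeout != 3 {
        t.Errorf("expected the ConfigMap defaults, got %+v", up)
    }
}
```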
