diff --git a/.circleci/config.yml b/.circleci/config.yml index b1bcce48c7..c856be9113 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -101,6 +101,8 @@ commands: ${ENABLE_ENTERPRISE:+-enable-enterprise} \ -enable-multi-cluster \ -debug-directory="$TEST_RESULTS/debug" \ + -run TestPeering_Connect \ + -run TestPeering_ConnectNamespaces \ -consul-k8s-image=<< parameters.consul-k8s-image >> then echo "Tests in ${pkg} failed, aborting early" @@ -132,6 +134,8 @@ commands: -enable-multi-cluster \ ${ENABLE_ENTERPRISE:+-enable-enterprise} \ -debug-directory="$TEST_RESULTS/debug" \ + -run TestPeering_Connect \ + -run TestPeering_ConnectNamespaces \ -consul-k8s-image=<< parameters.consul-k8s-image >> jobs: @@ -706,7 +710,7 @@ jobs: - run: mkdir -p $TEST_RESULTS - run-acceptance-tests: - additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -disable-peering -enable-transparent-proxy + additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy - store_test_results: path: /tmp/test-results @@ -1004,6 +1008,15 @@ workflows: context: consul-ci requires: - dev-upload-docker + - acceptance-gke-1-20: + requires: + - dev-upload-docker + - acceptance-eks-1-19: + requires: + - dev-upload-docker + - acceptance-aks-1-21: + requires: + - dev-upload-docker nightly-acceptance-tests: triggers: - schedule: diff --git a/acceptance/framework/k8s/deploy.go b/acceptance/framework/k8s/deploy.go index 09272d5382..2a258dcd96 100644 --- a/acceptance/framework/k8s/deploy.go +++ b/acceptance/framework/k8s/deploy.go @@ -96,7 +96,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k expectedOutput = expectedSuccessOutput } - retrier := &retry.Timer{Timeout: 80 * time.Second, Wait: 2 * time.Second} + retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} args := []string{"exec", "deploy/" + sourceApp, "-c", sourceApp, "--", "curl", "-vvvsSf"} args = append(args, curlArgs...) 
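Note: the doubled connection-check window above (80s to 160s) is consumed by the polling helper from github.com/hashicorp/consul/sdk/testutil/retry, the same pattern the peering tests later in this patch use to wait for the acceptor secret. A minimal, self-contained sketch of that pattern follows; the package name, waitForConnection, and checkConnection are illustrative stand-ins and are not part of this change.

```go
// Sketch of the retry pattern used by the acceptance framework: poll a check
// function every 2 seconds until it passes or the 160-second window expires.
package acceptance

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

// waitForConnection is an illustrative helper; checkConnection stands in for
// the real kubectl-exec curl against the static-server deployment.
func waitForConnection(t *testing.T, checkConnection func() error) {
	retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second}
	retry.RunWith(retrier, t, func(r *retry.R) {
		require.NoError(r, checkConnection())
	})
}
```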
diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go index d57e1af44a..e1a43850d6 100644 --- a/acceptance/tests/partitions/partitions_connect_test.go +++ b/acceptance/tests/partitions/partitions_connect_test.go @@ -123,6 +123,7 @@ func TestPartitions_Connect(t *testing.T) { serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" serverHelmValues["meshGateway.service.type"] = "NodePort" serverHelmValues["meshGateway.service.nodePort"] = "30100" + serverHelmValues["server.exposeService.type"] = "NodePort" } releaseName := helpers.RandomName() diff --git a/acceptance/tests/peering/peering_connect_namespaces_test.go b/acceptance/tests/peering/peering_connect_namespaces_test.go index 57421a3f53..7205e5c158 100644 --- a/acceptance/tests/peering/peering_connect_namespaces_test.go +++ b/acceptance/tests/peering/peering_connect_namespaces_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strconv" "testing" + "time" terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -13,6 +14,7 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-version" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -76,7 +78,7 @@ func TestPeering_ConnectNamespaces(t *testing.T) { "global.peering.enabled": "true", "global.enableConsulNamespaces": "true", - "global.image": "thisisnotashwin/consul@sha256:446aad6e02f66e3027756dfc0d34e8e6e2b11ac6ec5637b134b34644ca7cda64", + "global.image": "ndhanushkodi/consul-dev@sha256:61b02ac369cc13db6b9af8808b7e3a811bcdc9a09c95ddac0da931f81743091c", "global.tls.enabled": "false", "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), @@ -95,8 +97,10 @@ func TestPeering_ConnectNamespaces(t *testing.T) { "controller.enabled": "true", - "dns.enabled": "true", - "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), + "dns.enabled": "true", + "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), + "server.replicas": "3", + "server.bootstrapExpect": "3", } staticServerPeerHelmValues := map[string]string{ @@ -110,14 +114,18 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" + staticServerPeerHelmValues["server.replicas"] = "1" + staticServerPeerHelmValues["server.bootstrapExpect"] = "1" } releaseName := helpers.RandomName() - helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues) + helpers.MergeMaps(commonHelmValues, staticServerPeerHelmValues) // Install the first peer where static-server will be deployed in the static-server kubernetes context. 
- staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName) + staticServerPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticServerPeerClusterContext, cfg, releaseName) staticServerPeerCluster.Create(t) staticClientPeerHelmValues := map[string]string{ @@ -128,12 +136,16 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" + staticServerPeerHelmValues["server.replicas"] = "1" + staticServerPeerHelmValues["server.bootstrapExpect"] = "1" } - helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) + helpers.MergeMaps(commonHelmValues, staticClientPeerHelmValues) // Install the second peer where static-client will be deployed in the static-client kubernetes context. - staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) + staticClientPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) // Create the peering acceptor on the client peer. @@ -142,6 +154,14 @@ func TestPeering_ConnectNamespaces(t *testing.T) { k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") }) + // Ensure the secret is created. + timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} + retry.RunWith(timer, t, func(r *retry.R) { + acceptorSecretResourceVersion, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.resourceVersion}") + require.NoError(r, err) + require.NotEmpty(r, acceptorSecretResourceVersion) + }) + // Copy secret from client peer to server peer. 
k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token") diff --git a/acceptance/tests/peering/peering_connect_test.go b/acceptance/tests/peering/peering_connect_test.go index b1b246aac5..3b5a2dcbcd 100644 --- a/acceptance/tests/peering/peering_connect_test.go +++ b/acceptance/tests/peering/peering_connect_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strconv" "testing" + "time" terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -13,6 +14,7 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-version" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,7 +51,7 @@ func TestPeering_Connect(t *testing.T) { commonHelmValues := map[string]string{ "global.peering.enabled": "true", - "global.image": "thisisnotashwin/consul@sha256:446aad6e02f66e3027756dfc0d34e8e6e2b11ac6ec5637b134b34644ca7cda64", + "global.image": "ndhanushkodi/consul-dev@sha256:61b02ac369cc13db6b9af8808b7e3a811bcdc9a09c95ddac0da931f81743091c", "global.tls.enabled": "false", "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), @@ -64,8 +66,10 @@ func TestPeering_Connect(t *testing.T) { "controller.enabled": "true", - "dns.enabled": "true", - "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), + "dns.enabled": "true", + "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), + "server.replicas": "3", + "server.bootstrapExpect": "3", } staticServerPeerHelmValues := map[string]string{ @@ -79,14 +83,18 @@ func TestPeering_Connect(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" + staticServerPeerHelmValues["server.replicas"] = "1" + staticServerPeerHelmValues["server.bootstrapExpect"] = "1" } releaseName := helpers.RandomName() - helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues) + helpers.MergeMaps(commonHelmValues, staticServerPeerHelmValues) // Install the first peer where static-server will be deployed in the static-server kubernetes context. 
- staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName) + staticServerPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticServerPeerClusterContext, cfg, releaseName) staticServerPeerCluster.Create(t) staticClientPeerHelmValues := map[string]string{ @@ -97,12 +105,16 @@ func TestPeering_Connect(t *testing.T) { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" + staticClientPeerHelmValues["server.replicas"] = "1" + staticClientPeerHelmValues["server.bootstrapExpect"] = "1" } - helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) + helpers.MergeMaps(commonHelmValues, staticClientPeerHelmValues) // Install the second peer where static-client will be deployed in the static-client kubernetes context. - staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) + staticClientPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) // Create the peering acceptor on the client peer. @@ -110,9 +122,14 @@ func TestPeering_Connect(t *testing.T) { helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") }) - acceptorSecretResourceVersion, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.resourceVersion}") - require.NoError(t, err) - require.NotEmpty(t, acceptorSecretResourceVersion) + + // Ensure the secret is created. + timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} + retry.RunWith(timer, t, func(r *retry.R) { + acceptorSecretResourceVersion, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.resourceVersion}") + require.NoError(r, err) + require.NotEmpty(r, acceptorSecretResourceVersion) + }) // Copy secret from client peer to server peer. 
k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token") diff --git a/acceptance/tests/vault/vault_partitions_test.go b/acceptance/tests/vault/vault_partitions_test.go index 19798a374d..31eac7bc1a 100644 --- a/acceptance/tests/vault/vault_partitions_test.go +++ b/acceptance/tests/vault/vault_partitions_test.go @@ -338,6 +338,7 @@ func TestVault_Partitions(t *testing.T) { serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" serverHelmValues["meshGateway.service.type"] = "NodePort" serverHelmValues["meshGateway.service.nodePort"] = "30100" + serverHelmValues["server.exposeService.type"] = "NodePort" } helpers.MergeMaps(serverHelmValues, commonHelmValues) diff --git a/charts/consul/templates/connect-inject-clusterrole.yaml b/charts/consul/templates/connect-inject-clusterrole.yaml index 88194f4a61..846e70132a 100644 --- a/charts/consul/templates/connect-inject-clusterrole.yaml +++ b/charts/consul/templates/connect-inject-clusterrole.yaml @@ -19,7 +19,7 @@ rules: - get {{- end }} - apiGroups: [ "" ] - resources: [ "pods", "endpoints", "services", "namespaces" ] + resources: [ "pods", "endpoints", "services", "namespaces", "nodes" ] verbs: - "get" - "list" diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index 134ecbd344..0e0e59bd86 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -11,6 +11,8 @@ {{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} {{ template "consul.validateVaultWebhookCertConfiguration" . }} {{- template "consul.reservedNamesFailer" (list .Values.connectInject.consulNamespaces.consulDestinationNamespace "connectInject.consulNamespaces.consulDestinationNamespace") }} +{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} +{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}} # The deployment for running the Connect sidecar injector apiVersion: apps/v1 kind: Deployment @@ -129,6 +131,7 @@ spec: -consul-k8s-image="{{ default .Values.global.imageK8S .Values.connectInject.image }}" \ -release-name="{{ .Release.Name }}" \ -release-namespace="{{ .Release.Namespace }}" \ + -resource-prefix={{ template "consul.fullname" . }} \ -listen=:8080 \ {{- if .Values.connectInject.transparentProxy.defaultEnabled }} -default-enable-transparent-proxy=true \ @@ -137,6 +140,11 @@ spec: {{- end }} {{- if .Values.global.peering.enabled }} -enable-peering=true \ + {{- if (eq .Values.global.peering.tokenGeneration.serverAddresses.source "") }} + {{- if (and $serverEnabled $serverExposeServiceEnabled) }} + -poll-server-expose-service=true \ + {{- end }} + {{- end }} {{- end }} {{- if .Values.global.openshift.enabled }} -enable-openshift \ @@ -146,7 +154,6 @@ spec: {{- else }} -transparent-proxy-default-overwrite-probes=false \ {{- end }} - -resource-prefix={{ template "consul.fullname" . 
}} \ {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} -enable-consul-dns=true \ {{- end }} diff --git a/charts/consul/templates/expose-servers-service.yaml b/charts/consul/templates/expose-servers-service.yaml new file mode 100644 index 0000000000..22dde451e2 --- /dev/null +++ b/charts/consul/templates/expose-servers-service.yaml @@ -0,0 +1,63 @@ +{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} +{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}} +{{- if (and $serverEnabled $serverExposeServiceEnabled) }} + +# Service with an external IP to reach Consul servers. +# Used for exposing gRPC port for peering and ports for client partitions to discover servers. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-expose-servers + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: server + annotations: + {{- if .Values.server.exposeService.annotations }} + {{ tpl .Values.server.exposeService.annotations . | nindent 4 | trim }} + {{- end }} +spec: + type: "{{ .Values.server.exposeService.type }}" + ports: + {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }} + - name: http + port: 8500 + targetPort: 8500 + {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.https) }} + nodePort: {{ .Values.server.exposeService.nodePort.http }} + {{- end }} + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: https + port: 8501 + targetPort: 8501 + {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.https) }} + nodePort: {{ .Values.server.exposeService.nodePort.https }} + {{- end }} + {{- end }} + - name: serflan + port: 8301 + targetPort: 8301 + {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.serf) }} + nodePort: {{ .Values.server.exposeService.nodePort.serf }} + {{- end }} + - name: rpc + port: 8300 + targetPort: 8300 + {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.rpc) }} + nodePort: {{ .Values.server.exposeService.nodePort.rpc }} + {{- end }} + - name: grpc + port: 8503 + targetPort: 8503 + {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.grpc) }} + nodePort: {{ .Values.server.exposeService.nodePort.grpc }} + {{- end }} + selector: + app: {{ template "consul.name" . 
}} + release: "{{ .Release.Name }}" + component: server +{{- end }} diff --git a/charts/consul/test/unit/connect-inject-deployment.bats b/charts/consul/test/unit/connect-inject-deployment.bats index 658fca992c..d3beb7e051 100755 --- a/charts/consul/test/unit/connect-inject-deployment.bats +++ b/charts/consul/test/unit/connect-inject-deployment.bats @@ -1831,6 +1831,57 @@ EOF [[ "$output" =~ "setting global.peering.enabled to true requires connectInject.enabled to be true" ]] } +@test "connectInject/Deployment: -poll-server-expose-service=true is set when global.peering.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-poll-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -poll-server-expose-service=true is set when servers are enabled and peering is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-poll-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -poll-server-expose-service is not set when servers are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'server.enabled=false' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-poll-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -poll-server-expose-service is not set when peering is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-poll-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} #-------------------------------------------------------------------- # openshift diff --git a/charts/consul/test/unit/expose-servers-service.bats b/charts/consul/test/unit/expose-servers-service.bats new file mode 100644 index 0000000000..4d64aec2cd --- /dev/null +++ b/charts/consul/test/unit/expose-servers-service.bats @@ -0,0 +1,181 @@ +#!/usr/bin/env bats + +load _helpers + +@test "expose-servers/Service: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + . +} + +@test "expose-servers/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.enabled true and global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.adminPartitions.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: disable when peering.enabled is false" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + --set 'server.enabled=true' \ + --set 'global.peering.enabled=false' \ + . +} + +@test "expose-servers/Service: disable with server.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + --set 'server.enabled=false' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . +} + +@test "expose-servers/Service: disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . +} + +#-------------------------------------------------------------------- +# annotations + +@test "expose-servers/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "expose-servers/Service: can set annotations" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'server.exposeService.annotations=key: value' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# nodePort + +@test "expose-servers/Service: RPC node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'server.exposeService.type=NodePort' \ + --set 'server.exposeService.nodePort.rpc=4443' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "rpc") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] +} + +@test "expose-servers/Service: Serf node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'server.exposeService.type=NodePort' \ + --set 'server.exposeService.nodePort.serf=4444' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +@test "expose-servers/Service: Grpc node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'server.exposeService.type=NodePort' \ + --set 'server.exposeService.nodePort.grpc=4444' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "grpc") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +# TODO: HTTP/HTTPS/TLS + +@test "expose-servers/Service: RPC, Serf and grpc node ports can be set" { + cd `chart_dir` + local ports=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'server.exposeService.type=NodePort' \ + --set 'server.exposeService.nodePort.rpc=4443' \ + --set 'server.exposeService.nodePort.grpc=4444' \ + --set 'server.exposeService.nodePort.serf=4445' \ + . | tee /dev/stderr | + yq -r '.spec.ports[]' | tee /dev/stderr) + + local actual + actual=$(echo $ports | jq -r 'select(.name == "rpc") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] + + actual=$(echo $ports | jq -r 'select(.name == "grpc") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] + + actual=$(echo $ports | jq -r 'select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4445" ] +} diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 44e274c58d..209ea60a16 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -34,6 +34,11 @@ global: # If true, the Helm chart enables Cluster Peering for the cluster. This option enables peering controllers and # allows use of the PeeringAcceptor and PeeringDialer CRDs for establishing service mesh peerings. enabled: false + tokenGeneration: + serverAddresses: + # Source can be set to "" or "consul". + # "" is the default behavior. + source: "" # [Enterprise Only] Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters. # It additionally indicates that you are running Consul Enterprise v1.11+ with a valid Consul Enterprise @@ -992,6 +997,43 @@ server: # @type: string annotations: null + # Configures a service to expose ports on the Consul servers over a Kubernetes Service. + exposeService: + # When enabled, deploys a Kubernetes Service to reach the Consul servers. + # @type: boolean + enabled: "-" + # Type of service, supports LoadBalancer or NodePort. + # @type: string + type: LoadBalancer + # If service is of type NodePort, configures the nodePorts. + nodePort: + # Configures the nodePort to expose the Consul server http port. + # @type: integer + http: null + # Configures the nodePort to expose the Consul server https port. + # @type: integer + https: null + # Configures the nodePort to expose the Consul server serf port. 
+ # @type: integer + serf: null + # Configures the nodePort to expose the Consul server rpc port. + # @type: integer + rpc: null + # Configures the nodePort to expose the Consul server grpc port. + # @type: integer + grpc: null + # This value defines additional annotations for + # server pods. This should be formatted as a multi-line string. + # + # ```yaml + # annotations: | + # "sample/annotation1": "foo" + # "sample/annotation2": "bar" + # ``` + # + # @type: string + annotations: null + # Server service properties. service: # Annotations to apply to the server service. @@ -1034,6 +1076,9 @@ externalServers: # The HTTPS port of the Consul servers. httpsPort: 8501 + # The GRPC port of the Consul servers. + grpcPort: 8503 + # The server name to use as the SNI host header when connecting with HTTPS. # @type: string tlsServerName: null diff --git a/control-plane/connect-inject/peering_acceptor_controller.go b/control-plane/connect-inject/peering_acceptor_controller.go index da977d8e8a..d1ff388468 100644 --- a/control-plane/connect-inject/peering_acceptor_controller.go +++ b/control-plane/connect-inject/peering_acceptor_controller.go @@ -3,6 +3,7 @@ package connectinject import ( "context" "errors" + "fmt" "strconv" "time" @@ -28,9 +29,12 @@ import ( type PeeringAcceptorController struct { client.Client // ConsulClient points at the agent local to the connect-inject deployment pod. - ConsulClient *api.Client - Log logr.Logger - Scheme *runtime.Scheme + ConsulClient *api.Client + ExposeServersServiceName string + PollServerExternalService bool + ReleaseNamespace string + Log logr.Logger + Scheme *runtime.Scheme context.Context } @@ -98,6 +102,17 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } } + // Scrape the address of the server service + var serverExternalAddresses []string + if r.PollServerExternalService { + addrs, err := r.getExposeServersServiceAddresses() + if err != nil { + r.updateStatusError(ctx, acceptor, KubernetesError, err) + return ctrl.Result{}, err + } + serverExternalAddresses = addrs + } + statusSecretSet := acceptor.SecretRef() != nil // existingStatusSecret will be nil if it doesn't exist, and have the contents of the secret if it does exist. @@ -136,7 +151,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, acceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { r.updateStatusError(ctx, acceptor, ConsulAgentError, err) return ctrl.Result{}, err } @@ -175,7 +190,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ if shouldGenerate { // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, acceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { return ctrl.Result{}, err } if acceptor.Secret().Backend == "kubernetes" { @@ -342,10 +357,13 @@ func (r *PeeringAcceptorController) SetupWithManager(mgr ctrl.Manager) error { } // generateToken is a helper function that calls the Consul api to generate a token for the peer. 
-func (r *PeeringAcceptorController) generateToken(ctx context.Context, peerName string) (*api.PeeringGenerateTokenResponse, error) { +func (r *PeeringAcceptorController) generateToken(ctx context.Context, peerName string, serverExternalAddresses []string) (*api.PeeringGenerateTokenResponse, error) { req := api.PeeringGenerateTokenRequest{ PeerName: peerName, } + if len(serverExternalAddresses) > 0 { + req.ServerExternalAddresses = serverExternalAddresses + } resp, _, err := r.ConsulClient.Peerings().GenerateToken(ctx, req, nil) if err != nil { r.Log.Error(err, "failed to get generate token", "err", err) @@ -388,6 +406,73 @@ func (r *PeeringAcceptorController) requestsForPeeringTokens(object client.Objec return []ctrl.Request{} } +func (r *PeeringAcceptorController) getExposeServersServiceAddresses() ([]string, error) { + r.Log.Info("getting external address from expose-servers service", "name", r.ExposeServersServiceName) + var serverExternalAddresses []string + + serverService := &corev1.Service{} + key := types.NamespacedName{ + Name: r.ExposeServersServiceName, + Namespace: r.ReleaseNamespace, + } + err := r.Client.Get(r.Context, key, serverService) + if err != nil { + return []string{}, err + } + switch serverService.Spec.Type { + case corev1.ServiceTypeNodePort: + nodes := corev1.NodeList{} + err := r.Client.List(r.Context, &nodes) + if err != nil { + return []string{}, err + } + if len(nodes.Items) == 0 { + return []string{}, fmt.Errorf("no nodes were found for scraping server addresses from expose-servers service") + } + var grpcNodePort int32 + for _, port := range serverService.Spec.Ports { + if port.Name == "grpc" { + grpcNodePort = port.NodePort + } + } + if grpcNodePort == 0 { + return []string{}, fmt.Errorf("no grpc port was found for expose-servers service") + } + for _, node := range nodes.Items { + addrs := node.Status.Addresses + for _, addr := range addrs { + if addr.Type == corev1.NodeInternalIP { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%d", addr.Address, grpcNodePort)) + } + } + } + if len(serverExternalAddresses) == 0 { + return []string{}, fmt.Errorf("no server addresses were scraped from expose-servers service") + } + return serverExternalAddresses, nil + case corev1.ServiceTypeLoadBalancer: + lbAddrs := serverService.Status.LoadBalancer.Ingress + if len(lbAddrs) < 1 { + return []string{}, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + for _, lbAddr := range lbAddrs { + // When the service is of type load balancer, the grpc port is hardcoded to 8503. + if lbAddr.IP != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.IP, "8503")) + } + if lbAddr.Hostname != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.Hostname, "8503")) + } + } + if len(serverExternalAddresses) == 0 { + return []string{}, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + default: + return []string{}, fmt.Errorf("only NodePort and LoadBalancer service types are supported") + } + return serverExternalAddresses, nil +} + // filterPeeringAcceptors receives meta and object information for Kubernetes resources that are being watched, // which in this case are Secrets. It only returns true if the Secret is a Peering Token Secret. 
It reads the labels // from the meta of the resource and uses the values of the "consul.hashicorp.com/peering-token" label to validate that diff --git a/control-plane/connect-inject/peering_acceptor_controller_test.go b/control-plane/connect-inject/peering_acceptor_controller_test.go index 8a38762c4e..861c9c9a2d 100644 --- a/control-plane/connect-inject/peering_acceptor_controller_test.go +++ b/control-plane/connect-inject/peering_acceptor_controller_test.go @@ -1452,3 +1452,301 @@ func TestAcceptor_RequestsForPeeringTokens(t *testing.T) { }) } } + +func TestGetExposeServersServiceAddress(t *testing.T) { + t.Parallel() + cases := []struct { + name string + k8sObjects func() []runtime.Object + releaseNamespace string + expAddresses []string + expErr string + }{ + { + name: "Valid LoadBalancer service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "1.2.3.4", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"1.2.3.4:8503"}, + }, + { + name: "Valid LoadBalancer service with Hostname", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + Hostname: "foo.bar.baz", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"foo.bar.baz:8503"}, + }, + { + name: "LoadBalancer has no addresses", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{}, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "LoadBalancer has empty IP", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "Valid NodePort service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + 
}, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "", "10.1.1.1") + node2 := createNode("fake-gke-node2", "", "10.2.2.2") + node3 := createNode("fake-gke-node3", "", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Valid NodePort service ignores node external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Invalid NodePort service with only external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "") + node2 := createNode("fake-gke-node2", "30.2.2.2", "") + node3 := createNode("fake-gke-node3", "30.3.3.3", "") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no server addresses were scraped from expose-servers service", + }, + { + name: "Invalid NodePort service because no nodes exist to scrape addresses from", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + return []runtime.Object{exposeServersService} + }, + expErr: "no nodes were found for scraping server addresses from expose-servers service", + }, + { + name: "Invalid NodePort service because no grpc port exists", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "not-grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no grpc port was found for expose-servers service", + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Add the default namespace. 
+ ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + nsTest := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}} + // Create fake k8s client + k8sObjects := append(tt.k8sObjects(), &ns, &nsTest) + + s := scheme.Scheme + //s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() + + // Create the peering acceptor controller + controller := &PeeringAcceptorController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + Scheme: s, + ReleaseNamespace: tt.releaseNamespace, + ExposeServersServiceName: "test-expose-servers", + } + + // Get addresses from expose-servers service. + addrs, err := controller.getExposeServersServiceAddresses() + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + } + + // Assert all the expected addresses are there. + for _, expAddr := range tt.expAddresses { + require.Contains(t, addrs, expAddr) + } + }) + } +} + +// createNode is a test helper to create Kubernetes nodes. +func createNode(name, externalIP, internalIP string) *corev1.Node { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + if externalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: externalIP}) + } + if internalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: internalIP}) + } + return node +} diff --git a/control-plane/go.mod b/control-plane/go.mod index f95b55638c..3c16db3ca9 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -6,18 +6,19 @@ require ( github.com/go-logr/logr v0.4.0 github.com/google/go-cmp v0.5.7 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/hashicorp/consul/api v1.10.1-0.20220614213650-6453375ab228 - github.com/hashicorp/consul/sdk v0.9.0 + github.com/hashicorp/consul/api v1.10.1-0.20220722131443-501089292e33 + github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f github.com/hashicorp/go-hclog v0.16.1 - github.com/hashicorp/go-multierror v1.1.0 - github.com/hashicorp/serf v0.9.6 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/serf v0.9.7 github.com/kr/text v0.2.0 github.com/mitchellh/cli v1.1.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.4.1 github.com/stretchr/testify v1.7.0 go.uber.org/zap v1.19.0 + golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac gomodules.xyz/jsonpatch/v2 v2.2.0 k8s.io/api v0.22.2 @@ -40,7 +41,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/armon/go-metrics v0.3.9 // indirect + github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.25.41 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -67,11 +68,9 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - 
github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -105,12 +104,11 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect - golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/api v0.20.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/control-plane/go.sum b/control-plane/go.sum index 728ae24d43..1c332300bb 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -81,8 +81,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -296,12 +296,11 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.10.1-0.20220614213650-6453375ab228 h1:BqzKe5O+75uYcFfJI0mJz3rhCgdVztvEj3rEs4xpPr0= -github.com/hashicorp/consul/api v1.10.1-0.20220614213650-6453375ab228/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/api v1.10.1-0.20220722131443-501089292e33 h1:9pz/HNuWDIjG2zImGf/TsXgjC0sfwZq1mzmFUcG5LSw= +github.com/hashicorp/consul/api v1.10.1-0.20220722131443-501089292e33/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50 h1:GwbRRT+QxMRbYI608FGwTfcZ0iOVLX69B2ePjpQoyXw= -github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50/go.mod 
h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/consul/sdk v0.10.0 h1:rGLEh2AWK4K0KCMvqWAz2EYxQqgciIfMagWZ0nVe5MI= +github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -311,6 +310,7 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -320,8 +320,9 @@ github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -336,8 +337,9 @@ github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2I github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -345,11 +347,12 @@ github.com/hashicorp/mdns v1.0.1/go.mod 
h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -393,6 +396,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -718,8 +722,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -772,7 +776,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go index 059bfd6769..bc69df1be1 100644 --- a/control-plane/subcommand/inject-connect/command.go +++ b/control-plane/subcommand/inject-connect/command.go @@ -94,6 +94,9 @@ type Command struct { flagInitContainerMemoryLimit string flagInitContainerMemoryRequest string + // Server address flags. + flagPollServerExposeService bool + // Transparent proxy flags. flagDefaultEnableTransparentProxy bool flagTransparentProxyDefaultOverwriteProbes bool @@ -189,6 +192,8 @@ func (c *Command) init() { "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flagSet.BoolVar(&c.flagPollServerExposeService, "poll-server-expose-service", false, + "Enables polling the Consul servers' external service for its IP(s).") // Proxy sidecar resource setting flags. c.flagSet.StringVar(&c.flagDefaultSidecarProxyCPURequest, "default-sidecar-proxy-cpu-request", "", "Default sidecar proxy CPU request.") @@ -440,11 +445,14 @@ func (c *Command) Run(args []string) int { if c.flagEnablePeering { if err = (&connectinject.PeeringAcceptorController{ - Client: mgr.GetClient(), - ConsulClient: c.consulClient, - Log: ctrl.Log.WithName("controller").WithName("peering-acceptor"), - Scheme: mgr.GetScheme(), - Context: ctx, + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + ExposeServersServiceName: c.flagResourcePrefix + "-expose-servers", + PollServerExternalService: c.flagPollServerExposeService, + ReleaseNamespace: c.flagReleaseNamespace, + Log: ctrl.Log.WithName("controller").WithName("peering-acceptor"), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "peering-acceptor") return 1
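End-to-end, the wiring above means that when the connect injector runs with -poll-server-expose-service, the acceptor controller resolves the <resource-prefix>-expose-servers Service to a list of host:port strings and forwards them on the peering token request. A rough sketch of that final Consul API call, mirroring generateToken in peering_acceptor_controller.go, is below; the surrounding function, package name, and error wrapping are illustrative, and the client is assumed to be an already-configured *api.Client.

```go
// Sketch: attach externally reachable server addresses to the peering token
// request so the dialing cluster can reach these servers over the exposed service.
package peering

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul/api"
)

func generatePeeringToken(ctx context.Context, client *api.Client, peerName string, serverExternalAddresses []string) (string, error) {
	req := api.PeeringGenerateTokenRequest{PeerName: peerName}
	// Only set external addresses when the expose-servers service yielded some;
	// otherwise Consul advertises its own server addresses in the token.
	if len(serverExternalAddresses) > 0 {
		req.ServerExternalAddresses = serverExternalAddresses
	}
	resp, _, err := client.Peerings().GenerateToken(ctx, req, nil)
	if err != nil {
		return "", fmt.Errorf("generating peering token: %w", err)
	}
	return resp.PeeringToken, nil
}
```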