diff --git a/addons/cni-cilium/Kustomization b/addons/cni-cilium/Kustomization index 0912844cd..4d9d5717c 100644 --- a/addons/cni-cilium/Kustomization +++ b/addons/cni-cilium/Kustomization @@ -5,7 +5,7 @@ namespace: kube-system helmCharts: - name: cilium repo: https://helm.cilium.io/ - version: 1.15.6 + version: 1.16.3 releaseName: cilium namespace: kube-system valuesFile: helm-values @@ -18,20 +18,6 @@ patches: name: cilium-config namespace: kube-system $patch: delete - - patch: |- - apiVersion: batch/v1 - kind: Job - metadata: - name: hubble-generate-certs - namespace: kube-system - $patch: delete - - patch: |- - apiVersion: batch/v1 - kind: CronJob - metadata: - name: hubble-generate-certs - namespace: kube-system - $patch: delete # A hack, to compensate for the lack of ability to set .Capabilities when using helmCharts API. - target: @@ -133,3 +119,28 @@ patches: name: cilium-config key: KUBERNETES_SERVICE_PORT optional: true + - patch: |- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cilium-envoy + namespace: kube-system + spec: + template: + spec: + containers: + - name: cilium-envoy + image: '{{ .InternalImages.Get "CiliumEnvoy" }}' + env: + - name: KUBERNETES_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: cilium-config + key: KUBERNETES_SERVICE_HOST + optional: true + - name: KUBERNETES_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: cilium-config + key: KUBERNETES_SERVICE_PORT + optional: true diff --git a/addons/cni-cilium/cilium-config-map.yaml b/addons/cni-cilium/cilium-config-map.yaml index bc12d4e38..499b7ced6 100644 --- a/addons/cni-cilium/cilium-config-map.yaml +++ b/addons/cni-cilium/cilium-config-map.yaml @@ -6,220 +6,152 @@ metadata: name: cilium-config namespace: kube-system data: - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". - # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in an etcd kvstore, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - identity-allocation-mode: crd - identity-heartbeat-timeout: "30m0s" - identity-gc-interval: "15m0s" - cilium-endpoint-gc-interval: "5m0s" - nodes-gc-interval: "5m0s" - skip-cnp-status-startup-clean: "false" - - # If you want to run cilium in debug mode change this value to true - debug: "false" - debug-verbose: "" - # The agent can be put into the following three policy enforcement modes - # default, always and never. - # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes - enable-policy: "default" - # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this - # field is not set. - proxy-prometheus-port: "9964" - - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "true" - - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. -{{ if .Config.ClusterNetwork.HasIPv6 }} - enable-ipv6: "true" -{{ else }} - enable-ipv6: "false" -{{ end }} - # Users who wish to specify their own custom CNI configuration file must set - # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. 
- custom-cni-conf: "false" - enable-bpf-clock-probe: "false" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: medium - - # The monitor aggregation interval governs the typical time between monitor - # notification events for each allowed connection. - # - # Only effective when monitor aggregation is set to "medium" or higher. - monitor-aggregation-interval: "5s" - - # The monitor aggregation flags determine which TCP flags which, upon the - # first observation, cause monitor notifications to be generated. - # - # Only effective when monitor aggregation is set to "medium" or higher. - monitor-aggregation-flags: all - # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic - # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" + arping-refresh-period: "30s" + auto-direct-node-routes: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + bpf-lb-acceleration: disabled + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-sock-terminate-pod-connections: "false" + bpf-lb-sock: "false" bpf-map-dynamic-size-ratio: "0.0025" - # bpf-policy-map-max specifies the maximum number of entries in endpoint - # policy map (per endpoint) bpf-policy-map-max: "16384" - # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, - # backend and affinity maps. - bpf-lb-map-max: "65536" - bpf-lb-external-clusterip: "false" - bpf-lb-acceleration: disabled - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # As a result, reply packets may be dropped and the load-balancing decisions - # for established connections may change. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. - preallocate-bpf-maps: "false" - - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "cilium/istio_proxy" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: default - # Unique ID of the cluster. Must be unique across all conneted clusters and - # in the range of 1 and 255. Only relevant when building a mesh of clusters. 
+ bpf-root: "/sys/fs/bpf" + cgroup-root: "/run/cilium/cgroupv2" + cilium-endpoint-gc-interval: "5m0s" cluster-id: "0" - - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - # Default case - routing-mode: "tunnel" - tunnel-protocol: "vxlan" - service-no-backend-response: reject - - # Enables L7 proxy for L7 policy enforcement and visibility - enable-l7-proxy: "true" - - enable-ipv4-masquerade: "true" - enable-ipv4-big-tcp: "false" - enable-ipv6-big-tcp: "false" - enable-ipv6-masquerade: "true" - - enable-xt-socket-fallback: "true" - install-no-conntrack-iptables-rules: "false" - - auto-direct-node-routes: "false" - enable-local-redirect-policy: "false" - enable-masquerade-to-route-source: "false" - enable-metrics: "true" - -{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }} - kube-proxy-replacement: "strict" - kube-proxy-replacement-healthz-bind-address: "" -{{ else }} - kube-proxy-replacement: "disabled" + cluster-name: default +{{ if .Config.ClusterNetwork.HasIPv4 }} + cluster-pool-ipv4-cidr: "{{ .Config.ClusterNetwork.PodSubnet }}" + cluster-pool-ipv4-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv4 }}" {{ end }} - max-connected-clusters: "255" - bpf-lb-sock: "false" - enable-health-check-nodeport: "true" - node-port-bind-protection: "true" - enable-auto-protect-node-port-range: "true" - enable-svc-source-range-check: "true" - enable-l2-neigh-discovery: "true" - arping-refresh-period: "30s" - enable-k8s-networkpolicy: "true" - # Tell the agent to generate and write a CNI configuration file - write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist +{{ if .Config.ClusterNetwork.HasIPv6 }} + cluster-pool-ipv6-cidr: "{{ .Config.ClusterNetwork.PodSubnetIPv6 }}" + cluster-pool-ipv6-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv6 }}" +{{ end }} + clustermesh-enable-endpoint-sync: "false" + clustermesh-enable-mcs-api: "false" cni-exclusive: "false" cni-log-file: "/var/run/cilium/cilium-cni.log" + custom-cni-conf: "false" + datapath-mode: veth + debug-verbose: "" + debug: "false" + direct-routing-skip-unreachable: "false" + dnsproxy-enable-transparent-mode: "true" + dnsproxy-socket-linger-timeout: "10" + egress-gateway-reconciliation-trigger-interval: "1s" + enable-auto-protect-node-port-range: "true" + enable-bpf-clock-probe: "false" enable-endpoint-health-checking: "true" enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" enable-health-checking: "true" - enable-remote-node-identity: "true" - enable-well-known-identities: "false" - synchronize-k8s-nodes: "true" - operator-api-serve-addr: "127.0.0.1:9234" - operator-prometheus-serve-addr: :9963 - policy-cidr-match-mode: "" - # Enable Hubble gRPC service. {{ if .Config.ClusterNetwork.CNI.Cilium.EnableHubble }} enable-hubble: "true" {{ else }} enable-hubble: "false" {{ end }} - # UNIX domain socket for Hubble server to listen to. - hubble-socket-path: "/var/run/cilium/hubble.sock" - # An additional address for Hubble server to listen to (e.g. ":4244"). 
- hubble-listen-address: ":4244" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv4: "true" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "true" +{{ if .Config.ClusterNetwork.HasIPv6 }} + enable-ipv6: "true" +{{ else }} + enable-ipv6: "false" +{{ end }} + enable-k8s-networkpolicy: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-local-redirect-policy: "false" + enable-masquerade-to-route-source: "false" + enable-metrics: "true" + enable-node-selector-labels: "false" + enable-policy: "default" + enable-runtime-device-detection: "true" + enable-sctp: "false" + enable-svc-source-range-check: "true" + enable-tcx: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + envoy-base-id: "0" + envoy-keep-cap-netbindservice: "false" + external-envoy-proxy: "true" hubble-disable-tls: "false" hubble-export-file-max-backups: "5" hubble-export-file-max-size-mb: "10" + hubble-listen-address: ":4244" + hubble-socket-path: "/var/run/cilium/hubble.sock" hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt - hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt - ipam: "cluster-pool" + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + identity-allocation-mode: crd + identity-gc-interval: "15m0s" + identity-heartbeat-timeout: "30m0s" + install-no-conntrack-iptables-rules: "false" ipam-cilium-node-update-rate: "15s" -{{ if .Config.ClusterNetwork.HasIPv4 }} - cluster-pool-ipv4-cidr: "{{ .Config.ClusterNetwork.PodSubnet }}" - cluster-pool-ipv4-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv4 }}" -{{ end }} -{{ if .Config.ClusterNetwork.HasIPv6 }} - cluster-pool-ipv6-cidr: "{{ .Config.ClusterNetwork.PodSubnetIPv6 }}" - cluster-pool-ipv6-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv6 }}" + ipam: "cluster-pool" + k8s-client-burst: "20" + k8s-client-qps: "10" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" +{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }} + kube-proxy-replacement-healthz-bind-address: "" + kube-proxy-replacement: "true" +{{ else }} + kube-proxy-replacement: "false" {{ end }} - dnsproxy-enable-transparent-mode: "true" - egress-gateway-reconciliation-trigger-interval: "1s" - enable-vtep: "false" - vtep-endpoint: "" - vtep-cidr: "" - vtep-mask: "" - vtep-mac: "" - enable-bgp-control-plane: "false" + max-connected-clusters: "255" + mesh-auth-enabled: "true" + mesh-auth-gc-interval: "5m0s" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + monitor-aggregation-flags: all + monitor-aggregation-interval: "5s" + monitor-aggregation: medium + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: "5m0s" + operator-api-serve-addr: "127.0.0.1:9234" + operator-prometheus-serve-addr: :9963 + policy-cidr-match-mode: "" + preallocate-bpf-maps: "false" procfs: "/host/proc" - bpf-root: "/sys/fs/bpf" - cgroup-root: "/run/cilium/cgroupv2" - enable-k8s-terminating-endpoint: "true" - enable-sctp: "false" - k8s-client-qps: "5" - k8s-client-burst: "10" + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" 
remove-cilium-node-taints: "true" - set-cilium-node-taints: "true" + routing-mode: "tunnel" + service-no-backend-response: reject set-cilium-is-up-condition: "true" - unmanaged-pod-watcher-interval: "15" + set-cilium-node-taints: "true" + synchronize-k8s-nodes: "true" tofqdns-dns-reject-response-code: "refused" tofqdns-enable-dns-compression: "true" tofqdns-endpoint-max-ip-per-hostname: "50" tofqdns-idle-connection-grace-period: "0s" tofqdns-max-deferred-connection-deletes: "10000" tofqdns-proxy-response-max-delay: "100ms" - agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" - mesh-auth-enabled: "true" - mesh-auth-queue-size: "1024" - mesh-auth-rotated-identities-queue-size: "1024" - mesh-auth-gc-interval: "5m0s" - proxy-connect-timeout: "2" - proxy-max-requests-per-connection: "0" - proxy-max-connection-duration-seconds: "0" - external-envoy-proxy: "false" - - {{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }} + tunnel-protocol: "vxlan" + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist +{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }} KUBERNETES_SERVICE_HOST: "{{ .Config.APIEndpoint.Host }}" KUBERNETES_SERVICE_PORT: "{{ .Config.APIEndpoint.Port }}" - {{ end }} +{{ end }} diff --git a/addons/cni-cilium/cilium.yaml b/addons/cni-cilium/cilium.yaml index 5e10cdb76..116612b59 100644 --- a/addons/cni-cilium/cilium.yaml +++ b/addons/cni-cilium/cilium.yaml @@ -7,13 +7,13 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: cilium-operator + name: cilium-envoy namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: - name: hubble-generate-certs + name: cilium-operator namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 @@ -133,8 +133,6 @@ rules: - apiGroups: - cilium.io resources: - - ciliumnetworkpolicies/status - - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - ciliuml2announcementpolicies/status @@ -158,6 +156,14 @@ rules: - list - watch - delete + - apiGroups: + - "" + resourceNames: + - cilium-config + resources: + - configmaps + verbs: + - patch - apiGroups: - "" resources: @@ -316,6 +322,7 @@ rules: resources: - ciliumloadbalancerippools - ciliumpodippools + - ciliumbgppeeringpolicies - ciliumbgpclusterconfigs - ciliumbgpnodeconfigoverrides verbs: @@ -344,39 +351,6 @@ rules: - update --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/part-of: cilium - name: hubble-generate-certs -rules: - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - apiGroups: - - "" - resourceNames: - - hubble-server-certs - - hubble-relay-client-certs - - hubble-relay-server-certs - resources: - - secrets - verbs: - - update - - apiGroups: - - "" - resourceNames: - - cilium-ca - resources: - - secrets - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: @@ -422,39 +396,354 @@ subjects: name: cilium-operator namespace: kube-system --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +apiVersion: v1 +data: + bootstrap-config.json: | + { + "node": { + "id": "host~127.0.0.1~no-id~localdomain", + "cluster": "ingress-cluster" + }, + "staticResources": { + "listeners": [ + { + "name": "envoy-prometheus-metrics-listener", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9964 + } + }, + 
"filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "stat_prefix": "envoy-prometheus-metrics-listener", + "route_config": { + "virtual_hosts": [ + { + "name": "prometheus_metrics_route", + "domains": [ + "*" + ], + "routes": [ + { + "name": "prometheus_metrics_route", + "match": { + "prefix": "/metrics" + }, + "route": { + "cluster": "/envoy-admin", + "prefix_rewrite": "/stats/prometheus" + } + } + ] + } + ] + }, + "http_filters": [ + { + "name": "envoy.filters.http.router", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "stream_idle_timeout": "0s" + } + } + ] + } + ] + }, + { + "name": "envoy-health-listener", + "address": { + "socket_address": { + "address": "127.0.0.1", + "port_value": 9878 + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "stat_prefix": "envoy-health-listener", + "route_config": { + "virtual_hosts": [ + { + "name": "health", + "domains": [ + "*" + ], + "routes": [ + { + "name": "health", + "match": { + "prefix": "/healthz" + }, + "route": { + "cluster": "/envoy-admin", + "prefix_rewrite": "/ready" + } + } + ] + } + ] + }, + "http_filters": [ + { + "name": "envoy.filters.http.router", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "stream_idle_timeout": "0s" + } + } + ] + } + ] + } + ], + "clusters": [ + { + "name": "ingress-cluster", + "type": "ORIGINAL_DST", + "connectTimeout": "2s", + "lbPolicy": "CLUSTER_PROVIDED", + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "commonHttpProtocolOptions": { + "idleTimeout": "60s", + "maxConnectionDuration": "0s", + "maxRequestsPerConnection": 0 + }, + "useDownstreamProtocolConfig": {} + } + }, + "cleanupInterval": "2.500s" + }, + { + "name": "egress-cluster-tls", + "type": "ORIGINAL_DST", + "connectTimeout": "2s", + "lbPolicy": "CLUSTER_PROVIDED", + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "commonHttpProtocolOptions": { + "idleTimeout": "60s", + "maxConnectionDuration": "0s", + "maxRequestsPerConnection": 0 + }, + "upstreamHttpProtocolOptions": {}, + "useDownstreamProtocolConfig": {} + } + }, + "cleanupInterval": "2.500s", + "transportSocket": { + "name": "cilium.tls_wrapper", + "typedConfig": { + "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext" + } + } + }, + { + "name": "egress-cluster", + "type": "ORIGINAL_DST", + "connectTimeout": "2s", + "lbPolicy": "CLUSTER_PROVIDED", + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "commonHttpProtocolOptions": { + "idleTimeout": "60s", + "maxConnectionDuration": "0s", + "maxRequestsPerConnection": 0 + }, + "useDownstreamProtocolConfig": {} + } + }, + "cleanupInterval": "2.500s" + }, + { + "name": "ingress-cluster-tls", + "type": 
"ORIGINAL_DST", + "connectTimeout": "2s", + "lbPolicy": "CLUSTER_PROVIDED", + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "commonHttpProtocolOptions": { + "idleTimeout": "60s", + "maxConnectionDuration": "0s", + "maxRequestsPerConnection": 0 + }, + "upstreamHttpProtocolOptions": {}, + "useDownstreamProtocolConfig": {} + } + }, + "cleanupInterval": "2.500s", + "transportSocket": { + "name": "cilium.tls_wrapper", + "typedConfig": { + "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext" + } + } + }, + { + "name": "xds-grpc-cilium", + "type": "STATIC", + "connectTimeout": "2s", + "loadAssignment": { + "clusterName": "xds-grpc-cilium", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "pipe": { + "path": "/var/run/cilium/envoy/sockets/xds.sock" + } + } + } + } + ] + } + ] + }, + "typedExtensionProtocolOptions": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicitHttpConfig": { + "http2ProtocolOptions": {} + } + } + } + }, + { + "name": "/envoy-admin", + "type": "STATIC", + "connectTimeout": "2s", + "loadAssignment": { + "clusterName": "/envoy-admin", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "pipe": { + "path": "/var/run/cilium/envoy/sockets/admin.sock" + } + } + } + } + ] + } + ] + } + } + ] + }, + "dynamicResources": { + "ldsConfig": { + "apiConfigSource": { + "apiType": "GRPC", + "transportApiVersion": "V3", + "grpcServices": [ + { + "envoyGrpc": { + "clusterName": "xds-grpc-cilium" + } + } + ], + "setNodeOnFirstMessageOnly": true + }, + "resourceApiVersion": "V3" + }, + "cdsConfig": { + "apiConfigSource": { + "apiType": "GRPC", + "transportApiVersion": "V3", + "grpcServices": [ + { + "envoyGrpc": { + "clusterName": "xds-grpc-cilium" + } + } + ], + "setNodeOnFirstMessageOnly": true + }, + "resourceApiVersion": "V3" + } + }, + "bootstrapExtensions": [ + { + "name": "envoy.bootstrap.internal_listener", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener" + } + } + ], + "layeredRuntime": { + "layers": [ + { + "name": "static_layer_0", + "staticLayer": { + "overload": { + "global_downstream_max_connections": 50000 + } + } + } + ] + }, + "admin": { + "address": { + "pipe": { + "path": "/var/run/cilium/envoy/sockets/admin.sock" + } + } + } + } +kind: ConfigMap metadata: - labels: - app.kubernetes.io/part-of: cilium - name: hubble-generate-certs -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: hubble-generate-certs -subjects: - - kind: ServiceAccount - name: hubble-generate-certs - namespace: kube-system + name: cilium-envoy-config + namespace: kube-system --- apiVersion: v1 kind: Service metadata: + annotations: + prometheus.io/port: "9964" + prometheus.io/scrape: "true" labels: - app.kubernetes.io/name: hubble-peer + app.kubernetes.io/name: cilium-envoy app.kubernetes.io/part-of: cilium - k8s-app: cilium - name: hubble-peer + io.cilium/app: proxy + k8s-app: cilium-envoy + name: cilium-envoy namespace: kube-system spec: - internalTrafficPolicy: Local + clusterIP: None ports: - - name: peer-service - port: 443 + - name: envoy-metrics + port: 9964 protocol: TCP - targetPort: 4244 + targetPort: envoy-metrics selector: - k8s-app: cilium + k8s-app: cilium-envoy + type: ClusterIP --- 
apiVersion: apps/v1 kind: Deployment @@ -568,7 +857,6 @@ spec: kubernetes.io/os: linux priorityClassName: system-cluster-critical restartPolicy: Always - serviceAccount: cilium-operator serviceAccountName: cilium-operator tolerations: - operator: Exists @@ -609,11 +897,6 @@ spec: k8s-app: cilium template: metadata: - annotations: - container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined - container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined - container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined - container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined labels: app.kubernetes.io/name: cilium-agent app.kubernetes.io/part-of: cilium @@ -756,6 +1039,9 @@ spec: successThreshold: 1 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: + - mountPath: /var/run/cilium/envoy/sockets + name: envoy-sockets + readOnly: false - mountPath: /host/proc/sys/net name: host-proc-sys-net - mountPath: /host/proc/sys/kernel @@ -974,7 +1260,9 @@ spec: kubernetes.io/os: linux priorityClassName: system-node-critical restartPolicy: Always - serviceAccount: cilium + securityContext: + appArmorProfile: + type: Unconfined serviceAccountName: cilium terminationGracePeriodSeconds: 1 tolerations: @@ -1013,6 +1301,10 @@ spec: path: /run/xtables.lock type: FileOrCreate name: xtables-lock + - hostPath: + path: /var/run/cilium/envoy/sockets + type: DirectoryOrCreate + name: envoy-sockets - name: clustermesh-secrets projected: defaultMode: 256 @@ -1030,6 +1322,16 @@ spec: path: common-etcd-client-ca.crt name: clustermesh-apiserver-remote-cert optional: true + - secret: + items: + - key: tls.key + path: local-etcd-client.key + - key: tls.crt + path: local-etcd-client.crt + - key: ca.crt + path: local-etcd-client-ca.crt + name: clustermesh-apiserver-local-cert + optional: true - hostPath: path: /proc/sys/net type: Directory @@ -1056,3 +1358,178 @@ spec: rollingUpdate: maxUnavailable: 2 type: RollingUpdate +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app.kubernetes.io/name: cilium-envoy + app.kubernetes.io/part-of: cilium + k8s-app: cilium-envoy + name: cilium-envoy + name: cilium-envoy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium-envoy + template: + metadata: + labels: + app.kubernetes.io/name: cilium-envoy + app.kubernetes.io/part-of: cilium + k8s-app: cilium-envoy + name: cilium-envoy + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cilium.io/no-schedule + operator: NotIn + values: + - "true" + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium-envoy + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: true + containers: + - args: + - -- + - -c /var/run/cilium/envoy/bootstrap-config.json + - --base-id 0 + - --log-level info + - --log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v + command: + - /usr/bin/cilium-envoy-starter + env: + - name: KUBERNETES_SERVICE_HOST + valueFrom: + configMapKeyRef: + key: KUBERNETES_SERVICE_HOST + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_PORT + valueFrom: + configMapKeyRef: + key: KUBERNETES_SERVICE_PORT + name: cilium-config + optional: true + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: 
v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: '{{ .InternalImages.Get "CiliumEnvoy" }}' + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9878 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-envoy + ports: + - containerPort: 9964 + hostPort: 9964 + name: envoy-metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9878 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_ADMIN + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9878 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/cilium/envoy/sockets + name: envoy-sockets + readOnly: false + - mountPath: /var/run/cilium/envoy/artifacts + name: envoy-artifacts + readOnly: true + - mountPath: /var/run/cilium/envoy/ + name: envoy-config + readOnly: true + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + securityContext: + appArmorProfile: + type: Unconfined + serviceAccountName: cilium-envoy + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/run/cilium/envoy/sockets + type: DirectoryOrCreate + name: envoy-sockets + - hostPath: + path: /var/run/cilium/envoy/artifacts + type: DirectoryOrCreate + name: envoy-artifacts + - configMap: + defaultMode: 256 + items: + - key: bootstrap-config.json + path: bootstrap-config.json + name: cilium-envoy-config + name: envoy-config + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate diff --git a/addons/cni-cilium/helm-values b/addons/cni-cilium/helm-values index 4bc02df8f..50d4b5b8d 100644 --- a/addons/cni-cilium/helm-values +++ b/addons/cni-cilium/helm-values @@ -3,14 +3,14 @@ cni: hubble: relay: - enabled: false + enabled: true tls: auto: method: cronJob ui: - enabled: false + enabled: true -kubeProxyReplacement: strict +kubeProxyReplacement: true operator: enabled: true diff --git a/addons/cni-cilium/hubble.yaml b/addons/cni-cilium/hubble.yaml index 1a391393d..58b1f8138 100644 --- a/addons/cni-cilium/hubble.yaml +++ b/addons/cni-cilium/hubble.yaml @@ -1,6 +1,5 @@ {{ if .Config.ClusterNetwork.CNI.Cilium.EnableHubble }} --- -# Source: cilium/templates/hubble-relay/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: @@ -24,7 +23,6 @@ data: {{ $hubble_ipv6 := default "true" .Params.HubbleIPv6 }} --- -# Source: cilium/templates/hubble-ui/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: @@ -67,31 +65,34 @@ data: # double `/index.html` is required here try_files $uri $uri/ /index.html /index.html; } + + # Liveness probe + location /healthz { + access_log off; + add_header Content-Type text/plain; + return 200 'ok'; + } } } --- -# Source: cilium/templates/hubble-relay/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: "hubble-relay" + name: hubble-generate-certs namespace: kube-system 
--- -# Source: cilium/templates/hubble-ui/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: "hubble-ui" + name: hubble-relay namespace: kube-system --- -# Source: cilium/templates/hubble/tls-cronjob/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: "hubble-generate-certs" + name: hubble-ui namespace: kube-system --- -# Source: cilium/templates/hubble-ui/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -137,13 +138,13 @@ rules: - list - watch --- -# Source: cilium/templates/hubble/tls-cronjob/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +kind: Role metadata: - name: hubble-generate-certs labels: app.kubernetes.io/part-of: cilium + name: hubble-generate-certs + namespace: kube-system rules: - apiGroups: - "" @@ -153,25 +154,26 @@ rules: - create - apiGroups: - "" - resources: - - secrets resourceNames: - hubble-server-certs - hubble-relay-client-certs - hubble-relay-server-certs + - hubble-metrics-server-certs + - hubble-ui-client-certs + resources: + - secrets verbs: - update - apiGroups: - "" - resources: - - secrets resourceNames: - cilium-ca + resources: + - secrets verbs: - get - update --- -# Source: cilium/templates/hubble-ui/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -187,90 +189,86 @@ subjects: name: hubble-ui namespace: kube-system --- -# Source: cilium/templates/hubble/tls-cronjob/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: - name: hubble-generate-certs labels: app.kubernetes.io/part-of: cilium + name: hubble-generate-certs + namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole + kind: Role name: hubble-generate-certs subjects: -- kind: ServiceAccount - name: "hubble-generate-certs" - namespace: kube-system + - kind: ServiceAccount + name: hubble-generate-certs + namespace: kube-system --- -# Source: cilium/templates/hubble-relay/service.yaml -kind: Service apiVersion: v1 +kind: Service metadata: - name: hubble-relay - namespace: kube-system labels: - k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay + name: hubble-relay + namespace: kube-system spec: - type: "ClusterIP" + ports: + - port: 80 + protocol: TCP + targetPort: grpc selector: k8s-app: hubble-relay - ports: - - protocol: TCP - port: 80 - targetPort: 4245 + type: ClusterIP --- -# Source: cilium/templates/hubble-ui/service.yaml -kind: Service apiVersion: v1 +kind: Service metadata: - name: hubble-ui - namespace: kube-system labels: - k8s-app: hubble-ui app.kubernetes.io/name: hubble-ui app.kubernetes.io/part-of: cilium -spec: - type: "ClusterIP" - selector: k8s-app: hubble-ui + name: hubble-ui + namespace: kube-system +spec: ports: - name: http port: 80 targetPort: 8081 + selector: + k8s-app: hubble-ui + type: ClusterIP --- -# Source: cilium/templates/hubble/peer-service.yaml apiVersion: v1 kind: Service metadata: - name: hubble-peer - namespace: kube-system labels: - k8s-app: cilium - app.kubernetes.io/part-of: cilium app.kubernetes.io/name: hubble-peer + app.kubernetes.io/part-of: cilium + k8s-app: cilium + name: hubble-peer + namespace: kube-system spec: + internalTrafficPolicy: Local + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 selector: k8s-app: cilium - ports: - - name: peer-service - port: 443 - protocol: TCP - targetPort: 4244 - 
internalTrafficPolicy: Local --- -# Source: cilium/templates/hubble-relay/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: hubble-relay - namespace: kube-system labels: - k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay + name: hubble-relay + namespace: kube-system spec: replicas: 1 selector: @@ -282,93 +280,101 @@ spec: type: RollingUpdate template: metadata: - annotations: + annotations: null labels: - k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay spec: - securityContext: - fsGroup: 65532 + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: false containers: - - name: hubble-relay + - args: + - serve + command: + - hubble-relay + image: '{{ .InternalImages.Get "HubbleRelay" }}' + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 12 + grpc: + port: 4222 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: hubble-relay + ports: + - containerPort: 4245 + name: grpc + readinessProbe: + grpc: + port: 4222 + timeoutSeconds: 3 securityContext: capabilities: drop: - - ALL + - ALL runAsGroup: 65532 runAsNonRoot: true runAsUser: 65532 - image: {{ .InternalImages.Get "HubbleRelay" }} - imagePullPolicy: IfNotPresent - command: - - hubble-relay - args: - - serve - ports: - - name: grpc - containerPort: 4245 - readinessProbe: - tcpSocket: - port: grpc - livenessProbe: - tcpSocket: - port: grpc - volumeMounts: - - name: config - mountPath: /etc/hubble-relay - readOnly: true - - name: tls - mountPath: /var/lib/hubble-relay/tls - readOnly: true + startupProbe: + failureThreshold: 20 + grpc: + port: 4222 + initialDelaySeconds: 10 + periodSeconds: 3 terminationMessagePolicy: FallbackToLogsOnError - restartPolicy: Always - priorityClassName: - serviceAccount: "hubble-relay" - serviceAccountName: "hubble-relay" - automountServiceAccountToken: false - terminationGracePeriodSeconds: 1 - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname + volumeMounts: + - mountPath: /etc/hubble-relay + name: config + readOnly: true + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true nodeSelector: kubernetes.io/os: linux + priorityClassName: null + restartPolicy: Always + securityContext: + fsGroup: 65532 + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 1 volumes: - - name: config - configMap: - name: hubble-relay-config - items: - - key: config.yaml - path: config.yaml - - name: tls - projected: - # note: the leading zero means this number is in octal representation: do not remove it - defaultMode: 0400 - sources: - - secret: - name: hubble-relay-client-certs - items: - - key: tls.crt - path: client.crt - - key: tls.key - path: client.key - - key: ca.crt - path: hubble-server-ca.crt + - configMap: + items: + - key: config.yaml + path: config.yaml + name: hubble-relay-config + name: config + - name: tls + projected: + defaultMode: 256 + sources: + - secret: + items: + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - key: ca.crt + path: hubble-server-ca.crt + name: hubble-relay-client-certs --- -# Source: cilium/templates/hubble-ui/deployment.yaml -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: - 
name: hubble-ui - namespace: kube-system labels: - k8s-app: hubble-ui app.kubernetes.io/name: hubble-ui app.kubernetes.io/part-of: cilium + k8s-app: hubble-ui + name: hubble-ui + namespace: kube-system spec: replicas: 1 selector: @@ -380,66 +386,74 @@ spec: type: RollingUpdate template: metadata: - annotations: + annotations: null labels: - k8s-app: hubble-ui app.kubernetes.io/name: hubble-ui app.kubernetes.io/part-of: cilium + k8s-app: hubble-ui spec: - priorityClassName: - serviceAccount: "hubble-ui" - serviceAccountName: "hubble-ui" automountServiceAccountToken: true containers: - - name: frontend - image: {{ .InternalImages.Get "HubbleUI" }} - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8081 - volumeMounts: - - name: hubble-ui-nginx-conf - mountPath: /etc/nginx/conf.d/default.conf - subPath: nginx.conf - - name: tmp-dir - mountPath: /tmp - terminationMessagePolicy: FallbackToLogsOnError - - name: backend - image: {{ .InternalImages.Get "HubbleUIBackend" }} - imagePullPolicy: IfNotPresent - env: - - name: EVENTS_SERVER_PORT - value: "8090" - - name: FLOWS_API_ADDR - value: "hubble-relay:80" - ports: - - name: grpc - containerPort: 8090 - volumeMounts: - terminationMessagePolicy: FallbackToLogsOnError + - image: '{{ .InternalImages.Get "HubbleUI" }}' + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + name: frontend + ports: + - containerPort: 8081 + name: http + readinessProbe: + httpGet: + path: / + port: 8081 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/nginx/conf.d/default.conf + name: hubble-ui-nginx-conf + subPath: nginx.conf + - mountPath: /tmp + name: tmp-dir + - env: + - name: EVENTS_SERVER_PORT + value: "8090" + - name: FLOWS_API_ADDR + value: hubble-relay:80 + image: '{{ .InternalImages.Get "HubbleUIBackend" }}' + imagePullPolicy: IfNotPresent + name: backend + ports: + - containerPort: 8090 + name: grpc + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: null nodeSelector: kubernetes.io/os: linux + priorityClassName: null + securityContext: + fsGroup: 1001 + runAsGroup: 1001 + runAsUser: 1001 + serviceAccountName: hubble-ui volumes: - - configMap: - defaultMode: 420 - name: hubble-ui-nginx - name: hubble-ui-nginx-conf - - emptyDir: {} - name: tmp-dir + - configMap: + defaultMode: 420 + name: hubble-ui-nginx + name: hubble-ui-nginx-conf + - emptyDir: {} + name: tmp-dir --- -# Source: cilium/templates/hubble/tls-cronjob/cronjob.yaml apiVersion: batch/v1 kind: CronJob metadata: - name: hubble-generate-certs - namespace: kube-system labels: - k8s-app: hubble-generate-certs app.kubernetes.io/name: hubble-generate-certs app.kubernetes.io/part-of: cilium - annotations: + k8s-app: hubble-generate-certs + name: hubble-generate-certs + namespace: kube-system spec: - schedule: "0 0 1 */4 *" concurrencyPolicy: Forbid jobTemplate: spec: @@ -448,76 +462,127 @@ spec: labels: k8s-app: hubble-generate-certs spec: + affinity: null + automountServiceAccountToken: true containers: - - name: certgen - image: {{ .InternalImages.Get "CiliumCertGen" }} - imagePullPolicy: IfNotPresent + - args: + - --ca-generate + - --ca-reuse-secret + - --ca-secret-namespace=kube-system + - --ca-secret-name=cilium-ca + - --ca-common-name=Cilium CA command: - - "/usr/bin/cilium-certgen" - # Because this is executed as a job, we pass the values as command - # line args instead of via config map. This allows users to inspect - # the values used in past runs by inspecting the completed pod. 
- args: - - "--cilium-namespace=kube-system" - - "--ca-generate" - - "--ca-reuse-secret" - - "--hubble-server-cert-generate" - - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" - - "--hubble-server-cert-validity-duration=94608000s" - - "--hubble-relay-client-cert-generate" - - "--hubble-relay-client-cert-validity-duration=94608000s" - hostNetwork: true - serviceAccount: "hubble-generate-certs" - serviceAccountName: "hubble-generate-certs" - automountServiceAccountToken: true + - /usr/bin/cilium-certgen + env: + - name: CILIUM_CERTGEN_CONFIG + value: | + certs: + - name: hubble-server-certs + namespace: kube-system + commonName: "*.default.hubble-grpc.cilium.io" + hosts: + - "*.default.hubble-grpc.cilium.io" + usage: + - signing + - key encipherment + - server auth + - client auth + validity: 26280h + - name: hubble-relay-client-certs + namespace: kube-system + commonName: "*.hubble-relay.cilium.io" + hosts: + - "*.hubble-relay.cilium.io" + usage: + - signing + - key encipherment + - client auth + validity: 26280h + image: '{{ .InternalImages.Get "CiliumCertGen" }}' + imagePullPolicy: IfNotPresent + name: certgen + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostNetwork: false restartPolicy: OnFailure + securityContext: + seccompProfile: + type: RuntimeDefault + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs ttlSecondsAfterFinished: 1800 + schedule: 0 0 1 */4 * --- -# Source: cilium/templates/cilium-secrets-namespace.yaml -# Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. - -# Only create the namespace if it's different from Ingress and Gateway API secret namespaces (if enabled). ---- -# Source: cilium/templates/hubble/tls-cronjob/job.yaml apiVersion: batch/v1 kind: Job metadata: - name: hubble-generate-certs - namespace: kube-system + annotations: + helm.sh/hook: post-install,post-upgrade labels: - k8s-app: hubble-generate-certs app.kubernetes.io/name: hubble-generate-certs app.kubernetes.io/part-of: cilium - annotations: - "helm.sh/hook": post-install,post-upgrade + k8s-app: hubble-generate-certs + name: hubble-generate-certs + namespace: kube-system spec: template: metadata: labels: k8s-app: hubble-generate-certs spec: + affinity: null + automountServiceAccountToken: true containers: - - name: certgen - image: {{ .InternalImages.Get "CiliumCertGen" }} - imagePullPolicy: IfNotPresent + - args: + - --ca-generate + - --ca-reuse-secret + - --ca-secret-namespace=kube-system + - --ca-secret-name=cilium-ca + - --ca-common-name=Cilium CA command: - - "/usr/bin/cilium-certgen" - # Because this is executed as a job, we pass the values as command - # line args instead of via config map. This allows users to inspect - # the values used in past runs by inspecting the completed pod. 
- args: - - "--cilium-namespace=kube-system" - - "--ca-generate" - - "--ca-reuse-secret" - - "--hubble-server-cert-generate" - - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" - - "--hubble-server-cert-validity-duration=94608000s" - - "--hubble-relay-client-cert-generate" - - "--hubble-relay-client-cert-validity-duration=94608000s" - hostNetwork: true - serviceAccount: "hubble-generate-certs" - serviceAccountName: "hubble-generate-certs" - automountServiceAccountToken: true + - /usr/bin/cilium-certgen + env: + - name: CILIUM_CERTGEN_CONFIG + value: | + certs: + - name: hubble-server-certs + namespace: kube-system + commonName: "*.default.hubble-grpc.cilium.io" + hosts: + - "*.default.hubble-grpc.cilium.io" + usage: + - signing + - key encipherment + - server auth + - client auth + validity: 26280h + - name: hubble-relay-client-certs + namespace: kube-system + commonName: "*.hubble-relay.cilium.io" + hosts: + - "*.hubble-relay.cilium.io" + usage: + - signing + - key encipherment + - client auth + validity: 26280h + image: '{{ .InternalImages.Get "CiliumCertGen" }}' + imagePullPolicy: IfNotPresent + name: certgen + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostNetwork: false restartPolicy: OnFailure + securityContext: + seccompProfile: + type: RuntimeDefault + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs ttlSecondsAfterFinished: 1800 {{ end }} diff --git a/pkg/addons/ensure.go b/pkg/addons/ensure.go index 22a19ba75..d9cd1fa6c 100644 --- a/pkg/addons/ensure.go +++ b/pkg/addons/ensure.go @@ -98,6 +98,9 @@ func collectAddons(s *state.State) []addonAction { case s.Cluster.ClusterNetwork.CNI.Cilium != nil: addonsToDeploy = append(addonsToDeploy, addonAction{ name: resources.AddonCNICilium, + supportFn: func() error { + return migrateCiliumHubbleCertsJob(s) + }, }) case s.Cluster.ClusterNetwork.CNI.WeaveNet != nil: addonsToDeploy = append(addonsToDeploy, addonAction{ diff --git a/pkg/addons/helpers.go b/pkg/addons/helpers.go index 5112afa02..8e4a34f4e 100644 --- a/pkg/addons/helpers.go +++ b/pkg/addons/helpers.go @@ -28,6 +28,7 @@ import ( "k8c.io/kubeone/pkg/templates/resources" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" @@ -67,6 +68,24 @@ var ( } ) +func migrateCiliumHubbleCertsJob(s *state.State) error { + key := client.ObjectKey{ + Name: "hubble-generate-certs", + Namespace: metav1.NamespaceSystem, + } + + job := &batchv1.Job{} + if err := s.DynamicClient.Get(s.Context, key, job); err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + + return err + } + + return clientutil.DeleteIfExists(s.Context, s.DynamicClient, job) +} + func migrateAWSCSIDriver(s *state.State) error { if err := migrateAWSCSIController(s); err != nil { return err diff --git a/pkg/tasks/kubeconfig.go b/pkg/tasks/kubeconfig.go index a2e3f894e..c3aedc1ba 100644 --- a/pkg/tasks/kubeconfig.go +++ b/pkg/tasks/kubeconfig.go @@ -44,7 +44,7 @@ func saveKubeconfig(st *state.State) error { } func removeSuperKubeconfig(st *state.State) error { - st.Logger.Info("Removing %s...", superAdminConfPath) + st.Logger.Infof("Removing %s...", superAdminConfPath) host, err := st.Cluster.Leader() if err != nil { diff --git a/pkg/templates/images/images.go b/pkg/templates/images/images.go index 2bf9b1760..ed38b8d58 100644 --- a/pkg/templates/images/images.go +++ b/pkg/templates/images/images.go @@ -56,6 +56,7 @@ const ( // Cilium CNI 
Cilium CiliumOperator + CiliumEnvoy HubbleRelay HubbleUI HubbleUIBackend @@ -391,14 +392,15 @@ func optionalResources() map[Resource]map[string]string { CalicoVXLANNode: {"*": "quay.io/calico/node:v3.28.2"}, // Cilium - Cilium: {"*": "quay.io/cilium/cilium:v1.15.6@sha256:6aa840986a3a9722cd967ef63248d675a87add7e1704740902d5d3162f0c0def"}, - CiliumOperator: {"*": "quay.io/cilium/operator-generic:v1.15.6@sha256:5789f0935eef96ad571e4f5565a8800d3a8fbb05265cf6909300cd82fd513c3d"}, + Cilium: {"*": "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"}, + CiliumOperator: {"*": "quay.io/cilium/operator-generic:v1.16.3@sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b"}, + CiliumEnvoy: {"*": "quay.io/cilium/cilium-envoy:v1.29.9-1728346947-0d05e48bfbb8c4737ec40d5781d970a550ed2bbd@sha256:42614a44e508f70d03a04470df5f61e3cffd22462471a0be0544cf116f2c50ba"}, // Hubble - HubbleRelay: {"*": "quay.io/cilium/hubble-relay:v1.15.6@sha256:a0863dd70d081b273b87b9b7ce7e2d3f99171c2f5e202cd57bc6691e51283e0c"}, - HubbleUI: {"*": "quay.io/cilium/hubble-ui:v0.13.0@sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"}, - HubbleUIBackend: {"*": "quay.io/cilium/hubble-ui-backend:v0.13.0@sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"}, - CiliumCertGen: {"*": "quay.io/cilium/certgen:v0.1.12@sha256:bbc5e65e9dc65bc6b58967fe536b7f3b54e12332908aeb0a96a36866b4372b4e"}, + HubbleRelay: {"*": "quay.io/cilium/hubble-relay:v1.16.3@sha256:feb60efd767e0e7863a94689f4a8db56a0acc7c1d2b307dee66422e3dc25a089"}, + HubbleUI: {"*": "quay.io/cilium/hubble-ui:v0.13.1@sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6"}, + HubbleUIBackend: {"*": "quay.io/cilium/hubble-ui-backend:v0.13.1@sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b"}, + CiliumCertGen: {"*": "quay.io/cilium/certgen:v0.2.0@sha256:169d93fd8f2f9009db3b9d5ccd37c2b753d0989e1e7cd8fe79f9160c459eef4f"}, // Cluster-autoscaler addon ClusterAutoscaler: { diff --git a/pkg/templates/images/resource_string.go b/pkg/templates/images/resource_string.go index 7034c5eac..2ddd2b716 100644 --- a/pkg/templates/images/resource_string.go +++ b/pkg/templates/images/resource_string.go @@ -14,108 +14,109 @@ func _() { _ = x[Flannel-4] _ = x[Cilium-5] _ = x[CiliumOperator-6] - _ = x[HubbleRelay-7] - _ = x[HubbleUI-8] - _ = x[HubbleUIBackend-9] - _ = x[CiliumCertGen-10] - _ = x[WeaveNetCNIKube-11] - _ = x[WeaveNetCNINPC-12] - _ = x[DNSNodeCache-13] - _ = x[MachineController-14] - _ = x[MetricsServer-15] - _ = x[OperatingSystemManager-16] - _ = x[ClusterAutoscaler-17] - _ = x[AwsCCM-18] - _ = x[AzureCCM-19] - _ = x[AzureCNM-20] - _ = x[CSISnapshotController-21] - _ = x[CSISnapshotWebhook-22] - _ = x[AwsEbsCSI-23] - _ = x[AwsEbsCSIAttacher-24] - _ = x[AwsEbsCSILivenessProbe-25] - _ = x[AwsEbsCSINodeDriverRegistrar-26] - _ = x[AwsEbsCSIProvisioner-27] - _ = x[AwsEbsCSIResizer-28] - _ = x[AwsEbsCSISnapshotter-29] - _ = x[AzureFileCSI-30] - _ = x[AzureFileCSIAttacher-31] - _ = x[AzureFileCSILivenessProbe-32] - _ = x[AzureFileCSINodeDriverRegistar-33] - _ = x[AzureFileCSIProvisioner-34] - _ = x[AzureFileCSIResizer-35] - _ = x[AzureFileCSISnapshotter-36] - _ = x[AzureDiskCSI-37] - _ = x[AzureDiskCSIAttacher-38] - _ = x[AzureDiskCSILivenessProbe-39] - _ = x[AzureDiskCSINodeDriverRegistar-40] - _ = x[AzureDiskCSIProvisioner-41] - _ = x[AzureDiskCSIResizer-42] - _ = x[AzureDiskCSISnapshotter-43] - _ = x[NutanixCSILivenessProbe-44] - _ = x[NutanixCSI-45] - _ 
= x[NutanixCSIProvisioner-46] - _ = x[NutanixCSIRegistrar-47] - _ = x[NutanixCSIResizer-48] - _ = x[NutanixCSISnapshotter-49] - _ = x[DigitalOceanCSI-50] - _ = x[DigitalOceanCSIAlpine-51] - _ = x[DigitalOceanCSIAttacher-52] - _ = x[DigitalOceanCSINodeDriverRegistar-53] - _ = x[DigitalOceanCSIProvisioner-54] - _ = x[DigitalOceanCSIResizer-55] - _ = x[DigitalOceanCSISnapshotter-56] - _ = x[OpenstackCSI-57] - _ = x[OpenstackCSINodeDriverRegistar-58] - _ = x[OpenstackCSILivenessProbe-59] - _ = x[OpenstackCSIAttacher-60] - _ = x[OpenstackCSIProvisioner-61] - _ = x[OpenstackCSIResizer-62] - _ = x[OpenstackCSISnapshotter-63] - _ = x[HetznerCSI-64] - _ = x[HetznerCSIAttacher-65] - _ = x[HetznerCSIResizer-66] - _ = x[HetznerCSIProvisioner-67] - _ = x[HetznerCSILivenessProbe-68] - _ = x[HetznerCSINodeDriverRegistar-69] - _ = x[DigitaloceanCCM-70] - _ = x[EquinixMetalCCM-71] - _ = x[HetznerCCM-72] - _ = x[GCPCCM-73] - _ = x[NutanixCCM-74] - _ = x[OpenstackCCM-75] - _ = x[VsphereCCM-76] - _ = x[CSIVaultSecretProvider-77] - _ = x[SecretStoreCSIDriverNodeRegistrar-78] - _ = x[SecretStoreCSIDriver-79] - _ = x[SecretStoreCSIDriverLivenessProbe-80] - _ = x[SecretStoreCSIDriverCRDs-81] - _ = x[VMwareCloudDirectorCSI-82] - _ = x[VMwareCloudDirectorCSIAttacher-83] - _ = x[VMwareCloudDirectorCSIProvisioner-84] - _ = x[VMwareCloudDirectorCSIResizer-85] - _ = x[VMwareCloudDirectorCSINodeDriverRegistrar-86] - _ = x[VsphereCSIDriver-87] - _ = x[VsphereCSISyncer-88] - _ = x[VsphereCSIAttacher-89] - _ = x[VsphereCSILivenessProbe-90] - _ = x[VsphereCSINodeDriverRegistar-91] - _ = x[VsphereCSIProvisioner-92] - _ = x[VsphereCSIResizer-93] - _ = x[VsphereCSISnapshotter-94] - _ = x[GCPComputeCSIDriver-95] - _ = x[GCPComputeCSIProvisioner-96] - _ = x[GCPComputeCSIAttacher-97] - _ = x[GCPComputeCSIResizer-98] - _ = x[GCPComputeCSISnapshotter-99] - _ = x[GCPComputeCSINodeDriverRegistrar-100] - _ = x[CalicoVXLANCNI-101] - _ = x[CalicoVXLANController-102] - _ = x[CalicoVXLANNode-103] + _ = x[CiliumEnvoy-7] + _ = x[HubbleRelay-8] + _ = x[HubbleUI-9] + _ = x[HubbleUIBackend-10] + _ = x[CiliumCertGen-11] + _ = x[WeaveNetCNIKube-12] + _ = x[WeaveNetCNINPC-13] + _ = x[DNSNodeCache-14] + _ = x[MachineController-15] + _ = x[MetricsServer-16] + _ = x[OperatingSystemManager-17] + _ = x[ClusterAutoscaler-18] + _ = x[AwsCCM-19] + _ = x[AzureCCM-20] + _ = x[AzureCNM-21] + _ = x[CSISnapshotController-22] + _ = x[CSISnapshotWebhook-23] + _ = x[AwsEbsCSI-24] + _ = x[AwsEbsCSIAttacher-25] + _ = x[AwsEbsCSILivenessProbe-26] + _ = x[AwsEbsCSINodeDriverRegistrar-27] + _ = x[AwsEbsCSIProvisioner-28] + _ = x[AwsEbsCSIResizer-29] + _ = x[AwsEbsCSISnapshotter-30] + _ = x[AzureFileCSI-31] + _ = x[AzureFileCSIAttacher-32] + _ = x[AzureFileCSILivenessProbe-33] + _ = x[AzureFileCSINodeDriverRegistar-34] + _ = x[AzureFileCSIProvisioner-35] + _ = x[AzureFileCSIResizer-36] + _ = x[AzureFileCSISnapshotter-37] + _ = x[AzureDiskCSI-38] + _ = x[AzureDiskCSIAttacher-39] + _ = x[AzureDiskCSILivenessProbe-40] + _ = x[AzureDiskCSINodeDriverRegistar-41] + _ = x[AzureDiskCSIProvisioner-42] + _ = x[AzureDiskCSIResizer-43] + _ = x[AzureDiskCSISnapshotter-44] + _ = x[NutanixCSILivenessProbe-45] + _ = x[NutanixCSI-46] + _ = x[NutanixCSIProvisioner-47] + _ = x[NutanixCSIRegistrar-48] + _ = x[NutanixCSIResizer-49] + _ = x[NutanixCSISnapshotter-50] + _ = x[DigitalOceanCSI-51] + _ = x[DigitalOceanCSIAlpine-52] + _ = x[DigitalOceanCSIAttacher-53] + _ = x[DigitalOceanCSINodeDriverRegistar-54] + _ = x[DigitalOceanCSIProvisioner-55] + _ = x[DigitalOceanCSIResizer-56] + _ = 
x[DigitalOceanCSISnapshotter-57] + _ = x[OpenstackCSI-58] + _ = x[OpenstackCSINodeDriverRegistar-59] + _ = x[OpenstackCSILivenessProbe-60] + _ = x[OpenstackCSIAttacher-61] + _ = x[OpenstackCSIProvisioner-62] + _ = x[OpenstackCSIResizer-63] + _ = x[OpenstackCSISnapshotter-64] + _ = x[HetznerCSI-65] + _ = x[HetznerCSIAttacher-66] + _ = x[HetznerCSIResizer-67] + _ = x[HetznerCSIProvisioner-68] + _ = x[HetznerCSILivenessProbe-69] + _ = x[HetznerCSINodeDriverRegistar-70] + _ = x[DigitaloceanCCM-71] + _ = x[EquinixMetalCCM-72] + _ = x[HetznerCCM-73] + _ = x[GCPCCM-74] + _ = x[NutanixCCM-75] + _ = x[OpenstackCCM-76] + _ = x[VsphereCCM-77] + _ = x[CSIVaultSecretProvider-78] + _ = x[SecretStoreCSIDriverNodeRegistrar-79] + _ = x[SecretStoreCSIDriver-80] + _ = x[SecretStoreCSIDriverLivenessProbe-81] + _ = x[SecretStoreCSIDriverCRDs-82] + _ = x[VMwareCloudDirectorCSI-83] + _ = x[VMwareCloudDirectorCSIAttacher-84] + _ = x[VMwareCloudDirectorCSIProvisioner-85] + _ = x[VMwareCloudDirectorCSIResizer-86] + _ = x[VMwareCloudDirectorCSINodeDriverRegistrar-87] + _ = x[VsphereCSIDriver-88] + _ = x[VsphereCSISyncer-89] + _ = x[VsphereCSIAttacher-90] + _ = x[VsphereCSILivenessProbe-91] + _ = x[VsphereCSINodeDriverRegistar-92] + _ = x[VsphereCSIProvisioner-93] + _ = x[VsphereCSIResizer-94] + _ = x[VsphereCSISnapshotter-95] + _ = x[GCPComputeCSIDriver-96] + _ = x[GCPComputeCSIProvisioner-97] + _ = x[GCPComputeCSIAttacher-98] + _ = x[GCPComputeCSIResizer-99] + _ = x[GCPComputeCSISnapshotter-100] + _ = x[GCPComputeCSINodeDriverRegistrar-101] + _ = x[CalicoVXLANCNI-102] + _ = x[CalicoVXLANController-103] + _ = x[CalicoVXLANNode-104] } -const _Resource_name = "CalicoCNICalicoControllerCalicoNodeFlannelCiliumCiliumOperatorHubbleRelayHubbleUIHubbleUIBackendCiliumCertGenWeaveNetCNIKubeWeaveNetCNINPCDNSNodeCacheMachineControllerMetricsServerOperatingSystemManagerClusterAutoscalerAwsCCMAzureCCMAzureCNMCSISnapshotControllerCSISnapshotWebhookAwsEbsCSIAwsEbsCSIAttacherAwsEbsCSILivenessProbeAwsEbsCSINodeDriverRegistrarAwsEbsCSIProvisionerAwsEbsCSIResizerAwsEbsCSISnapshotterAzureFileCSIAzureFileCSIAttacherAzureFileCSILivenessProbeAzureFileCSINodeDriverRegistarAzureFileCSIProvisionerAzureFileCSIResizerAzureFileCSISnapshotterAzureDiskCSIAzureDiskCSIAttacherAzureDiskCSILivenessProbeAzureDiskCSINodeDriverRegistarAzureDiskCSIProvisionerAzureDiskCSIResizerAzureDiskCSISnapshotterNutanixCSILivenessProbeNutanixCSINutanixCSIProvisionerNutanixCSIRegistrarNutanixCSIResizerNutanixCSISnapshotterDigitalOceanCSIDigitalOceanCSIAlpineDigitalOceanCSIAttacherDigitalOceanCSINodeDriverRegistarDigitalOceanCSIProvisionerDigitalOceanCSIResizerDigitalOceanCSISnapshotterOpenstackCSIOpenstackCSINodeDriverRegistarOpenstackCSILivenessProbeOpenstackCSIAttacherOpenstackCSIProvisionerOpenstackCSIResizerOpenstackCSISnapshotterHetznerCSIHetznerCSIAttacherHetznerCSIResizerHetznerCSIProvisionerHetznerCSILivenessProbeHetznerCSINodeDriverRegistarDigitaloceanCCMEquinixMetalCCMHetznerCCMGCPCCMNutanixCCMOpenstackCCMVsphereCCMCSIVaultSecretProviderSecretStoreCSIDriverNodeRegistrarSecretStoreCSIDriverSecretStoreCSIDriverLivenessProbeSecretStoreCSIDriverCRDsVMwareCloudDirectorCSIVMwareCloudDirectorCSIAttacherVMwareCloudDirectorCSIProvisionerVMwareCloudDirectorCSIResizerVMwareCloudDirectorCSINodeDriverRegistrarVsphereCSIDriverVsphereCSISyncerVsphereCSIAttacherVsphereCSILivenessProbeVsphereCSINodeDriverRegistarVsphereCSIProvisionerVsphereCSIResizerVsphereCSISnapshotterGCPComputeCSIDriverGCPComputeCSIProvisionerGCPComputeCSIAttacherGCPComputeCSIResizerGCPComputeCSISnapshotter
GCPComputeCSINodeDriverRegistrarCalicoVXLANCNICalicoVXLANControllerCalicoVXLANNode" +const _Resource_name = "CalicoCNICalicoControllerCalicoNodeFlannelCiliumCiliumOperatorCiliumEnvoyHubbleRelayHubbleUIHubbleUIBackendCiliumCertGenWeaveNetCNIKubeWeaveNetCNINPCDNSNodeCacheMachineControllerMetricsServerOperatingSystemManagerClusterAutoscalerAwsCCMAzureCCMAzureCNMCSISnapshotControllerCSISnapshotWebhookAwsEbsCSIAwsEbsCSIAttacherAwsEbsCSILivenessProbeAwsEbsCSINodeDriverRegistrarAwsEbsCSIProvisionerAwsEbsCSIResizerAwsEbsCSISnapshotterAzureFileCSIAzureFileCSIAttacherAzureFileCSILivenessProbeAzureFileCSINodeDriverRegistarAzureFileCSIProvisionerAzureFileCSIResizerAzureFileCSISnapshotterAzureDiskCSIAzureDiskCSIAttacherAzureDiskCSILivenessProbeAzureDiskCSINodeDriverRegistarAzureDiskCSIProvisionerAzureDiskCSIResizerAzureDiskCSISnapshotterNutanixCSILivenessProbeNutanixCSINutanixCSIProvisionerNutanixCSIRegistrarNutanixCSIResizerNutanixCSISnapshotterDigitalOceanCSIDigitalOceanCSIAlpineDigitalOceanCSIAttacherDigitalOceanCSINodeDriverRegistarDigitalOceanCSIProvisionerDigitalOceanCSIResizerDigitalOceanCSISnapshotterOpenstackCSIOpenstackCSINodeDriverRegistarOpenstackCSILivenessProbeOpenstackCSIAttacherOpenstackCSIProvisionerOpenstackCSIResizerOpenstackCSISnapshotterHetznerCSIHetznerCSIAttacherHetznerCSIResizerHetznerCSIProvisionerHetznerCSILivenessProbeHetznerCSINodeDriverRegistarDigitaloceanCCMEquinixMetalCCMHetznerCCMGCPCCMNutanixCCMOpenstackCCMVsphereCCMCSIVaultSecretProviderSecretStoreCSIDriverNodeRegistrarSecretStoreCSIDriverSecretStoreCSIDriverLivenessProbeSecretStoreCSIDriverCRDsVMwareCloudDirectorCSIVMwareCloudDirectorCSIAttacherVMwareCloudDirectorCSIProvisionerVMwareCloudDirectorCSIResizerVMwareCloudDirectorCSINodeDriverRegistrarVsphereCSIDriverVsphereCSISyncerVsphereCSIAttacherVsphereCSILivenessProbeVsphereCSINodeDriverRegistarVsphereCSIProvisionerVsphereCSIResizerVsphereCSISnapshotterGCPComputeCSIDriverGCPComputeCSIProvisionerGCPComputeCSIAttacherGCPComputeCSIResizerGCPComputeCSISnapshotterGCPComputeCSINodeDriverRegistrarCalicoVXLANCNICalicoVXLANControllerCalicoVXLANNode" -var _Resource_index = [...]uint16{0, 9, 25, 35, 42, 48, 62, 73, 81, 96, 109, 124, 138, 150, 167, 180, 202, 219, 225, 233, 241, 262, 280, 289, 306, 328, 356, 376, 392, 412, 424, 444, 469, 499, 522, 541, 564, 576, 596, 621, 651, 674, 693, 716, 739, 749, 770, 789, 806, 827, 842, 863, 886, 919, 945, 967, 993, 1005, 1035, 1060, 1080, 1103, 1122, 1145, 1155, 1173, 1190, 1211, 1234, 1262, 1277, 1292, 1302, 1308, 1318, 1330, 1340, 1362, 1395, 1415, 1448, 1472, 1494, 1524, 1557, 1586, 1627, 1643, 1659, 1677, 1700, 1728, 1749, 1766, 1787, 1806, 1830, 1851, 1871, 1895, 1927, 1941, 1962, 1977} +var _Resource_index = [...]uint16{0, 9, 25, 35, 42, 48, 62, 73, 84, 92, 107, 120, 135, 149, 161, 178, 191, 213, 230, 236, 244, 252, 273, 291, 300, 317, 339, 367, 387, 403, 423, 435, 455, 480, 510, 533, 552, 575, 587, 607, 632, 662, 685, 704, 727, 750, 760, 781, 800, 817, 838, 853, 874, 897, 930, 956, 978, 1004, 1016, 1046, 1071, 1091, 1114, 1133, 1156, 1166, 1184, 1201, 1222, 1245, 1273, 1288, 1303, 1313, 1319, 1329, 1341, 1351, 1373, 1406, 1426, 1459, 1483, 1505, 1535, 1568, 1597, 1638, 1654, 1670, 1688, 1711, 1739, 1760, 1777, 1798, 1817, 1841, 1862, 1882, 1906, 1938, 1952, 1973, 1988} func (i Resource) String() string { i -= 1 diff --git a/pkg/templates/kubeadm/v1beta4/kubeadm.go b/pkg/templates/kubeadm/v1beta4/kubeadm.go index d9a9302d0..c359f61cc 100644 --- a/pkg/templates/kubeadm/v1beta4/kubeadm.go +++ b/pkg/templates/kubeadm/v1beta4/kubeadm.go 
@@ -193,7 +193,7 @@ func NewConfig(s *state.State, host kubeoneapi.HostConfig) (*Config, error) { } if cluster.ClusterNetwork.KubeProxy != nil && cluster.ClusterNetwork.KubeProxy.SkipInstallation { - clusterConfig.DNS.Disabled = true + clusterConfig.Proxy.Disabled = true } if err = addFeaturesExtraMounts(s, clusterConfig); err != nil {
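
For context on the `supportFn` hook wired up in `pkg/addons/ensure.go`: the certgen flags and probe configuration of the `hubble-generate-certs` Job change with this release, and a Job's pod template is immutable once created, so the old Job has to be deleted before the updated manifests are applied (re-applying over it would be rejected by the API server). Below is a minimal sketch of that delete-if-present pattern, assuming a controller-runtime `client.Client`; the function name and package here are illustrative, while the real helper is `migrateCiliumHubbleCertsJob` in `pkg/addons/helpers.go`, which does an explicit `Get` followed by `clientutil.DeleteIfExists`.

```go
// Package migration sketches the pre-upgrade cleanup performed by the
// new supportFn for the Cilium addon. Illustrative only; not the exact
// KubeOne helper.
package migration

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteHubbleCertsJob removes the pre-1.16 hubble-generate-certs Job if it
// exists. A Job's pod template is immutable, so the updated manifest cannot
// simply be applied over the old object; it must be recreated.
func deleteHubbleCertsJob(ctx context.Context, c client.Client) error {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "hubble-generate-certs",
			Namespace: metav1.NamespaceSystem, // kube-system
		},
	}

	// Deleting a missing Job is not an error for this migration.
	if err := c.Delete(ctx, job); err != nil && !k8serrors.IsNotFound(err) {
		return err
	}

	return nil
}
```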