Update Cilium to v1.16.3 #3415

Merged · 7 commits · Oct 24, 2024
41 changes: 26 additions & 15 deletions addons/cni-cilium/Kustomization
@@ -5,7 +5,7 @@ namespace: kube-system
helmCharts:
- name: cilium
repo: https://helm.cilium.io/
version: 1.15.6
version: 1.16.3
releaseName: cilium
namespace: kube-system
valuesFile: helm-values
@@ -18,20 +18,6 @@ patches:
name: cilium-config
namespace: kube-system
$patch: delete
- patch: |-
apiVersion: batch/v1
kind: Job
metadata:
name: hubble-generate-certs
namespace: kube-system
$patch: delete
- patch: |-
apiVersion: batch/v1
kind: CronJob
metadata:
name: hubble-generate-certs
namespace: kube-system
$patch: delete

# A hack, to compensate for the lack of ability to set .Capabilities when using helmCharts API.
- target:
@@ -133,3 +119,28 @@ patches:
name: cilium-config
key: KUBERNETES_SERVICE_PORT
optional: true
- patch: |-
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
spec:
template:
spec:
containers:
- name: cilium-envoy
image: '{{ .InternalImages.Get "CiliumEnvoy" }}'
env:
- name: KUBERNETES_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: cilium-config
key: KUBERNETES_SERVICE_HOST
optional: true
- name: KUBERNETES_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: cilium-config
key: KUBERNETES_SERVICE_PORT
optional: true
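
For context, this file combines kustomize's `helmCharts` field with strategic-merge patches: kustomize renders the chart via `helm template`, then applies the patches to the rendered objects. A minimal sketch of that pattern, assembled from the hunks above (the `{{ .InternalImages.Get ... }}` placeholders indicate the file is run through Go templating before kustomize sees it):

```yaml
# Minimal sketch of the pattern in addons/cni-cilium/Kustomization:
# render the Cilium chart, then delete/override chart-rendered objects.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
helmCharts:
  - name: cilium
    repo: https://helm.cilium.io/
    version: 1.16.3
    releaseName: cilium
    namespace: kube-system
    valuesFile: helm-values
patches:
  # $patch: delete removes an object the chart rendered, so a templated
  # replacement (e.g. the cilium-config ConfigMap) can be supplied elsewhere.
  - patch: |-
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: cilium-config
        namespace: kube-system
      $patch: delete
```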
302 changes: 117 additions & 185 deletions addons/cni-cilium/cilium-config-map.yaml
@@ -6,220 +6,152 @@ metadata:
name: cilium-config
namespace: kube-system
data:
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in an etcd kvstore, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
identity-allocation-mode: crd
identity-heartbeat-timeout: "30m0s"
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
skip-cnp-status-startup-clean: "false"

# If you want to run cilium in debug mode change this value to true
debug: "false"
debug-verbose: ""
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
# Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
# field is not set.
proxy-prometheus-port: "9964"

# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"

# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
{{ if .Config.ClusterNetwork.HasIPv6 }}
enable-ipv6: "true"
{{ else }}
enable-ipv6: "false"
{{ end }}
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation: medium

# The monitor aggregation interval governs the typical time between monitor
# notification events for each allowed connection.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-interval: "5s"

# The monitor aggregation flags determine which TCP flags, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
arping-refresh-period: "30s"
auto-direct-node-routes: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
bpf-lb-acceleration: disabled
bpf-lb-external-clusterip: "false"
bpf-lb-map-max: "65536"
bpf-lb-sock-terminate-pod-connections: "false"
bpf-lb-sock: "false"
bpf-map-dynamic-size-ratio: "0.0025"
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-lb-acceleration: disabled

# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"

# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"

# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
cilium-endpoint-gc-interval: "5m0s"
cluster-id: "0"

# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
# Default case
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
service-no-backend-response: reject

# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"

enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"

enable-xt-socket-fallback: "true"
install-no-conntrack-iptables-rules: "false"

auto-direct-node-routes: "false"
enable-local-redirect-policy: "false"
enable-masquerade-to-route-source: "false"
enable-metrics: "true"

{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }}
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
{{ else }}
kube-proxy-replacement: "disabled"
cluster-name: default
{{ if .Config.ClusterNetwork.HasIPv4 }}
cluster-pool-ipv4-cidr: "{{ .Config.ClusterNetwork.PodSubnet }}"
cluster-pool-ipv4-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv4 }}"
{{ end }}
max-connected-clusters: "255"
bpf-lb-sock: "false"
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
enable-k8s-networkpolicy: "true"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
{{ if .Config.ClusterNetwork.HasIPv6 }}
cluster-pool-ipv6-cidr: "{{ .Config.ClusterNetwork.PodSubnetIPv6 }}"
cluster-pool-ipv6-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv6 }}"
{{ end }}
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
cni-exclusive: "false"
cni-log-file: "/var/run/cilium/cilium-cni.log"
custom-cni-conf: "false"
datapath-mode: veth
debug-verbose: ""
debug: "false"
direct-routing-skip-unreachable: "false"
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-auto-protect-node-port-range: "true"
enable-bpf-clock-probe: "false"
enable-endpoint-health-checking: "true"
enable-health-check-loadbalancer-ip: "false"
enable-health-check-nodeport: "true"
enable-health-checking: "true"
enable-remote-node-identity: "true"
enable-well-known-identities: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
operator-prometheus-serve-addr: :9963
policy-cidr-match-mode: ""
# Enable Hubble gRPC service.
{{ if .Config.ClusterNetwork.CNI.Cilium.EnableHubble }}
enable-hubble: "true"
{{ else }}
enable-hubble: "false"
{{ end }}
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: ":4244"
enable-ipv4-big-tcp: "false"
enable-ipv4-masquerade: "true"
enable-ipv4: "true"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
{{ if .Config.ClusterNetwork.HasIPv6 }}
enable-ipv6: "true"
{{ else }}
enable-ipv6: "false"
{{ end }}
enable-k8s-networkpolicy: "true"
enable-k8s-terminating-endpoint: "true"
enable-l2-neigh-discovery: "true"
enable-l7-proxy: "true"
enable-local-redirect-policy: "false"
enable-masquerade-to-route-source: "false"
enable-metrics: "true"
enable-node-selector-labels: "false"
enable-policy: "default"
enable-runtime-device-detection: "true"
enable-sctp: "false"
enable-svc-source-range-check: "true"
enable-tcx: "true"
enable-vtep: "false"
enable-well-known-identities: "false"
enable-xt-socket-fallback: "true"
envoy-base-id: "0"
envoy-keep-cap-netbindservice: "false"
external-envoy-proxy: "true"
hubble-disable-tls: "false"
hubble-export-file-max-backups: "5"
hubble-export-file-max-size-mb: "10"
hubble-listen-address: ":4244"
hubble-socket-path: "/var/run/cilium/hubble.sock"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
ipam: "cluster-pool"
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
identity-allocation-mode: crd
identity-gc-interval: "15m0s"
identity-heartbeat-timeout: "30m0s"
install-no-conntrack-iptables-rules: "false"
ipam-cilium-node-update-rate: "15s"
{{ if .Config.ClusterNetwork.HasIPv4 }}
cluster-pool-ipv4-cidr: "{{ .Config.ClusterNetwork.PodSubnet }}"
cluster-pool-ipv4-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv4 }}"
{{ end }}
{{ if .Config.ClusterNetwork.HasIPv6 }}
cluster-pool-ipv6-cidr: "{{ .Config.ClusterNetwork.PodSubnetIPv6 }}"
cluster-pool-ipv6-mask-size: "{{ .Config.ClusterNetwork.NodeCIDRMaskSizeIPv6 }}"
ipam: "cluster-pool"
k8s-client-burst: "20"
k8s-client-qps: "10"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }}
kube-proxy-replacement-healthz-bind-address: ""
kube-proxy-replacement: "true"
{{ else }}
kube-proxy-replacement: "false"
{{ end }}
dnsproxy-enable-transparent-mode: "true"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-bgp-control-plane: "false"
max-connected-clusters: "255"
mesh-auth-enabled: "true"
mesh-auth-gc-interval: "5m0s"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
monitor-aggregation-flags: all
monitor-aggregation-interval: "5s"
monitor-aggregation: medium
nat-map-stats-entries: "32"
nat-map-stats-interval: 30s
node-port-bind-protection: "true"
nodeport-addresses: ""
nodes-gc-interval: "5m0s"
operator-api-serve-addr: "127.0.0.1:9234"
operator-prometheus-serve-addr: :9963
policy-cidr-match-mode: ""
preallocate-bpf-maps: "false"
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
enable-k8s-terminating-endpoint: "true"
enable-sctp: "false"
k8s-client-qps: "5"
k8s-client-burst: "10"
proxy-connect-timeout: "2"
proxy-idle-timeout-seconds: "60"
proxy-max-connection-duration-seconds: "0"
proxy-max-requests-per-connection: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-xff-num-trusted-hops-ingress: "0"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
routing-mode: "tunnel"
service-no-backend-response: reject
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
set-cilium-node-taints: "true"
synchronize-k8s-nodes: "true"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-connect-timeout: "2"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
external-envoy-proxy: "false"

{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }}
tunnel-protocol: "vxlan"
unmanaged-pod-watcher-interval: "15"
vtep-cidr: ""
vtep-endpoint: ""
vtep-mac: ""
vtep-mask: ""
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
{{ if eq .Config.ClusterNetwork.CNI.Cilium.KubeProxyReplacement "strict" }}
KUBERNETES_SERVICE_HOST: "{{ .Config.APIEndpoint.Host }}"
KUBERNETES_SERVICE_PORT: "{{ .Config.APIEndpoint.Port }}"
{{ end }}
{{ end }}
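
One migration detail visible in the new hunks: Cilium 1.16 expects `kube-proxy-replacement` to be `"true"`/`"false"`, where 1.15 used `"strict"`/`"disabled"`, so the template now maps KubeOne's `KubeProxyReplacement: "strict"` to `"true"`. A hedged sketch of the KubeOneCluster fragment that drives these conditionals; the field names are inferred from the `.Config.ClusterNetwork.CNI.Cilium` references above, not taken from KubeOne docs:

```yaml
# Hypothetical KubeOneCluster fragment; field names are inferred from the
# .Config.ClusterNetwork.CNI.Cilium template references in this diff.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
clusterNetwork:
  cni:
    cilium:
      # Rendered as kube-proxy-replacement: "true" with Cilium 1.16
      # (1.15 passed "strict" through verbatim).
      kubeProxyReplacement: "strict"
      # Toggles the enable-hubble key in cilium-config.
      enableHubble: true
```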